Posted to commits@hbase.apache.org by bh...@apache.org on 2020/09/19 22:04:56 UTC

[hbase] branch branch-1 updated (20d0a32 -> 55cae10)

This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a change to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    from 20d0a32  HBASE-24481 REST - Fix incorrect response code of get-regions in rest api
     new 98d80fc5 HBASE-23257: Track clusterID in stand by masters (#798)
     new e4161e5  HBASE-23275: Track active master's address in ActiveMasterManager (#812)
     new 9a1d5a0  HBASE-23281: Track meta region locations in masters (#830)
     new 18200b0  HBASE-23304: RPCs needed for client meta information lookup (#904)
     new cb75662  HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)
     new d866994  HBASE-23604: Clarify Registry usage in the code
     new ebe9e68  HBASE-23305: Implement master based registry for client connections
     new 3e1450d  HBASE-24765: Dynamic master discovery (#2314)
     new 55cae10  HBASE-23330: Fix delegation token fetch with MasterRegistry

The 9 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/hbase/client/Connection.java |     5 +
 .../hadoop/hbase/client/ConnectionAdapter.java     |    10 +
 .../hadoop/hbase/client/ConnectionManager.java     |   107 +-
 .../{Registry.java => ConnectionRegistry.java}     |    26 +-
 ...Factory.java => ConnectionRegistryFactory.java} |    16 +-
 .../apache/hadoop/hbase/client/HConnection.java    |     7 +
 .../hbase/client/MasterAddressRefresher.java       |   125 +
 .../apache/hadoop/hbase/client/MasterRegistry.java |   266 +
 .../hadoop/hbase/client/MetricsConnection.java     |    11 +-
 .../org/apache/hadoop/hbase/client/TableState.java |   205 +
 ...eperRegistry.java => ZKConnectionRegistry.java} |    63 +-
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |   105 +
 .../hadoop/hbase/protobuf/RequestConverter.java    |    16 +-
 .../apache/hadoop/hbase/security/SecurityInfo.java |     3 +
 .../hadoop/hbase/security/token/TokenUtil.java     |    24 +-
 .../hbase/zookeeper/MasterAddressTracker.java      |    55 +
 .../zookeeper/ZKTableStateClientSideReader.java    |   205 -
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |   134 +-
 .../hadoop/hbase/client/TestAsyncProcess.java      |    18 +-
 .../hadoop/hbase/client/TestClientNoCluster.java   |    21 +-
 .../hadoop/hbase/client/TestMetricsConnection.java |     6 +
 .../TestZKTableStateClientSideReader.java          |    52 -
 .../java/org/apache/hadoop/hbase/HConstants.java   |     3 +-
 ...tion.java => MasterRegistryFetchException.java} |    34 +-
 .../java/org/apache/hadoop/hbase/util/DNS.java     |    11 +
 .../apache/hadoop/hbase/util/PrettyPrinter.java    |    19 +
 .../hbase/protobuf/generated/HBaseProtos.java      | 14725 ++++++++------
 .../hbase/protobuf/generated/MasterProtos.java     | 20179 ++++++++++++-------
 .../hbase/protobuf/generated/ZooKeeperProtos.java  |   213 +-
 hbase-protocol/src/main/protobuf/HBase.proto       |    27 +
 hbase-protocol/src/main/protobuf/Master.proto      |    70 +
 hbase-protocol/src/main/protobuf/ZooKeeper.proto   |     3 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |     6 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      |     8 +-
 .../hadoop/hbase/CoordinatedStateManager.java      |     8 -
 .../org/apache/hadoop/hbase/TableDescriptor.java   |   161 +
 .../org/apache/hadoop/hbase/TableDescriptors.java  |    25 +
 .../org/apache/hadoop/hbase/TableStateManager.java |   121 -
 .../coordination/BaseCoordinatedStateManager.java  |     5 -
 .../coordination/ZkCoordinatedStateManager.java    |    14 -
 .../coordination/ZkOpenRegionCoordination.java     |     4 +-
 .../hadoop/hbase/master/ActiveMasterManager.java   |    92 +-
 .../hadoop/hbase/master/AssignmentManager.java     |   114 +-
 .../hadoop/hbase/master/CachedClusterId.java       |   155 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |   144 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |     1 -
 .../hadoop/hbase/master/MasterRpcServices.java     |    96 +-
 .../apache/hadoop/hbase/master/MasterServices.java |     5 +
 .../hbase/master/MetaRegionLocationCache.java      |   256 +
 .../apache/hadoop/hbase/master/RegionStates.java   |    14 +-
 .../hadoop/hbase/master/TableNamespaceManager.java |     9 +-
 .../hadoop/hbase/master/TableStateManager.java     |   219 +
 .../hbase/master/handler/ClosedRegionHandler.java  |     5 +-
 .../hbase/master/handler/CreateTableHandler.java   |    84 +-
 .../hbase/master/handler/DisableTableHandler.java  |    30 +-
 .../hbase/master/handler/EnableTableHandler.java   |    45 +-
 .../hbase/master/handler/TableEventHandler.java    |    13 +-
 .../master/procedure/AddColumnFamilyProcedure.java |     4 +-
 .../master/procedure/CreateTableProcedure.java     |    15 +-
 .../procedure/DeleteColumnFamilyProcedure.java     |     4 +-
 .../master/procedure/DisableTableProcedure.java    |    14 +-
 .../master/procedure/EnableTableProcedure.java     |    12 +-
 .../master/procedure/MasterDDLOperationHelper.java |     4 +-
 .../procedure/ModifyColumnFamilyProcedure.java     |     4 +-
 .../master/procedure/ModifyTableProcedure.java     |     6 +-
 .../master/procedure/ServerCrashProcedure.java     |     8 +-
 .../hbase/master/snapshot/SnapshotManager.java     |     8 +-
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |     4 +-
 .../hadoop/hbase/regionserver/CompactionTool.java  |    14 +-
 .../hbase/regionserver/wal/WALCellCodec.java       |     1 +
 .../hadoop/hbase/snapshot/SnapshotManifest.java    |     8 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      |   212 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |    60 +-
 .../java/org/apache/hadoop/hbase/util/HMerge.java  |     3 +-
 .../apache/hadoop/hbase/util/JVMClusterUtil.java   |     7 +
 .../java/org/apache/hadoop/hbase/util/Merge.java   |     5 +-
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   |    90 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |    25 +-
 .../hbase/zookeeper/ZKTableStateManager.java       |   369 -
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |     4 +
 .../apache/hadoop/hbase/TestCachedClusterId.java   |    86 +
 .../apache/hadoop/hbase/TestDrainingServer.java    |   246 +-
 .../hbase/TestFSTableDescriptorForceCreation.java  |    12 +-
 .../TestHColumnDescriptorDefaultVersions.java      |     4 +-
 .../apache/hadoop/hbase/TestTableDescriptor.java   |    57 +
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |    26 +-
 .../hadoop/hbase/client/TestFromClientSide.java    |    85 +-
 .../client/TestFromClientSideWithCoprocessor.java  |    29 +-
 .../hbase/client/TestMasterAddressRefresher.java   |   181 +
 .../hadoop/hbase/client/TestMasterRegistry.java    |   236 +
 .../hbase/client/TestMetaRegionLocationCache.java  |   236 +
 .../hbase/master/MockNoopMasterServices.java       |     5 +
 .../hbase/master/TestActiveMasterManager.java      |   275 +-
 .../master/TestAssignmentManagerOnCluster.java     |    16 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java    |    35 +-
 .../hbase/master/TestClientMetaServiceRPCs.java    |   137 +
 .../org/apache/hadoop/hbase/master/TestMaster.java |     4 +-
 .../hadoop/hbase/master/TestMasterFailover.java    |    30 +-
 .../TestMasterRestartAfterDisablingTable.java      |     8 +-
 .../hbase/master/TestOpenedRegionHandler.java      |    11 +-
 .../hadoop/hbase/master/TestRegionStates.java      |     2 -
 .../master/TestRegionsRecoveryConfigManager.java   |     3 +-
 .../hadoop/hbase/master/TestTableLockManager.java  |     6 +-
 .../procedure/MasterProcedureTestingUtility.java   |     8 +-
 .../procedure/TestCreateTableProcedure2.java       |    10 +-
 .../TestTableDescriptorModificationFromClient.java |     6 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java    |    35 +-
 .../regionserver/TestMasterAddressTracker.java     |    37 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |     8 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |    39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java    |    51 -
 .../hbase/zookeeper/TestZKTableStateManager.java   |   114 -
 112 files changed, 26256 insertions(+), 15061 deletions(-)
 rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{Registry.java => ConnectionRegistry.java} (76%)
 rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{RegistryFactory.java => ConnectionRegistryFactory.java} (71%)
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
 rename hbase-client/src/main/java/org/apache/hadoop/hbase/client/{ZooKeeperRegistry.java => ZKConnectionRegistry.java} (74%)
 delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
 delete mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java
 copy hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/{UnexpectedStateException.java => MasterRegistryFetchException.java} (61%)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java


[hbase] 01/09: HBASE-23257: Track clusterID in stand by masters (#798)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 98d80fc5e4e865d58a798baba1480b544631450a
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Thu Nov 7 18:32:44 2019 -0800

    HBASE-23257: Track clusterID in stand by masters (#798)
    
    This patch implements a simple cache that all the masters
    can look up to serve the cluster ID to clients. The active
    HMaster is still responsible for creating it, but all the
    masters will read it from the filesystem to serve clients.
    
    RPCs exposing it will come in a separate patch as a part of
    HBASE-18095.
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    Signed-off-by: Wellington Chevreuil <wc...@apache.org>
    Signed-off-by: Guangxu Cheng <gu...@gmail.com>
    (cherry picked from commit c2e01f2398424104a6faae901b12f782ca74c284)
    (cherry picked from commit 9ab652982b1c1409be04c36911b44965d4b6bbbf)
---
 .../hadoop/hbase/master/CachedClusterId.java       | 155 +++++++++++++++++++++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  11 ++
 .../apache/hadoop/hbase/TestCachedClusterId.java   |  86 ++++++++++++
 3 files changed, 252 insertions(+)
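
The "let only one thread fetch" behavior described above is a single-flight
guard: a compare-and-set flag plus wait/notifyAll with a bounded timeout. A
stripped-down sketch of the pattern (hypothetical names, not the patch code
itself):

    import java.util.concurrent.atomic.AtomicBoolean;

    class SingleFlight {
      private final AtomicBoolean inProgress = new AtomicBoolean(false);
      private volatile String value; // immutable once set, like clusterId

      String get() throws InterruptedException {
        if (value != null) {
          return value; // cache hit, no coordination needed
        }
        if (inProgress.compareAndSet(false, true)) {
          // This thread won the race; it performs the expensive load.
          try {
            value = expensiveLoad();
          } finally {
            inProgress.set(false);
            synchronized (inProgress) {
              inProgress.notifyAll(); // wake any waiters
            }
          }
        } else {
          // Another thread is loading; wait for it, with a bounded timeout
          // so a missed notification cannot block forever.
          synchronized (inProgress) {
            while (inProgress.get()) {
              inProgress.wait(10000);
            }
          }
        }
        return value;
      }

      private String expensiveLoad() {
        return "loaded"; // stands in for the filesystem read in the patch
      }
    }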

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java
new file mode 100644
index 0000000..6825b89
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client
+ * RPCs that fetch the cluster ID. ClusterID is only created by an active master if one does not
+ * already exist. Standby masters just read the information from the file system. This class is
+ * thread-safe.
+ *
+ * TODO: Make it a singleton without affecting concurrent junit tests.
+ */
+@InterfaceAudience.Private
+public class CachedClusterId {
+
+  public static final Logger LOG = LoggerFactory.getLogger(CachedClusterId.class);
+  private static final int MAX_FETCH_TIMEOUT_MS = 10000;
+
+  private Path rootDir;
+  private FileSystem fs;
+
+  // When true, indicates that a FileSystem fetch of ClusterID is in progress. This is used to
+  // avoid multiple fetches from FS and let only one thread fetch the information.
+  AtomicBoolean fetchInProgress = new AtomicBoolean(false);
+
+  // When true, it means that the cluster ID has been fetched successfully from fs.
+  private AtomicBoolean isClusterIdSet = new AtomicBoolean(false);
+  // Immutable once set and read multiple times.
+  private ClusterId clusterId;
+
+  // cache stats for testing.
+  private AtomicInteger cacheMisses = new AtomicInteger(0);
+
+  public CachedClusterId(Configuration conf) throws IOException {
+    rootDir = FSUtils.getRootDir(conf);
+    fs = rootDir.getFileSystem(conf);
+  }
+
+  /**
+   * Succeeds only once, when setting to a non-null value. Overwrites are not allowed.
+   */
+  private void setClusterId(ClusterId id) {
+    if (id == null || isClusterIdSet.get()) {
+      return;
+    }
+    clusterId = id;
+    isClusterIdSet.set(true);
+  }
+
+  /**
+   * Returns a cached copy of the cluster ID. null if the cache is not populated.
+   */
+  private String getClusterId() {
+    if (!isClusterIdSet.get()) {
+      return null;
+    }
+    // It is ok to read without a lock since clusterId is immutable once set.
+    return clusterId.toString();
+  }
+
+  /**
+   * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress,
+   * synchronously fetches the cluster ID and sets it. If an attempt is already in progress,
+   * returns right away and the caller is expected to wait for the fetch to finish.
+   * @return true if the attempt is done, false if another thread is already fetching it.
+   */
+  private boolean attemptFetch() {
+    if (fetchInProgress.compareAndSet(false, true)) {
+      // A fetch is not in progress, so try fetching the cluster ID synchronously and then notify
+      // the waiting threads.
+      try {
+        cacheMisses.incrementAndGet();
+        setClusterId(FSUtils.getClusterId(fs, rootDir));
+      } catch (IOException e) {
+        LOG.warn("Error fetching cluster ID", e);
+      } finally {
+        Preconditions.checkState(fetchInProgress.compareAndSet(true, false));
+        synchronized (fetchInProgress) {
+          fetchInProgress.notifyAll();
+        }
+      }
+      return true;
+    }
+    return false;
+  }
+
+  private void waitForFetchToFinish() throws InterruptedException {
+    synchronized (fetchInProgress) {
+      while (fetchInProgress.get()) {
+        // We don't want the fetches to block forever, for example if there are bugs
+        // such as missed notifications.
+        fetchInProgress.wait(MAX_FETCH_TIMEOUT_MS);
+      }
+    }
+  }
+
+  /**
+   * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached
+   * copy and is thread-safe. Optimized to do a single fetch when multiple threads are
+   * trying to get from a clean cache.
+   *
+   * @return ClusterId read from the FileSystem, or null if an error occurs or the cluster ID
+   *     does not exist on the file system.
+   */
+  public String getFromCacheOrFetch() {
+    String id = getClusterId();
+    if (id != null) {
+      return id;
+    }
+    if (!attemptFetch()) {
+      // A fetch is in progress.
+      try {
+        waitForFetchToFinish();
+      } catch (InterruptedException e) {
+        // pass and return whatever is in the cache.
+      }
+    }
+    return getClusterId();
+  }
+
+  @VisibleForTesting
+  public int getCacheStats() {
+    return cacheMisses.get();
+  }
+}
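
A minimal usage sketch of the class above, assuming an hbase-site.xml on the
classpath whose hbase.rootdir points at a live cluster (the wrapper class and
main method are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.master.CachedClusterId;

    public class ClusterIdLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        CachedClusterId cache = new CachedClusterId(conf);
        // The first call misses the cache and reads the cluster ID from the
        // filesystem; concurrent callers wait on that single fetch rather
        // than issuing their own. Later calls hit the in-memory copy.
        String id = cache.getFromCacheOrFetch();
        System.out.println("cluster ID: " + id);
      }
    }
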
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ffabbb4..b6311e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -386,6 +386,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   /** jetty server for master to redirect requests to regionserver infoServer */
   private org.mortbay.jetty.Server masterJettyServer;
 
+  // Cached clusterId on standby masters to serve cluster ID requests from clients.
+  private final CachedClusterId cachedClusterId;
+
   public static class RedirectServlet extends HttpServlet {
     private static final long serialVersionUID = 2894774810058302473L;
     private final int regionServerInfoPort;
@@ -521,6 +524,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     } else {
       activeMasterManager = null;
     }
+    cachedClusterId = new CachedClusterId(conf);
   }
 
   // return the actual infoPort, -1 means disable info server.
@@ -3429,4 +3433,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     }
     return replicationLoadSourceMap;
   }
+
+  public String getClusterId() {
+    if (activeMaster) {
+      return super.getClusterId();
+    }
+    return cachedClusterId.getFromCacheOrFetch();
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
new file mode 100644
index 0000000..8f0b32d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
+import org.apache.hadoop.hbase.master.CachedClusterId;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestCachedClusterId {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static String clusterId;
+  private static HMaster activeMaster;
+  private static HMaster standByMaster;
+
+  private static class GetClusterIdThread extends TestThread {
+    CachedClusterId cachedClusterId;
+    public GetClusterIdThread(TestContext ctx, CachedClusterId clusterId) {
+      super(ctx);
+      cachedClusterId = clusterId;
+    }
+
+    @Override
+    public void doWork() throws Exception {
+      assertEquals(clusterId, cachedClusterId.getFromCacheOrFetch());
+    }
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+    activeMaster = TEST_UTIL.getHBaseCluster().getMaster();
+    clusterId = activeMaster.getClusterId();
+    standByMaster = TEST_UTIL.getHBaseCluster().startMaster().getMaster();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testClusterIdMatch() {
+    assertEquals(clusterId, standByMaster.getClusterId());
+  }
+
+  @Test
+  public void testMultiThreadedGetClusterId() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    CachedClusterId cachedClusterId = new CachedClusterId(conf);
+    TestContext context = new TestContext(conf);
+    int numThreads = 100;
+    for (int i = 0; i < numThreads; i++) {
+      context.addThread(new GetClusterIdThread(context, cachedClusterId));
+    }
+    context.startThreads();
+    context.stop();
+    int cacheMisses = cachedClusterId.getCacheStats();
+    assertEquals(1, cacheMisses);
+  }
+}


[hbase] 05/09: HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit cb756629b0d7f89d530de4d1ad06c65401eb0801
Author: stack <st...@apache.org>
AuthorDate: Mon Sep 15 09:34:10 2014 -0700

    HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    
    (cherry picked from commit 3cc5d19039904361f60c413f10f3cbca27a7ba96)
---
 .../hadoop/hbase/client/ConnectionAdapter.java     |    5 +
 .../hadoop/hbase/client/ConnectionManager.java     |   26 +-
 .../apache/hadoop/hbase/client/HConnection.java    |    7 +
 .../org/apache/hadoop/hbase/client/Registry.java   |    7 -
 .../org/apache/hadoop/hbase/client/TableState.java |  205 +++
 .../hadoop/hbase/client/ZooKeeperRegistry.java     |   21 -
 .../hadoop/hbase/protobuf/RequestConverter.java    |   16 +-
 .../zookeeper/ZKTableStateClientSideReader.java    |  205 ---
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |    1 +
 .../hadoop/hbase/client/TestAsyncProcess.java      |    5 -
 .../hadoop/hbase/client/TestClientNoCluster.java   |    6 -
 .../TestZKTableStateClientSideReader.java          |   52 -
 .../hbase/protobuf/generated/HBaseProtos.java      | 1761 +++++++++++++++++++-
 .../hbase/protobuf/generated/MasterProtos.java     | 1733 ++++++++++++++++---
 .../hbase/protobuf/generated/ZooKeeperProtos.java  |  213 +--
 hbase-protocol/src/main/protobuf/HBase.proto       |   21 +
 hbase-protocol/src/main/protobuf/Master.proto      |   12 +-
 hbase-protocol/src/main/protobuf/ZooKeeper.proto   |    3 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |    6 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      |    8 +-
 .../hadoop/hbase/CoordinatedStateManager.java      |    8 -
 .../org/apache/hadoop/hbase/TableDescriptor.java   |  161 ++
 .../org/apache/hadoop/hbase/TableDescriptors.java  |   25 +
 .../org/apache/hadoop/hbase/TableStateManager.java |  121 --
 .../coordination/BaseCoordinatedStateManager.java  |    5 -
 .../coordination/ZkCoordinatedStateManager.java    |   14 -
 .../coordination/ZkOpenRegionCoordination.java     |    4 +-
 .../hadoop/hbase/master/AssignmentManager.java     |  114 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   33 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |    1 -
 .../hadoop/hbase/master/MasterRpcServices.java     |   26 +-
 .../apache/hadoop/hbase/master/MasterServices.java |    5 +
 .../apache/hadoop/hbase/master/RegionStates.java   |   14 +-
 .../hadoop/hbase/master/TableNamespaceManager.java |    9 +-
 .../hadoop/hbase/master/TableStateManager.java     |  219 +++
 .../hbase/master/handler/ClosedRegionHandler.java  |    5 +-
 .../hbase/master/handler/CreateTableHandler.java   |   84 +-
 .../hbase/master/handler/DisableTableHandler.java  |   30 +-
 .../hbase/master/handler/EnableTableHandler.java   |   45 +-
 .../hbase/master/handler/TableEventHandler.java    |   13 +-
 .../master/procedure/AddColumnFamilyProcedure.java |    4 +-
 .../master/procedure/CreateTableProcedure.java     |   15 +-
 .../procedure/DeleteColumnFamilyProcedure.java     |    4 +-
 .../master/procedure/DisableTableProcedure.java    |   14 +-
 .../master/procedure/EnableTableProcedure.java     |   12 +-
 .../master/procedure/MasterDDLOperationHelper.java |    4 +-
 .../procedure/ModifyColumnFamilyProcedure.java     |    4 +-
 .../master/procedure/ModifyTableProcedure.java     |    6 +-
 .../master/procedure/ServerCrashProcedure.java     |    8 +-
 .../hbase/master/snapshot/SnapshotManager.java     |    8 +-
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |    4 +-
 .../hadoop/hbase/regionserver/CompactionTool.java  |   14 +-
 .../hbase/regionserver/wal/WALCellCodec.java       |    1 +
 .../hadoop/hbase/snapshot/SnapshotManifest.java    |    8 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      |  212 ++-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |   60 +-
 .../java/org/apache/hadoop/hbase/util/HMerge.java  |    3 +-
 .../java/org/apache/hadoop/hbase/util/Merge.java   |    5 +-
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   |   90 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |   25 +-
 .../hbase/zookeeper/ZKTableStateManager.java       |  369 ----
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |    1 +
 .../apache/hadoop/hbase/TestDrainingServer.java    |  246 +--
 .../hbase/TestFSTableDescriptorForceCreation.java  |   12 +-
 .../TestHColumnDescriptorDefaultVersions.java      |    4 +-
 .../apache/hadoop/hbase/TestTableDescriptor.java   |   57 +
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |   26 +-
 .../hbase/master/MockNoopMasterServices.java       |    5 +
 .../master/TestAssignmentManagerOnCluster.java     |   16 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java    |   35 +-
 .../org/apache/hadoop/hbase/master/TestMaster.java |    4 +-
 .../hadoop/hbase/master/TestMasterFailover.java    |   19 +-
 .../TestMasterRestartAfterDisablingTable.java      |    8 +-
 .../hbase/master/TestOpenedRegionHandler.java      |   11 +-
 .../hadoop/hbase/master/TestRegionStates.java      |    2 -
 .../hadoop/hbase/master/TestTableLockManager.java  |    6 +-
 .../procedure/MasterProcedureTestingUtility.java   |    8 +-
 .../procedure/TestCreateTableProcedure2.java       |   10 +-
 .../TestTableDescriptorModificationFromClient.java |    6 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |    8 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |   39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java    |   51 -
 .../hbase/zookeeper/TestZKTableStateManager.java   |  114 --
 83 files changed, 4764 insertions(+), 2043 deletions(-)
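
The client-visible effect of this change: table enable/disable state is now
fetched from the master over RPC (GetTableStateRequest) instead of being read
from the table znodes in ZooKeeper. A hedged sketch of the new call path,
using the HConnection and TableState APIs added in the diffs below (the
connection setup is the ordinary branch-1 client API, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;
    import org.apache.hadoop.hbase.client.TableState;

    public class TableStateCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HConnection conn = HConnectionManager.createConnection(conf);
        try {
          // Issues a GetTableStateRequest RPC to the active master; the old
          // code answered this by reading the table's znode directly.
          TableState state = conn.getTableState(TableName.valueOf("t1"));
          System.out.println("enabled: "
              + state.inStates(TableState.State.ENABLED));
        } finally {
          conn.close();
        }
      }
    }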

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 4e3e55e..0bed7ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -185,6 +185,11 @@ abstract class ConnectionAdapter implements ClusterConnection {
   }
 
   @Override
+  public TableState getTableState(TableName tableName) throws IOException {
+    return wrappedConnection.getTableState(tableName);
+  }
+
+  @Override
   public HTableDescriptor[] listTables() throws IOException {
     return wrappedConnection.listTables();
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 61107f7..961ee3a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -124,6 +124,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
@@ -1006,7 +1008,7 @@ class ConnectionManager {
 
     @Override
     public boolean isTableEnabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, true);
+      return getTableState(tableName).inStates(TableState.State.ENABLED);
     }
 
     @Override
@@ -1016,7 +1018,7 @@ class ConnectionManager {
 
     @Override
     public boolean isTableDisabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, false);
+      return getTableState(tableName).inStates(TableState.State.DISABLED);
     }
 
     @Override
@@ -2174,6 +2176,13 @@ class ConnectionManager {
         }
 
         @Override
+        public GetTableStateResponse getTableState(
+                RpcController controller, GetTableStateRequest request)
+                throws ServiceException {
+          return stub.getTableState(controller, request);
+        }
+
+        @Override
         public void close() {
           release(this.mss);
         }
@@ -2800,6 +2809,19 @@ class ConnectionManager {
     public RpcControllerFactory getRpcControllerFactory() {
       return this.rpcControllerFactory;
     }
+
+    public TableState getTableState(TableName tableName) throws IOException {
+      MasterKeepAliveConnection master = getKeepAliveMasterService();
+      try {
+        GetTableStateResponse resp = master.getTableState(null,
+                RequestConverter.buildGetTableStateRequest(tableName));
+        return TableState.convert(resp.getTableState());
+      } catch (ServiceException se) {
+        throw ProtobufUtil.getRemoteException(se);
+      } finally {
+        master.close();
+      }
+    }
   }
 
   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index e476d5f..7de1dfb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -213,6 +213,13 @@ public interface HConnection extends Connection {
   boolean isTableDisabled(byte[] tableName) throws IOException;
 
   /**
+   * Retrieve the TableState, representing the current state of the table.
+   * @param tableName table to fetch state for
+   * @return state of the table
+   */
+  public TableState getTableState(TableName tableName) throws IOException;
+
+  /**
    * @param tableName table name
    * @return true if all regions of the table are available, false otherwise
    * @throws IOException if a remote or network exception occurs
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index 58ec3c4..9debd63 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.TableName;
 
 /**
  * Cluster registry.
@@ -47,12 +46,6 @@ interface Registry {
   String getClusterId();
 
   /**
-   * @param enabled Return true if table is enabled
-   * @throws IOException
-   */
-  boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException;
-
-  /**
    * @return Count of 'running' regionservers
    * @throws IOException
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
new file mode 100644
index 0000000..384d4e6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+/**
+ * Represents table state.
+ */
+@InterfaceAudience.Private
+public class TableState {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static enum State {
+    ENABLED,
+    DISABLED,
+    DISABLING,
+    ENABLING;
+
+    /**
+     * Convert from PB version of State
+     *
+     * @param state convert from
+     * @return POJO
+     */
+    public static State convert(HBaseProtos.TableState.State state) {
+      State ret;
+      switch (state) {
+        case ENABLED:
+          ret = State.ENABLED;
+          break;
+        case DISABLED:
+          ret = State.DISABLED;
+          break;
+        case DISABLING:
+          ret = State.DISABLING;
+          break;
+        case ENABLING:
+          ret = State.ENABLING;
+          break;
+        default:
+          throw new IllegalStateException(state.toString());
+      }
+      return ret;
+    }
+
+    /**
+     * Convert to PB version of State
+     *
+     * @return PB
+     */
+    public HBaseProtos.TableState.State convert() {
+      HBaseProtos.TableState.State state;
+      switch (this) {
+        case ENABLED:
+          state = HBaseProtos.TableState.State.ENABLED;
+          break;
+        case DISABLED:
+          state = HBaseProtos.TableState.State.DISABLED;
+          break;
+        case DISABLING:
+          state = HBaseProtos.TableState.State.DISABLING;
+          break;
+        case ENABLING:
+          state = HBaseProtos.TableState.State.ENABLING;
+          break;
+        default:
+          throw new IllegalStateException(this.toString());
+      }
+      return state;
+    }
+
+  }
+
+  private final long timestamp;
+  private final TableName tableName;
+  private final State state;
+
+  /**
+   * Create an instance of TableState.
+   * @param state table state
+   */
+  public TableState(TableName tableName, State state, long timestamp) {
+    this.tableName = tableName;
+    this.state = state;
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * Create an instance of TableState with the current timestamp
+   *
+   * @param tableName table for which state is created
+   * @param state     state of the table
+   */
+  public TableState(TableName tableName, State state) {
+    this(tableName, state, System.currentTimeMillis());
+  }
+
+  /**
+   * @return table state
+   */
+  public State getState() {
+    return state;
+  }
+
+  /**
+   * Timestamp of table state
+   *
+   * @return timestamp in milliseconds
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /**
+   * Table name for this state
+   *
+   * @return table name
+   */
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  /**
+   * Check whether the table is in the given state.
+   * @param state state to check against
+   * @return true if the table is in the given state
+   */
+  public boolean inStates(State state) {
+    return this.state.equals(state);
+  }
+
+  /**
+   * Check whether the table is in any of the given states.
+   * @param states states to check against
+   * @return true if the table is in any of the given states
+   */
+  public boolean inStates(State... states) {
+    for (State s : states) {
+      if (s.equals(this.state)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  /**
+   * Convert to PB version of TableState
+   * @return PB
+   */
+  public HBaseProtos.TableState convert() {
+    return HBaseProtos.TableState.newBuilder()
+        .setState(this.state.convert())
+        .setTable(ProtobufUtil.toProtoTableName(this.tableName))
+        .setTimestamp(this.timestamp)
+        .build();
+  }
+
+  /**
+   * Convert from PB version of TableState
+   * @param tableState convert from
+   * @return POJO
+   */
+  public static TableState convert(HBaseProtos.TableState tableState) {
+    TableState.State state = State.convert(tableState.getState());
+    return new TableState(ProtobufUtil.toTableName(tableState.getTable()),
+        state, tableState.getTimestamp());
+  }
+
+  /**
+   * Static version of the state check.
+   * @param state state to check
+   * @param target any number of states to compare against
+   * @return true if the given state equals any of the targets
+   */
+  public static boolean isInStates(State state, State... target) {
+    for (State tableState : target) {
+      if (state.equals(tableState)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
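
A short round-trip example for the POJO/protobuf conversions above (the
wrapper class is illustrative; the TableState API is as added in this file):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class TableStateRoundTrip {
      public static void main(String[] args) {
        TableState original =
            new TableState(TableName.valueOf("t1"), TableState.State.DISABLED);
        // Serialize to the PB message carried in GetTableStateResponse.
        HBaseProtos.TableState pb = original.convert();
        // Deserialize back to the client-side POJO.
        TableState copy = TableState.convert(pb);
        assert copy.getState() == TableState.State.DISABLED;
        assert copy.getTimestamp() == original.getTimestamp();
      }
    }
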
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 05572b7..8f7257e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -27,10 +26,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.zookeeper.KeeperException;
 
@@ -117,24 +114,6 @@ class ZooKeeperRegistry implements Registry {
   }
 
   @Override
-  public boolean isTableOnlineState(TableName tableName, boolean enabled)
-  throws IOException {
-    ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
-    try {
-      if (enabled) {
-        return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName);
-      }
-      return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName);
-    } catch (KeeperException e) {
-      throw new IOException("Enable/Disable failed", e);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    } finally {
-       zkw.close();
-    }
-  }
-
-  @Override
   public int getCurrentNrHRS() throws IOException {
     ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
     try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 63b8af2..31e69cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.ByteStringer;
+
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -95,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
@@ -117,7 +120,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEn
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -1411,6 +1413,18 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  /**
+   * Creates a protocol buffer GetTableStateRequest
+   *
+   * @param tableName table to build the request for
+   * @return a GetTableStateRequest
+   */
+  public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) {
+    return GetTableStateRequest.newBuilder()
+            .setTableName(ProtobufUtil.toProtoTableName(tableName))
+            .build();
+  }
+
   /**
    * Creates a protocol buffer GetTableDescriptorsRequest for a single table
    *
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
deleted file mode 100644
index 7c21b01..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Non-instantiable class that provides helper functions to learn
- * about HBase table state for code running on client side (hence, not having
- * access to consensus context).
- *
- * Doesn't cache any table state, just goes directly to ZooKeeper.
- * TODO: decouple this class from ZooKeeper.
- */
-@InterfaceAudience.Private
-public class ZKTableStateClientSideReader {
-
-  private ZKTableStateClientSideReader() {}
-  
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isEnabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED;
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING}
-   * of {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) ||
-      isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    Set<TableName> disabledTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    for (String child: children) {
-      TableName tableName =
-          TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-      if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName);
-    }
-    return disabledTables;
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return
-        getTablesInStates(
-          zkw,
-          ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING);
-  }
-
-  /**
-   * Gets a list of all the tables set as enabling in zookeeper.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @return Set of enabling tables, empty Set if none
-   * @throws KeeperException
-   * @throws InterruptedException
-   */
-  public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return getTablesInStates(zkw, ZooKeeperProtos.Table.State.ENABLING);
-  }
-
-  /**
-   * Gets a list of tables that are set as one of the passing in states in zookeeper.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param states the list of states that a table could be in
-   * @return Set of tables in one of the states, empty Set if none
-   * @throws KeeperException
-   * @throws InterruptedException
-   */
-  private static Set<TableName> getTablesInStates(
-    ZooKeeperWatcher zkw,
-    ZooKeeperProtos.Table.State... states)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    Set<TableName> tableNameSet = new HashSet<TableName>();
-    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    TableName tableName;
-    ZooKeeperProtos.Table.State tableState;
-    for (String child: children) {
-      tableName = TableName.valueOf(child);
-      tableState = getTableState(zkw, tableName);
-      for (ZooKeeperProtos.Table.State state : states) {
-         if (tableState == state) {
-           tableNameSet.add(tableName);
-           break;
-         }
-      }
-    }
-    return tableNameSet;
-  }
-
-  static boolean isTableState(final ZooKeeperProtos.Table.State expectedState,
-      final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && currentState.equals(expectedState);
-  }
-
-  /**
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   * @throws TableNotFoundException if tableName doesn't exist
-   */
-  static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) {
-      throw new TableNotFoundException(tableName);
-    }
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-      return builder.getState();
-    } catch (IOException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index b180fb9..be05054 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -138,6 +138,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   // znode used for region transitioning and assignment
   public String assignmentZNode;
   // znode used for table disabling/enabling
+  @Deprecated
   public String tableZNode;
   // znode containing the unique cluster ID
   public String clusterIdZNode;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 5d37ad7..21e3d85 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -475,11 +475,6 @@ public class TestAsyncProcess {
       }
 
       @Override
-      public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException {
-        return false;
-      }
-
-      @Override
       public int getCurrentNrHRS() throws IOException {
         return 1;
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index f6968bc..06647ca 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -130,12 +130,6 @@ public class TestClientNoCluster extends Configured implements Tool {
     }
 
     @Override
-    public boolean isTableOnlineState(TableName tableName, boolean enabled)
-    throws IOException {
-      return enabled;
-    }
-
-    @Override
     public int getCurrentNrHRS() throws IOException {
       return 1;
     }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java
deleted file mode 100644
index e82d3b0..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-@Category({SmallTests.class})
-public class TestZKTableStateClientSideReader {
-
-  @Test
-  public void test() throws Exception {
-    ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class);
-    RecoverableZooKeeper rzk = Mockito.mock(RecoverableZooKeeper.class);
-    Mockito.doReturn(rzk).when(zkw).getRecoverableZooKeeper();
-    Mockito.doReturn(null).when(rzk).getData(Mockito.anyString(),
-        Mockito.any(Watcher.class), Mockito.any(Stat.class));
-    TableName table = TableName.valueOf("table-not-exists");
-    try {
-      ZKTableStateClientSideReader.getTableState(zkw, table);
-      fail("Shouldn't reach here");
-    } catch(TableNotFoundException e) {
-      // Expected Table not found exception
-    }
-  }
-}
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 82fcb61..f86370d 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -1975,6 +1975,1576 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.TableSchema)
   }
 
+  public interface TableStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableState.State state = 1;
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    boolean hasState();
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState();
+
+    // required .hbase.pb.TableName table = 2;
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    boolean hasTable();
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable();
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder();
+
+    // optional uint64 timestamp = 3;
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    boolean hasTimestamp();
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    long getTimestamp();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableState}
+   *
+   * <pre>
+   ** Denotes state of the table 
+   * </pre>
+   */
+  public static final class TableState extends
+      com.google.protobuf.GeneratedMessage
+      implements TableStateOrBuilder {
+    // Use TableState.newBuilder() to construct.
+    private TableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TableState defaultInstance;
+    public static TableState getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableState getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TableState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                state_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = table_.toBuilder();
+              }
+              table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(table_);
+                table_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              timestamp_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TableState> PARSER =
+        new com.google.protobuf.AbstractParser<TableState>() {
+      public TableState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TableState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TableState> getParserForType() {
+      return PARSER;
+    }
+
+    /**
+     * Protobuf enum {@code hbase.pb.TableState.State}
+     *
+     * <pre>
+     * Table's current state
+     * </pre>
+     */
+    public enum State
+        implements com.google.protobuf.ProtocolMessageEnum {
+      /**
+       * <code>ENABLED = 0;</code>
+       */
+      ENABLED(0, 0),
+      /**
+       * <code>DISABLED = 1;</code>
+       */
+      DISABLED(1, 1),
+      /**
+       * <code>DISABLING = 2;</code>
+       */
+      DISABLING(2, 2),
+      /**
+       * <code>ENABLING = 3;</code>
+       */
+      ENABLING(3, 3),
+      ;
+
+      /**
+       * <code>ENABLED = 0;</code>
+       */
+      public static final int ENABLED_VALUE = 0;
+      /**
+       * <code>DISABLED = 1;</code>
+       */
+      public static final int DISABLED_VALUE = 1;
+      /**
+       * <code>DISABLING = 2;</code>
+       */
+      public static final int DISABLING_VALUE = 2;
+      /**
+       * <code>ENABLING = 3;</code>
+       */
+      public static final int ENABLING_VALUE = 3;
+
+
+      public final int getNumber() { return value; }
+
+      public static State valueOf(int value) {
+        switch (value) {
+          case 0: return ENABLED;
+          case 1: return DISABLED;
+          case 2: return DISABLING;
+          case 3: return ENABLING;
+          default: return null;
+        }
+      }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<State>() {
+              public State findValueByNumber(int number) {
+                return State.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final State[] VALUES = values();
+
+      public static State valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private State(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+
+      // @@protoc_insertion_point(enum_scope:hbase.pb.TableState.State)
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableState.State state = 1;
+    public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_;
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+      return state_;
+    }
+
+    // required .hbase.pb.TableName table = 2;
+    public static final int TABLE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_;
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public boolean hasTable() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+      return table_;
+    }
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+      return table_;
+    }
+
+    // optional uint64 timestamp = 3;
+    public static final int TIMESTAMP_FIELD_NUMBER = 3;
+    private long timestamp_;
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    public boolean hasTimestamp() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    public long getTimestamp() {
+      return timestamp_;
+    }
+
+    private void initFields() {
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      timestamp_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTable()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTable().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, table_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt64(3, timestamp_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, table_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(3, timestamp_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj;
+
+      boolean result = true;
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result && (hasTable() == other.hasTable());
+      if (hasTable()) {
+        result = result && getTable()
+            .equals(other.getTable());
+      }
+      result = result && (hasTimestamp() == other.hasTimestamp());
+      if (hasTimestamp()) {
+        result = result && (getTimestamp()
+            == other.getTimestamp());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      if (hasTable()) {
+        hash = (37 * hash) + TABLE_FIELD_NUMBER;
+        hash = (53 * hash) + getTable().hashCode();
+      }
+      if (hasTimestamp()) {
+        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimestamp());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.TableState}
+     *
+     * <pre>
+     ** Denotes state of the table 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        timestamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (tableBuilder_ == null) {
+          result.table_ = table_;
+        } else {
+          result.table_ = tableBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.timestamp_ = timestamp_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this;
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasTable()) {
+          mergeTable(other.getTable());
+        }
+        if (other.hasTimestamp()) {
+          setTimestamp(other.getTimestamp());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasState()) {
+          
+          return false;
+        }
+        if (!hasTable()) {
+          
+          return false;
+        }
+        if (!getTable().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableState.State state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+        return state_;
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        onChanged();
+        return this;
+      }
+
+      // required .hbase.pb.TableName table = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableBuilder_;
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public boolean hasTable() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+        if (tableBuilder_ == null) {
+          return table_;
+        } else {
+          return tableBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          table_ = value;
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder setTable(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+        if (tableBuilder_ == null) {
+          table_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              table_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            table_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+          } else {
+            table_ = value;
+          }
+          onChanged();
+        } else {
+          tableBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder clearTable() {
+        if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getTableFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+        if (tableBuilder_ != null) {
+          return tableBuilder_.getMessageOrBuilder();
+        } else {
+          return table_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> 
+          getTableFieldBuilder() {
+        if (tableBuilder_ == null) {
+          tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  table_,
+                  getParentForChildren(),
+                  isClean());
+          table_ = null;
+        }
+        return tableBuilder_;
+      }
+
+      // optional uint64 timestamp = 3;
+      private long timestamp_ ;
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public boolean hasTimestamp() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public long getTimestamp() {
+        return timestamp_;
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public Builder setTimestamp(long value) {
+        bitField0_ |= 0x00000004;
+        timestamp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public Builder clearTimestamp() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        timestamp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.TableState)
+    }
+
+    static {
+      defaultInstance = new TableState(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.TableState)
+  }
+
+  public interface TableDescriptorOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableSchema schema = 1;
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    boolean hasSchema();
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema();
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder();
+
+    // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    boolean hasState();
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableDescriptor}
+   *
+   * <pre>
+   ** On HDFS representation of table state. 
+   * </pre>
+   */
+  public static final class TableDescriptor extends
+      com.google.protobuf.GeneratedMessage
+      implements TableDescriptorOrBuilder {
+    // Use TableDescriptor.newBuilder() to construct.
+    private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TableDescriptor defaultInstance;
+    public static TableDescriptor getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableDescriptor getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TableDescriptor(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = schema_.toBuilder();
+              }
+              schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(schema_);
+                schema_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                state_ = value;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TableDescriptor> PARSER =
+        new com.google.protobuf.AbstractParser<TableDescriptor>() {
+      public TableDescriptor parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TableDescriptor(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TableDescriptor> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableSchema schema = 1;
+    public static final int SCHEMA_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_;
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public boolean hasSchema() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+      return schema_;
+    }
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+      return schema_;
+    }
+
+    // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+    public static final int STATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_;
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+      return state_;
+    }
+
+    private void initFields() {
+      schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasSchema()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getSchema().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, schema_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, state_.getNumber());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, schema_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, state_.getNumber());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj;
+
+      boolean result = true;
+      result = result && (hasSchema() == other.hasSchema());
+      if (hasSchema()) {
+        result = result && getSchema()
+            .equals(other.getSchema());
+      }
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSchema()) {
+        hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
+        hash = (53 * hash) + getSchema().hashCode();
+      }
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.TableDescriptor}
+     *
+     * <pre>
+     ** On HDFS representation of table state. 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getSchemaFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+        } else {
+          schemaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (schemaBuilder_ == null) {
+          result.schema_ = schema_;
+        } else {
+          result.schema_ = schemaBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.state_ = state_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this;
+        if (other.hasSchema()) {
+          mergeSchema(other.getSchema());
+        }
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasSchema()) {
+          
+          return false;
+        }
+        if (!getSchema().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableSchema schema = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_;
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public boolean hasSchema() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+        if (schemaBuilder_ == null) {
+          return schema_;
+        } else {
+          return schemaBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+        if (schemaBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          schema_ = value;
+          onChanged();
+        } else {
+          schemaBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder setSchema(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+        if (schemaBuilder_ == null) {
+          schema_ = builderForValue.build();
+          onChanged();
+        } else {
+          schemaBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+        if (schemaBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+            schema_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial();
+          } else {
+            schema_ = value;
+          }
+          onChanged();
+        } else {
+          schemaBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder clearSchema() {
+        if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+          onChanged();
+        } else {
+          schemaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getSchemaFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+        if (schemaBuilder_ != null) {
+          return schemaBuilder_.getMessageOrBuilder();
+        } else {
+          return schema_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> 
+          getSchemaFieldBuilder() {
+        if (schemaBuilder_ == null) {
+          schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+                  schema_,
+                  getParentForChildren(),
+                  isClean());
+          schema_ = null;
+        }
+        return schemaBuilder_;
+      }
+
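+      // Editorial sketch: getSchemaBuilder() returns a live nested builder
+      // wired through the SingleFieldBuilder above, so edits made on it reach
+      // this TableDescriptor.Builder without a second setSchema() call.
+      // Illustrative only (assumes a TableName `name` built elsewhere):
+      //
+      //   descriptorBuilder.getSchemaBuilder().setTableName(name);
+      //   TableDescriptor td = descriptorBuilder.build();
+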
+      // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+        return state_;
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor)
+    }
+
+    static {
+      defaultInstance = new TableDescriptor(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor)
+  }
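+
+  // Usage sketch (editorial, not generated code): TableDescriptor pairs the
+  // existing TableSchema with the new TableState. schema is required; state is
+  // optional and defaults to ENABLED when unset. Assuming a TableSchema
+  // `schema` built elsewhere:
+  //
+  //   HBaseProtos.TableDescriptor td = HBaseProtos.TableDescriptor.newBuilder()
+  //       .setSchema(schema)
+  //       .setState(HBaseProtos.TableState.State.DISABLED)
+  //       .build();  // throws if the required schema field is unset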
+
   public interface ColumnFamilySchemaOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -19558,6 +21128,16 @@ public final class HBaseProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_TableSchema_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableState_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableState_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableDescriptor_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableDescriptor_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ColumnFamilySchema_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -19687,64 +21267,71 @@ public final class HBaseProtos {
       "leName\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.By" +
       "tesBytesPair\0225\n\017column_families\030\003 \003(\0132\034." +
       "hbase.pb.ColumnFamilySchema\022/\n\rconfigura" +
-      "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\201\001\n" +
-      "\022ColumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\natt" +
-      "ributes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022" +
-      "/\n\rconfiguration\030\003 \003(\0132\030.hbase.pb.NameSt",
-      "ringPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002" +
-      "(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" +
-      "ame\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022" +
-      "\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nrepli" +
-      "ca_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favore" +
-      "d_node\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017R" +
-      "egionSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb." +
-      "RegionSpecifier.RegionSpecifierType\022\r\n\005v" +
-      "alue\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REG" +
-      "ION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tT",
-      "imeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Co" +
-      "lumnFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002" +
-      "(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRa" +
-      "nge\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004" +
-      "port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coproc" +
-      "essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" +
-      "\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP" +
-      "air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" +
-      "sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" +
-      "\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu",
-      "e\030\002 \001(\003\"\206\001\n\024ProcedureDescription\022\021\n\tsign" +
-      "ature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreatio" +
-      "n_time\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030" +
-      ".hbase.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" +
-      "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" +
-      "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" +
-      "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" +
-      "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Na" +
-      "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfi" +
-      "guration\030\002 \003(\0132\030.hbase.pb.NameStringPair",
-      "\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url" +
-      "\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014" +
-      "\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rve" +
-      "rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" +
-      "\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" +
-      "\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" +
-      "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
-      "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" +
-      "\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDesc" +
-      "ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005",
-      "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" +
-      "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" +
-      "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" +
-      "\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocation\022)\n\013r" +
-      "egion_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022)" +
-      "\n\013server_name\030\002 \001(\0132\024.hbase.pb.ServerNam" +
-      "e\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareType\022\010\n\004LES" +
-      "S\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT" +
-      "_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATE" +
-      "R\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECOND",
-      "S\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022" +
-      "\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004" +
-      "DAYS\020\007B>\n*org.apache.hadoop.hbase.protob" +
-      "uf.generatedB\013HBaseProtosH\001\240\001\001"
+      "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\257\001\n" +
+      "\nTableState\022)\n\005state\030\001 \002(\0162\032.hbase.pb.Ta" +
+      "bleState.State\022\"\n\005table\030\002 \002(\0132\023.hbase.pb" +
+      ".TableName\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013",
+      "\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002" +
+      "\022\014\n\010ENABLING\020\003\"l\n\017TableDescriptor\022%\n\006sch" +
+      "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\0222\n\005stat" +
+      "e\030\002 \001(\0162\032.hbase.pb.TableState.State:\007ENA" +
+      "BLED\"\201\001\n\022ColumnFamilySchema\022\014\n\004name\030\001 \002(" +
+      "\014\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.BytesBy" +
+      "tesPair\022/\n\rconfiguration\030\003 \003(\0132\030.hbase.p" +
+      "b.NameStringPair\"\243\001\n\nRegionInfo\022\021\n\tregio" +
+      "n_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.p" +
+      "b.TableName\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_ke",
+      "y\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022" +
+      "\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*" +
+      "\n\014favored_node\030\001 \003(\0132\024.hbase.pb.ServerNa" +
+      "me\"\236\001\n\017RegionSpecifier\022;\n\004type\030\001 \002(\0162-.h" +
+      "base.pb.RegionSpecifier.RegionSpecifierT" +
+      "ype\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierTyp" +
+      "e\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAM" +
+      "E\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001" +
+      "(\004\"W\n\025ColumnFamilyTimeRange\022\025\n\rcolumn_fa" +
+      "mily\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.p",
+      "b.TimeRange\"A\n\nServerName\022\021\n\thost_name\030\001" +
+      " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" +
+      "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" +
+      "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" +
+      "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014" +
+      "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" +
+      "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" +
+      "\022\r\n\005value\030\002 \001(\003\"\206\001\n\024ProcedureDescription" +
+      "\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n" +
+      "\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfiguration",
+      "\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n\010Empt" +
+      "yMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDou" +
+      "bleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimal" +
+      "Msg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016l" +
+      "east_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002" +
+      "(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022" +
+      "/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.NameSt" +
+      "ringPair\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(" +
+      "\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user" +
+      "\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002",
+      "(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_min" +
+      "or\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort" +
+      "\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb." +
+      "VersionInfo\"\243\002\n\023SnapshotDescription\022\014\n\004n" +
+      "ame\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_tim" +
+      "e\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.Snap" +
+      "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" +
+      " \001(\005\022\r\n\005owner\030\006 \001(\t\022<\n\025users_and_permiss" +
+      "ions\030\007 \001(\0132\035.hbase.pb.UsersAndPermission" +
+      "s\022\016\n\003ttl\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022",
+      "\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocat" +
+      "ion\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" +
+      "onInfo\022)\n\013server_name\030\002 \001(\0132\024.hbase.pb.S" +
+      "erverName\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareTyp" +
+      "e\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020" +
+      "\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013" +
+      "\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NA" +
+      "NOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISE" +
+      "CONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOU" +
+      "RS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbas",
+      "e.protobuf.generatedB\013HBaseProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -19757,140 +21344,152 @@ public final class HBaseProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TableSchema_descriptor,
               new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", });
-          internal_static_hbase_pb_ColumnFamilySchema_descriptor =
+          internal_static_hbase_pb_TableState_descriptor =
             getDescriptor().getMessageTypes().get(1);
+          internal_static_hbase_pb_TableState_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_TableState_descriptor,
+              new java.lang.String[] { "State", "Table", "Timestamp", });
+          internal_static_hbase_pb_TableDescriptor_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_TableDescriptor_descriptor,
+              new java.lang.String[] { "Schema", "State", });
+          internal_static_hbase_pb_ColumnFamilySchema_descriptor =
+            getDescriptor().getMessageTypes().get(3);
           internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilySchema_descriptor,
               new java.lang.String[] { "Name", "Attributes", "Configuration", });
           internal_static_hbase_pb_RegionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(2);
+            getDescriptor().getMessageTypes().get(4);
           internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionInfo_descriptor,
               new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
           internal_static_hbase_pb_FavoredNodes_descriptor =
-            getDescriptor().getMessageTypes().get(3);
+            getDescriptor().getMessageTypes().get(5);
           internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_FavoredNodes_descriptor,
               new java.lang.String[] { "FavoredNode", });
           internal_static_hbase_pb_RegionSpecifier_descriptor =
-            getDescriptor().getMessageTypes().get(4);
+            getDescriptor().getMessageTypes().get(6);
           internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionSpecifier_descriptor,
               new java.lang.String[] { "Type", "Value", });
           internal_static_hbase_pb_TimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(5);
+            getDescriptor().getMessageTypes().get(7);
           internal_static_hbase_pb_TimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TimeRange_descriptor,
               new java.lang.String[] { "From", "To", });
           internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(6);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor,
               new java.lang.String[] { "ColumnFamily", "TimeRange", });
           internal_static_hbase_pb_ServerName_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_hbase_pb_ServerName_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ServerName_descriptor,
               new java.lang.String[] { "HostName", "Port", "StartCode", });
           internal_static_hbase_pb_Coprocessor_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_Coprocessor_descriptor,
               new java.lang.String[] { "Name", });
           internal_static_hbase_pb_NameStringPair_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameStringPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_NameBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameBytesPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_BytesBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BytesBytesPair_descriptor,
               new java.lang.String[] { "First", "Second", });
           internal_static_hbase_pb_NameInt64Pair_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameInt64Pair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_ProcedureDescription_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ProcedureDescription_descriptor,
               new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
           internal_static_hbase_pb_EmptyMsg_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_EmptyMsg_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_LongMsg_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(17);
           internal_static_hbase_pb_LongMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_LongMsg_descriptor,
               new java.lang.String[] { "LongMsg", });
           internal_static_hbase_pb_DoubleMsg_descriptor =
-            getDescriptor().getMessageTypes().get(16);
+            getDescriptor().getMessageTypes().get(18);
           internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DoubleMsg_descriptor,
               new java.lang.String[] { "DoubleMsg", });
           internal_static_hbase_pb_BigDecimalMsg_descriptor =
-            getDescriptor().getMessageTypes().get(17);
+            getDescriptor().getMessageTypes().get(19);
           internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BigDecimalMsg_descriptor,
               new java.lang.String[] { "BigdecimalMsg", });
           internal_static_hbase_pb_UUID_descriptor =
-            getDescriptor().getMessageTypes().get(18);
+            getDescriptor().getMessageTypes().get(20);
           internal_static_hbase_pb_UUID_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UUID_descriptor,
               new java.lang.String[] { "LeastSigBits", "MostSigBits", });
           internal_static_hbase_pb_NamespaceDescriptor_descriptor =
-            getDescriptor().getMessageTypes().get(19);
+            getDescriptor().getMessageTypes().get(21);
           internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NamespaceDescriptor_descriptor,
               new java.lang.String[] { "Name", "Configuration", });
           internal_static_hbase_pb_VersionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(20);
+            getDescriptor().getMessageTypes().get(22);
           internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_VersionInfo_descriptor,
               new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", });
           internal_static_hbase_pb_RegionServerInfo_descriptor =
-            getDescriptor().getMessageTypes().get(21);
+            getDescriptor().getMessageTypes().get(23);
           internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionServerInfo_descriptor,
               new java.lang.String[] { "InfoPort", "VersionInfo", });
           internal_static_hbase_pb_SnapshotDescription_descriptor =
-            getDescriptor().getMessageTypes().get(22);
+            getDescriptor().getMessageTypes().get(24);
           internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotDescription_descriptor,
               new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", });
           internal_static_hbase_pb_RegionLocation_descriptor =
-            getDescriptor().getMessageTypes().get(23);
+            getDescriptor().getMessageTypes().get(25);
           internal_static_hbase_pb_RegionLocation_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionLocation_descriptor,
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 76cbbe9..87b780b 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -49724,6 +49724,1128 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.GetTableNamesResponse)
   }
 
+  public interface GetTableStateRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableName table_name = 1;
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetTableStateRequest}
+   */
+  public static final class GetTableStateRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetTableStateRequestOrBuilder {
+    // Use GetTableStateRequest.newBuilder() to construct.
+    private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetTableStateRequest defaultInstance;
+    public static GetTableStateRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetTableStateRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetTableStateRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableName_.toBuilder();
+              }
+              tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableName_);
+                tableName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetTableStateRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetTableStateRequest>() {
+      public GetTableStateRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetTableStateRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetTableStateRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableName table_name = 1;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public boolean hasTableName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
+      return tableName_;
+    }
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
+      return tableName_;
+    }
+
+    private void initFields() {
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTableName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTableName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, tableName_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, tableName_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj;
+
+      boolean result = true;
+      result = result && (hasTableName() == other.hasTableName());
+      if (hasTableName()) {
+        result = result && getTableName()
+            .equals(other.getTableName());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableName()) {
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getTableName().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetTableStateRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this;
+        if (other.hasTableName()) {
+          mergeTableName(other.getTableName());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTableName()) {
+          return false;
+        }
+        if (!getTableName().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateRequest)
+    }
+
+    static {
+      defaultInstance = new GetTableStateRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateRequest)
+  }
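+
+  // Usage sketch (editorial, not generated code): building the request for the
+  // new GetTableState master RPC. table_name is required, so build() throws if
+  // it is left unset. Assuming a TableProtos.TableName `tableName` built
+  // elsewhere:
+  //
+  //   MasterProtos.GetTableStateRequest request =
+  //       MasterProtos.GetTableStateRequest.newBuilder()
+  //           .setTableName(tableName)
+  //           .build();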
+
+  public interface GetTableStateResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableState table_state = 1;
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    boolean hasTableState();
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState();
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder();
+  }
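+
+  // Usage sketch (editorial): unpacking the response. getTableState() returns
+  // the required hbase.pb.TableState message from HBaseProtos, and its
+  // getState() accessor yields the ENABLED/DISABLED/DISABLING/ENABLING enum.
+  // Assuming a GetTableStateResponse `response`:
+  //
+  //   HBaseProtos.TableState.State state =
+  //       response.getTableState().getState();
+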
+  /**
+   * Protobuf type {@code hbase.pb.GetTableStateResponse}
+   */
+  public static final class GetTableStateResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetTableStateResponseOrBuilder {
+    // Use GetTableStateResponse.newBuilder() to construct.
+    private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetTableStateResponse defaultInstance;
+    public static GetTableStateResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetTableStateResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetTableStateResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableState_.toBuilder();
+              }
+              tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableState_);
+                tableState_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetTableStateResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetTableStateResponse>() {
+      public GetTableStateResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetTableStateResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetTableStateResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableState table_state = 1;
+    public static final int TABLE_STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_;
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public boolean hasTableState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+      return tableState_;
+    }
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+      return tableState_;
+    }
+
+    private void initFields() {
+      tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTableState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTableState().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, tableState_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, tableState_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj;
+
+      boolean result = true;
+      result = result && (hasTableState() == other.hasTableState());
+      if (hasTableState()) {
+        result = result && getTableState()
+            .equals(other.getTableState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableState()) {
+        hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER;
+        hash = (53 * hash) + getTableState().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetTableStateResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableStateFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+        } else {
+          tableStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableStateBuilder_ == null) {
+          result.tableState_ = tableState_;
+        } else {
+          result.tableState_ = tableStateBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this;
+        if (other.hasTableState()) {
+          mergeTableState(other.getTableState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTableState()) {
+          
+          return false;
+        }
+        if (!getTableState().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableState table_state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_;
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public boolean hasTableState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+        if (tableStateBuilder_ == null) {
+          return tableState_;
+        } else {
+          return tableStateBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableState_ = value;
+          onChanged();
+        } else {
+          tableStateBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder setTableState(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) {
+        if (tableStateBuilder_ == null) {
+          tableState_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableStateBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) {
+            tableState_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial();
+          } else {
+            tableState_ = value;
+          }
+          onChanged();
+        } else {
+          tableStateBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder clearTableState() {
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+          onChanged();
+        } else {
+          tableStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableStateFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+        if (tableStateBuilder_ != null) {
+          return tableStateBuilder_.getMessageOrBuilder();
+        } else {
+          return tableState_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> 
+          getTableStateFieldBuilder() {
+        if (tableStateBuilder_ == null) {
+          tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>(
+                  tableState_,
+                  getParentForChildren(),
+                  isClean());
+          tableState_ = null;
+        }
+        return tableStateBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateResponse)
+    }
+
+    static {
+      defaultInstance = new GetTableStateResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateResponse)
+  }
+
   public interface GetClusterStatusRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
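
For reference, a minimal usage sketch of the generated GetTableStateResponse
above (MasterProtos and HBaseProtos are the generated classes in
org.apache.hadoop.hbase.protobuf.generated; someTableState is assumed to be an
HBaseProtos.TableState built elsewhere). Because table_state is a required
field, build() on an empty builder throws an UninitializedMessageException:

    // Build, serialize, and re-parse a GetTableStateResponse.
    MasterProtos.GetTableStateResponse resp =
        MasterProtos.GetTableStateResponse.newBuilder()
            .setTableState(someTableState) // assumed: built elsewhere
            .build();
    byte[] wire = resp.toByteArray();
    MasterProtos.GetTableStateResponse parsed =
        MasterProtos.GetTableStateResponse.parseFrom(wire);
    assert parsed.getTableState().equals(someTableState);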
@@ -68552,6 +69674,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse> done);
 
+      /**
+       * <code>rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse);</code>
+       *
+       * <pre>
+       ** returns table state 
+       * </pre>
+       */
+      public abstract void getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done);
+
     }
 
     public static com.google.protobuf.Service newReflectiveService(
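
A server-side implementation of the new abstract method follows the usual
protobuf-RPC callback shape. A hypothetical sketch (lookupTableState stands in
for the real master-side lookup, which is wired up elsewhere in this change
set):

    @java.lang.Override
    public void getTableState(
        com.google.protobuf.RpcController controller,
        MasterProtos.GetTableStateRequest request,
        com.google.protobuf.RpcCallback<MasterProtos.GetTableStateResponse> done) {
      // Resolve the state of the requested table (hypothetical helper).
      HBaseProtos.TableState state = lookupTableState(request.getTableName());
      // Hand the response back through the callback.
      done.run(MasterProtos.GetTableStateResponse.newBuilder()
          .setTableState(state)
          .build());
    }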
@@ -69069,6 +70203,14 @@ public final class MasterProtos {
           impl.isSnapshotCleanupEnabled(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void getTableState(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done) {
+          impl.getTableState(controller, request, done);
+        }
+
       };
     }
 
@@ -69219,6 +70361,8 @@ public final class MasterProtos {
               return impl.switchSnapshotCleanup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)request);
             case 63:
               return impl.isSnapshotCleanupEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)request);
+            case 64:
+              return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -69361,6 +70505,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance();
             case 63:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance();
+            case 64:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -69503,6 +70649,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance();
             case 63:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance();
+            case 64:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -70313,6 +71461,18 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse> done);
 
+    /**
+     * <code>rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse);</code>
+     *
+     * <pre>
+     ** returns table state 
+     * </pre>
+     */
+    public abstract void getTableState(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done);
+
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -70655,6 +71815,11 @@ public final class MasterProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse>specializeCallback(
               done));
           return;
+        case 64:
+          this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -70797,6 +71962,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance();
         case 63:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance();
+        case 64:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -70939,6 +72106,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance();
         case 63:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance();
+        case 64:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -71919,6 +73088,21 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()));
       }
+
+      public  void getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -72246,6 +73430,11 @@ public final class MasterProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request)
           throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
+          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -73022,6 +74211,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
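
On the client side the call goes through the BlockingInterface added above and
is dispatched as method index 64, one past IsSnapshotCleanupEnabled at 63; the
same index appears in every dispatch table in this file. A hypothetical usage
sketch (master is assumed to be a MasterService.BlockingInterface obtained via
newBlockingStub; the table name is illustrative, and a null RpcController is
passed for brevity):

    // table_name is a required field on GetTableStateRequest, so it must be set.
    MasterProtos.GetTableStateRequest req =
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(HBaseProtos.TableName.newBuilder()
                .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
                .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1")))
            .build();
    MasterProtos.GetTableStateResponse resp = master.getTableState(null, req);
    HBaseProtos.TableState state = resp.getTableState();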
@@ -73917,6 +75118,16 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetTableStateRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetTableStateResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -74242,225 +75453,231 @@ public final class MasterProtos {
       "sRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_t",
       "ables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n" +
       "\025GetTableNamesResponse\022(\n\013table_names\030\001 " +
-      "\003(\0132\023.hbase.pb.TableName\"\031\n\027GetClusterSt" +
-      "atusRequest\"K\n\030GetClusterStatusResponse\022" +
-      "/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clust" +
-      "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" +
-      "MasterRunningResponse\022\031\n\021is_master_runni" +
-      "ng\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproc" +
-      "edure\030\001 \002(\0132\036.hbase.pb.ProcedureDescript" +
-      "ion\"F\n\025ExecProcedureResponse\022\030\n\020expected",
-      "_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026I" +
-      "sProcedureDoneRequest\0221\n\tprocedure\030\001 \001(\013" +
-      "2\036.hbase.pb.ProcedureDescription\"`\n\027IsPr" +
-      "ocedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false" +
-      "\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureD" +
-      "escription\",\n\031GetProcedureResultRequest\022" +
-      "\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultR" +
-      "esponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetPro" +
-      "cedureResultResponse.State\022\022\n\nstart_time" +
-      "\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 ",
-      "\001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Foreig" +
-      "nExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020" +
-      "\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortPro" +
-      "cedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInt" +
-      "erruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProc" +
-      "edureResponse\022\034\n\024is_procedure_aborted\030\001 " +
-      "\002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListProc" +
-      "eduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbas" +
-      "e.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tus" +
-      "er_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnam",
-      "espace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase" +
-      ".pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp" +
-      "ass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hb" +
-      "ase.pb.ThrottleRequest\"\022\n\020SetQuotaRespon" +
-      "se\"J\n\037MajorCompactionTimestampRequest\022\'\n" +
-      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"U" +
-      "\n(MajorCompactionTimestampForRegionReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\"@\n MajorCompactionTimestampRespons" +
-      "e\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Secur",
-      "ityCapabilitiesRequest\"\354\001\n\034SecurityCapab" +
-      "ilitiesResponse\022G\n\014capabilities\030\001 \003(\01621." +
-      "hbase.pb.SecurityCapabilitiesResponse.Ca" +
-      "pability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHEN" +
-      "TICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n" +
-      "\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003" +
-      "\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027ClearDeadServer" +
-      "sRequest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb" +
-      ".ServerName\"E\n\030ClearDeadServersResponse\022" +
-      ")\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNa",
-      "me\"A\n\031SetSnapshotCleanupRequest\022\017\n\007enabl" +
-      "ed\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\";\n\032SetSnap" +
-      "shotCleanupResponse\022\035\n\025prev_snapshot_cle" +
-      "anup\030\001 \002(\010\"!\n\037IsSnapshotCleanupEnabledRe" +
-      "quest\"3\n IsSnapshotCleanupEnabledRespons" +
-      "e\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetClusterIdReques" +
-      "t\"*\n\024GetClusterIdResponse\022\022\n\ncluster_id\030" +
-      "\001 \001(\t\"\030\n\026GetActiveMasterRequest\"D\n\027GetAc" +
-      "tiveMasterResponse\022)\n\013server_name\030\001 \001(\0132" +
-      "\024.hbase.pb.ServerName\"\037\n\035GetMetaRegionLo",
-      "cationsRequest\"R\n\036GetMetaRegionLocations" +
-      "Response\0220\n\016meta_locations\030\001 \003(\0132\030.hbase" +
-      ".pb.RegionLocation*(\n\020MasterSwitchType\022\t" +
-      "\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\241.\n\rMasterService\022e" +
-      "\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSch" +
-      "emaAlterStatusRequest\032&.hbase.pb.GetSche" +
-      "maAlterStatusResponse\022b\n\023GetTableDescrip" +
-      "tors\022$.hbase.pb.GetTableDescriptorsReque" +
-      "st\032%.hbase.pb.GetTableDescriptorsRespons" +
-      "e\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNa",
-      "mesRequest\032\037.hbase.pb.GetTableNamesRespo" +
-      "nse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetCl" +
-      "usterStatusRequest\032\".hbase.pb.GetCluster" +
-      "StatusResponse\022V\n\017IsMasterRunning\022 .hbas" +
-      "e.pb.IsMasterRunningRequest\032!.hbase.pb.I" +
-      "sMasterRunningResponse\022D\n\tAddColumn\022\032.hb" +
-      "ase.pb.AddColumnRequest\032\033.hbase.pb.AddCo" +
-      "lumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb." +
-      "DeleteColumnRequest\032\036.hbase.pb.DeleteCol" +
-      "umnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.M",
-      "odifyColumnRequest\032\036.hbase.pb.ModifyColu" +
-      "mnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Move" +
-      "RegionRequest\032\034.hbase.pb.MoveRegionRespo" +
-      "nse\022k\n\026DispatchMergingRegions\022\'.hbase.pb" +
-      ".DispatchMergingRegionsRequest\032(.hbase.p" +
-      "b.DispatchMergingRegionsResponse\022M\n\014Assi" +
-      "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" +
-      "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" +
-      "ignRegion\022\037.hbase.pb.UnassignRegionReque" +
-      "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r",
-      "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" +
-      "quest\032\037.hbase.pb.OfflineRegionResponse\022J" +
-      "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" +
-      "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" +
-      "uncateTable\022\036.hbase.pb.TruncateTableRequ" +
-      "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" +
-      "EnableTable\022\034.hbase.pb.EnableTableReques" +
-      "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" +
-      "bleTable\022\035.hbase.pb.DisableTableRequest\032" +
-      "\036.hbase.pb.DisableTableResponse\022J\n\013Modif",
-      "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" +
-      "base.pb.ModifyTableResponse\022J\n\013CreateTab" +
-      "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" +
-      ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" +
-      "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" +
-      "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" +
-      "MasterRequest\032\034.hbase.pb.StopMasterRespo" +
-      "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" +
-      ".pb.IsInMaintenanceModeRequest\032%.hbase.p" +
-      "b.IsInMaintenanceModeResponse\022>\n\007Balance",
-      "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" +
-      "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" +
-      "ase.pb.SetBalancerRunningRequest\032$.hbase" +
-      ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" +
-      "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" +
-      "Request\032#.hbase.pb.IsBalancerEnabledResp" +
-      "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" +
-      "b.SetSplitOrMergeEnabledRequest\032(.hbase." +
-      "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS" +
-      "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM",
-      "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" +
-      "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" +
-      ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" +
-      "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" +
-      "e.pb.SetNormalizerRunningRequest\032&.hbase" +
-      ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" +
-      "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" +
-      "nabledRequest\032%.hbase.pb.IsNormalizerEna" +
-      "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p" +
-      "b.RunCatalogScanRequest\032 .hbase.pb.RunCa",
-      "talogScanResponse\022e\n\024EnableCatalogJanito" +
-      "r\022%.hbase.pb.EnableCatalogJanitorRequest" +
-      "\032&.hbase.pb.EnableCatalogJanitorResponse" +
-      "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" +
-      "sCatalogJanitorEnabledRequest\032).hbase.pb" +
-      ".IsCatalogJanitorEnabledResponse\022V\n\017RunC" +
-      "leanerChore\022 .hbase.pb.RunCleanerChoreRe" +
-      "quest\032!.hbase.pb.RunCleanerChoreResponse" +
-      "\022k\n\026SetCleanerChoreRunning\022\'.hbase.pb.Se" +
-      "tCleanerChoreRunningRequest\032(.hbase.pb.S",
-      "etCleanerChoreRunningResponse\022h\n\025IsClean" +
-      "erChoreEnabled\022&.hbase.pb.IsCleanerChore" +
-      "EnabledRequest\032\'.hbase.pb.IsCleanerChore" +
-      "EnabledResponse\022^\n\021ExecMasterService\022#.h" +
-      "base.pb.CoprocessorServiceRequest\032$.hbas" +
-      "e.pb.CoprocessorServiceResponse\022A\n\010Snaps" +
-      "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p" +
-      "b.SnapshotResponse\022h\n\025GetCompletedSnapsh" +
-      "ots\022&.hbase.pb.GetCompletedSnapshotsRequ" +
-      "est\032\'.hbase.pb.GetCompletedSnapshotsResp",
-      "onse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delete" +
-      "SnapshotRequest\032 .hbase.pb.DeleteSnapsho" +
-      "tResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.I" +
-      "sSnapshotDoneRequest\032 .hbase.pb.IsSnapsh" +
-      "otDoneResponse\022V\n\017RestoreSnapshot\022 .hbas" +
-      "e.pb.RestoreSnapshotRequest\032!.hbase.pb.R" +
-      "estoreSnapshotResponse\022h\n\025IsRestoreSnaps" +
-      "hotDone\022&.hbase.pb.IsRestoreSnapshotDone" +
-      "Request\032\'.hbase.pb.IsRestoreSnapshotDone" +
-      "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe",
-      "cProcedureRequest\032\037.hbase.pb.ExecProcedu" +
-      "reResponse\022W\n\024ExecProcedureWithRet\022\036.hba" +
-      "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" +
-      "ecProcedureResponse\022V\n\017IsProcedureDone\022 " +
-      ".hbase.pb.IsProcedureDoneRequest\032!.hbase" +
-      ".pb.IsProcedureDoneResponse\022V\n\017ModifyNam" +
-      "espace\022 .hbase.pb.ModifyNamespaceRequest" +
-      "\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017C" +
-      "reateNamespace\022 .hbase.pb.CreateNamespac" +
-      "eRequest\032!.hbase.pb.CreateNamespaceRespo",
-      "nse\022V\n\017DeleteNamespace\022 .hbase.pb.Delete" +
-      "NamespaceRequest\032!.hbase.pb.DeleteNamesp" +
-      "aceResponse\022k\n\026GetNamespaceDescriptor\022\'." +
-      "hbase.pb.GetNamespaceDescriptorRequest\032(" +
-      ".hbase.pb.GetNamespaceDescriptorResponse" +
-      "\022q\n\030ListNamespaceDescriptors\022).hbase.pb." +
-      "ListNamespaceDescriptorsRequest\032*.hbase." +
-      "pb.ListNamespaceDescriptorsResponse\022\206\001\n\037" +
-      "ListTableDescriptorsByNamespace\0220.hbase." +
-      "pb.ListTableDescriptorsByNamespaceReques",
-      "t\0321.hbase.pb.ListTableDescriptorsByNames" +
-      "paceResponse\022t\n\031ListTableNamesByNamespac" +
-      "e\022*.hbase.pb.ListTableNamesByNamespaceRe" +
-      "quest\032+.hbase.pb.ListTableNamesByNamespa" +
-      "ceResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" +
-      "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" +
-      "\037getLastMajorCompactionTimestamp\022).hbase" +
-      ".pb.MajorCompactionTimestampRequest\032*.hb" +
-      "ase.pb.MajorCompactionTimestampResponse\022" +
-      "\212\001\n(getLastMajorCompactionTimestampForRe",
-      "gion\0222.hbase.pb.MajorCompactionTimestamp" +
-      "ForRegionRequest\032*.hbase.pb.MajorCompact" +
-      "ionTimestampResponse\022_\n\022getProcedureResu" +
-      "lt\022#.hbase.pb.GetProcedureResultRequest\032" +
-      "$.hbase.pb.GetProcedureResultResponse\022h\n" +
-      "\027getSecurityCapabilities\022%.hbase.pb.Secu" +
-      "rityCapabilitiesRequest\032&.hbase.pb.Secur" +
-      "ityCapabilitiesResponse\022S\n\016AbortProcedur" +
-      "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba" +
-      "se.pb.AbortProcedureResponse\022S\n\016ListProc",
-      "edures\022\037.hbase.pb.ListProceduresRequest\032" +
-      " .hbase.pb.ListProceduresResponse\022Y\n\020Cle" +
-      "arDeadServers\022!.hbase.pb.ClearDeadServer" +
-      "sRequest\032\".hbase.pb.ClearDeadServersResp" +
-      "onse\022S\n\016ListNamespaces\022\037.hbase.pb.ListNa" +
-      "mespacesRequest\032 .hbase.pb.ListNamespace" +
-      "sResponse\022b\n\025SwitchSnapshotCleanup\022#.hba" +
-      "se.pb.SetSnapshotCleanupRequest\032$.hbase." +
-      "pb.SetSnapshotCleanupResponse\022q\n\030IsSnaps" +
-      "hotCleanupEnabled\022).hbase.pb.IsSnapshotC",
-      "leanupEnabledRequest\032*.hbase.pb.IsSnapsh" +
-      "otCleanupEnabledResponse2\247\002\n\021ClientMetaS" +
-      "ervice\022M\n\014GetClusterId\022\035.hbase.pb.GetClu" +
-      "sterIdRequest\032\036.hbase.pb.GetClusterIdRes" +
-      "ponse\022V\n\017GetActiveMaster\022 .hbase.pb.GetA" +
-      "ctiveMasterRequest\032!.hbase.pb.GetActiveM" +
-      "asterResponse\022k\n\026GetMetaRegionLocations\022" +
-      "\'.hbase.pb.GetMetaRegionLocationsRequest" +
-      "\032(.hbase.pb.GetMetaRegionLocationsRespon" +
-      "seBB\n*org.apache.hadoop.hbase.protobuf.g",
-      "eneratedB\014MasterProtosH\001\210\001\001\240\001\001"
+      "\003(\0132\023.hbase.pb.TableName\"?\n\024GetTableStat" +
+      "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." +
+      "TableName\"B\n\025GetTableStateResponse\022)\n\013ta" +
+      "ble_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n" +
+      "\027GetClusterStatusRequest\"K\n\030GetClusterSt" +
+      "atusResponse\022/\n\016cluster_status\030\001 \002(\0132\027.h" +
+      "base.pb.ClusterStatus\"\030\n\026IsMasterRunning" +
+      "Request\"4\n\027IsMasterRunningResponse\022\031\n\021is",
+      "_master_running\030\001 \002(\010\"I\n\024ExecProcedureRe" +
+      "quest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Proc" +
+      "edureDescription\"F\n\025ExecProcedureRespons" +
+      "e\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_da" +
+      "ta\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tpr" +
+      "ocedure\030\001 \001(\0132\036.hbase.pb.ProcedureDescri" +
+      "ption\"`\n\027IsProcedureDoneResponse\022\023\n\004done" +
+      "\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase." +
+      "pb.ProcedureDescription\",\n\031GetProcedureR" +
+      "esultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetPro",
+      "cedureResultResponse\0229\n\005state\030\001 \002(\0162*.hb" +
+      "ase.pb.GetProcedureResultResponse.State\022" +
+      "\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004" +
+      "\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hb" +
+      "ase.pb.ForeignExceptionMessage\"1\n\005State\022" +
+      "\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020" +
+      "\002\"M\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 " +
+      "\002(\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true" +
+      "\"6\n\026AbortProcedureResponse\022\034\n\024is_procedu" +
+      "re_aborted\030\001 \002(\010\"\027\n\025ListProceduresReques",
+      "t\"@\n\026ListProceduresResponse\022&\n\tprocedure" +
+      "\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuota" +
+      "Request\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group" +
+      "\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030" +
+      "\004 \001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all" +
+      "\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010thrott" +
+      "le\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020S" +
+      "etQuotaResponse\"J\n\037MajorCompactionTimest" +
+      "ampRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.p" +
+      "b.TableName\"U\n(MajorCompactionTimestampF",
+      "orRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase." +
+      "pb.RegionSpecifier\"@\n MajorCompactionTim" +
+      "estampResponse\022\034\n\024compaction_timestamp\030\001" +
+      " \002(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034" +
+      "SecurityCapabilitiesResponse\022G\n\014capabili" +
+      "ties\030\001 \003(\01621.hbase.pb.SecurityCapabiliti" +
+      "esResponse.Capability\"\202\001\n\nCapability\022\031\n\025" +
+      "SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHEN" +
+      "TICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AU" +
+      "THORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027Cl",
+      "earDeadServersRequest\022)\n\013server_name\030\001 \003" +
+      "(\0132\024.hbase.pb.ServerName\"E\n\030ClearDeadSer" +
+      "versResponse\022)\n\013server_name\030\001 \003(\0132\024.hbas" +
+      "e.pb.ServerName\"A\n\031SetSnapshotCleanupReq" +
+      "uest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001" +
+      "(\010\";\n\032SetSnapshotCleanupResponse\022\035\n\025prev" +
+      "_snapshot_cleanup\030\001 \002(\010\"!\n\037IsSnapshotCle" +
+      "anupEnabledRequest\"3\n IsSnapshotCleanupE" +
+      "nabledResponse\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetCl" +
+      "usterIdRequest\"*\n\024GetClusterIdResponse\022\022",
+      "\n\ncluster_id\030\001 \001(\t\"\030\n\026GetActiveMasterReq" +
+      "uest\"D\n\027GetActiveMasterResponse\022)\n\013serve" +
+      "r_name\030\001 \001(\0132\024.hbase.pb.ServerName\"\037\n\035Ge" +
+      "tMetaRegionLocationsRequest\"R\n\036GetMetaRe" +
+      "gionLocationsResponse\0220\n\016meta_locations\030" +
+      "\001 \003(\0132\030.hbase.pb.RegionLocation*(\n\020Maste" +
+      "rSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\363.\n\rMa" +
+      "sterService\022e\n\024GetSchemaAlterStatus\022%.hb" +
+      "ase.pb.GetSchemaAlterStatusRequest\032&.hba" +
+      "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge",
+      "tTableDescriptors\022$.hbase.pb.GetTableDes" +
+      "criptorsRequest\032%.hbase.pb.GetTableDescr" +
+      "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." +
+      "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" +
+      "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" +
+      "base.pb.GetClusterStatusRequest\032\".hbase." +
+      "pb.GetClusterStatusResponse\022V\n\017IsMasterR" +
+      "unning\022 .hbase.pb.IsMasterRunningRequest" +
+      "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" +
+      "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h",
+      "base.pb.AddColumnResponse\022M\n\014DeleteColum" +
+      "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" +
+      ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" +
+      "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." +
+      "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." +
+      "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" +
+      "veRegionResponse\022k\n\026DispatchMergingRegio" +
+      "ns\022\'.hbase.pb.DispatchMergingRegionsRequ" +
+      "est\032(.hbase.pb.DispatchMergingRegionsRes" +
+      "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR",
+      "egionRequest\032\036.hbase.pb.AssignRegionResp" +
+      "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" +
+      "gnRegionRequest\032 .hbase.pb.UnassignRegio" +
+      "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" +
+      "flineRegionRequest\032\037.hbase.pb.OfflineReg" +
+      "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" +
+      "leteTableRequest\032\035.hbase.pb.DeleteTableR" +
+      "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun" +
+      "cateTableRequest\032\037.hbase.pb.TruncateTabl" +
+      "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab",
+      "leTableRequest\032\035.hbase.pb.EnableTableRes" +
+      "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" +
+      "TableRequest\032\036.hbase.pb.DisableTableResp" +
+      "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" +
+      "leRequest\032\035.hbase.pb.ModifyTableResponse" +
+      "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" +
+      "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" +
+      "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb" +
+      "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." +
+      "hbase.pb.StopMasterRequest\032\034.hbase.pb.St",
+      "opMasterResponse\022h\n\031IsMasterInMaintenanc" +
+      "eMode\022$.hbase.pb.IsInMaintenanceModeRequ" +
+      "est\032%.hbase.pb.IsInMaintenanceModeRespon" +
+      "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
+      "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" +
+      "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
+      "quest\032$.hbase.pb.SetBalancerRunningRespo" +
+      "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa" +
+      "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
+      "erEnabledResponse\022k\n\026SetSplitOrMergeEnab",
+      "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq" +
+      "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" +
+      "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." +
+      "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." +
+      "pb.IsSplitOrMergeEnabledResponse\022D\n\tNorm" +
+      "alize\022\032.hbase.pb.NormalizeRequest\032\033.hbas" +
+      "e.pb.NormalizeResponse\022e\n\024SetNormalizerR" +
+      "unning\022%.hbase.pb.SetNormalizerRunningRe" +
+      "quest\032&.hbase.pb.SetNormalizerRunningRes" +
+      "ponse\022b\n\023IsNormalizerEnabled\022$.hbase.pb.",
+      "IsNormalizerEnabledRequest\032%.hbase.pb.Is" +
+      "NormalizerEnabledResponse\022S\n\016RunCatalogS" +
+      "can\022\037.hbase.pb.RunCatalogScanRequest\032 .h" +
+      "base.pb.RunCatalogScanResponse\022e\n\024Enable" +
+      "CatalogJanitor\022%.hbase.pb.EnableCatalogJ" +
+      "anitorRequest\032&.hbase.pb.EnableCatalogJa" +
+      "nitorResponse\022n\n\027IsCatalogJanitorEnabled" +
+      "\022(.hbase.pb.IsCatalogJanitorEnabledReque" +
+      "st\032).hbase.pb.IsCatalogJanitorEnabledRes" +
+      "ponse\022V\n\017RunCleanerChore\022 .hbase.pb.RunC",
+      "leanerChoreRequest\032!.hbase.pb.RunCleaner" +
+      "ChoreResponse\022k\n\026SetCleanerChoreRunning\022" +
+      "\'.hbase.pb.SetCleanerChoreRunningRequest" +
+      "\032(.hbase.pb.SetCleanerChoreRunningRespon" +
+      "se\022h\n\025IsCleanerChoreEnabled\022&.hbase.pb.I" +
+      "sCleanerChoreEnabledRequest\032\'.hbase.pb.I" +
+      "sCleanerChoreEnabledResponse\022^\n\021ExecMast" +
+      "erService\022#.hbase.pb.CoprocessorServiceR" +
+      "equest\032$.hbase.pb.CoprocessorServiceResp" +
+      "onse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotRequ",
+      "est\032\032.hbase.pb.SnapshotResponse\022h\n\025GetCo" +
+      "mpletedSnapshots\022&.hbase.pb.GetCompleted" +
+      "SnapshotsRequest\032\'.hbase.pb.GetCompleted" +
+      "SnapshotsResponse\022S\n\016DeleteSnapshot\022\037.hb" +
+      "ase.pb.DeleteSnapshotRequest\032 .hbase.pb." +
+      "DeleteSnapshotResponse\022S\n\016IsSnapshotDone" +
+      "\022\037.hbase.pb.IsSnapshotDoneRequest\032 .hbas" +
+      "e.pb.IsSnapshotDoneResponse\022V\n\017RestoreSn" +
+      "apshot\022 .hbase.pb.RestoreSnapshotRequest" +
+      "\032!.hbase.pb.RestoreSnapshotResponse\022h\n\025I",
+      "sRestoreSnapshotDone\022&.hbase.pb.IsRestor" +
+      "eSnapshotDoneRequest\032\'.hbase.pb.IsRestor" +
+      "eSnapshotDoneResponse\022P\n\rExecProcedure\022\036" +
+      ".hbase.pb.ExecProcedureRequest\032\037.hbase.p" +
+      "b.ExecProcedureResponse\022W\n\024ExecProcedure" +
+      "WithRet\022\036.hbase.pb.ExecProcedureRequest\032" +
+      "\037.hbase.pb.ExecProcedureResponse\022V\n\017IsPr" +
+      "ocedureDone\022 .hbase.pb.IsProcedureDoneRe" +
+      "quest\032!.hbase.pb.IsProcedureDoneResponse" +
+      "\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyNam",
+      "espaceRequest\032!.hbase.pb.ModifyNamespace" +
+      "Response\022V\n\017CreateNamespace\022 .hbase.pb.C" +
+      "reateNamespaceRequest\032!.hbase.pb.CreateN" +
+      "amespaceResponse\022V\n\017DeleteNamespace\022 .hb" +
+      "ase.pb.DeleteNamespaceRequest\032!.hbase.pb" +
+      ".DeleteNamespaceResponse\022k\n\026GetNamespace" +
+      "Descriptor\022\'.hbase.pb.GetNamespaceDescri" +
+      "ptorRequest\032(.hbase.pb.GetNamespaceDescr" +
+      "iptorResponse\022q\n\030ListNamespaceDescriptor" +
+      "s\022).hbase.pb.ListNamespaceDescriptorsReq",
+      "uest\032*.hbase.pb.ListNamespaceDescriptors" +
+      "Response\022\206\001\n\037ListTableDescriptorsByNames" +
+      "pace\0220.hbase.pb.ListTableDescriptorsByNa" +
+      "mespaceRequest\0321.hbase.pb.ListTableDescr" +
+      "iptorsByNamespaceResponse\022t\n\031ListTableNa" +
+      "mesByNamespace\022*.hbase.pb.ListTableNames" +
+      "ByNamespaceRequest\032+.hbase.pb.ListTableN" +
+      "amesByNamespaceResponse\022A\n\010SetQuota\022\031.hb" +
+      "ase.pb.SetQuotaRequest\032\032.hbase.pb.SetQuo" +
+      "taResponse\022x\n\037getLastMajorCompactionTime",
+      "stamp\022).hbase.pb.MajorCompactionTimestam" +
+      "pRequest\032*.hbase.pb.MajorCompactionTimes" +
+      "tampResponse\022\212\001\n(getLastMajorCompactionT" +
+      "imestampForRegion\0222.hbase.pb.MajorCompac" +
+      "tionTimestampForRegionRequest\032*.hbase.pb" +
+      ".MajorCompactionTimestampResponse\022_\n\022get" +
+      "ProcedureResult\022#.hbase.pb.GetProcedureR" +
+      "esultRequest\032$.hbase.pb.GetProcedureResu" +
+      "ltResponse\022h\n\027getSecurityCapabilities\022%." +
+      "hbase.pb.SecurityCapabilitiesRequest\032&.h",
+      "base.pb.SecurityCapabilitiesResponse\022S\n\016" +
+      "AbortProcedure\022\037.hbase.pb.AbortProcedure" +
+      "Request\032 .hbase.pb.AbortProcedureRespons" +
+      "e\022S\n\016ListProcedures\022\037.hbase.pb.ListProce" +
+      "duresRequest\032 .hbase.pb.ListProceduresRe" +
+      "sponse\022Y\n\020ClearDeadServers\022!.hbase.pb.Cl" +
+      "earDeadServersRequest\032\".hbase.pb.ClearDe" +
+      "adServersResponse\022S\n\016ListNamespaces\022\037.hb" +
+      "ase.pb.ListNamespacesRequest\032 .hbase.pb." +
+      "ListNamespacesResponse\022b\n\025SwitchSnapshot",
+      "Cleanup\022#.hbase.pb.SetSnapshotCleanupReq" +
+      "uest\032$.hbase.pb.SetSnapshotCleanupRespon" +
+      "se\022q\n\030IsSnapshotCleanupEnabled\022).hbase.p" +
+      "b.IsSnapshotCleanupEnabledRequest\032*.hbas" +
+      "e.pb.IsSnapshotCleanupEnabledResponse\022P\n" +
+      "\rGetTableState\022\036.hbase.pb.GetTableStateR" +
+      "equest\032\037.hbase.pb.GetTableStateResponse2" +
+      "\247\002\n\021ClientMetaService\022M\n\014GetClusterId\022\035." +
+      "hbase.pb.GetClusterIdRequest\032\036.hbase.pb." +
+      "GetClusterIdResponse\022V\n\017GetActiveMaster\022",
+      " .hbase.pb.GetActiveMasterRequest\032!.hbas" +
+      "e.pb.GetActiveMasterResponse\022k\n\026GetMetaR" +
+      "egionLocations\022\'.hbase.pb.GetMetaRegionL" +
+      "ocationsRequest\032(.hbase.pb.GetMetaRegion" +
+      "LocationsResponseBB\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\014MasterProtosH\001\210" +
+      "\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
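
The escaped string literals above are the serialized FileDescriptorProto for
Master.proto; the two new messages and the GetTableState service method travel
inside it. If needed, the registration can be sanity-checked at runtime with
the standard descriptor API, e.g.:

    com.google.protobuf.Descriptors.Descriptor d =
        MasterProtos.GetTableStateResponse.getDescriptor();
    System.out.println(d.getFullName());                               // hbase.pb.GetTableStateResponse
    System.out.println(d.findFieldByName("table_state").isRequired()); // true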
@@ -75043,200 +76260,212 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableNamesResponse_descriptor,
               new java.lang.String[] { "TableNames", });
-          internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
+          internal_static_hbase_pb_GetTableStateRequest_descriptor =
             getDescriptor().getMessageTypes().get(96);
+          internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetTableStateRequest_descriptor,
+              new java.lang.String[] { "TableName", });
+          internal_static_hbase_pb_GetTableStateResponse_descriptor =
+            getDescriptor().getMessageTypes().get(97);
+          internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetTableStateResponse_descriptor,
+              new java.lang.String[] { "TableState", });
+          internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
+            getDescriptor().getMessageTypes().get(98);
           internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetClusterStatusResponse_descriptor =
-            getDescriptor().getMessageTypes().get(97);
+            getDescriptor().getMessageTypes().get(99);
           internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusResponse_descriptor,
               new java.lang.String[] { "ClusterStatus", });
           internal_static_hbase_pb_IsMasterRunningRequest_descriptor =
-            getDescriptor().getMessageTypes().get(98);
+            getDescriptor().getMessageTypes().get(100);
           internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsMasterRunningResponse_descriptor =
-            getDescriptor().getMessageTypes().get(99);
+            getDescriptor().getMessageTypes().get(101);
           internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningResponse_descriptor,
               new java.lang.String[] { "IsMasterRunning", });
           internal_static_hbase_pb_ExecProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(100);
+            getDescriptor().getMessageTypes().get(102);
           internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_ExecProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(101);
+            getDescriptor().getMessageTypes().get(103);
           internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureResponse_descriptor,
               new java.lang.String[] { "ExpectedTimeout", "ReturnData", });
           internal_static_hbase_pb_IsProcedureDoneRequest_descriptor =
-            getDescriptor().getMessageTypes().get(102);
+            getDescriptor().getMessageTypes().get(104);
           internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_IsProcedureDoneResponse_descriptor =
-            getDescriptor().getMessageTypes().get(103);
+            getDescriptor().getMessageTypes().get(105);
           internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneResponse_descriptor,
               new java.lang.String[] { "Done", "Snapshot", });
           internal_static_hbase_pb_GetProcedureResultRequest_descriptor =
-            getDescriptor().getMessageTypes().get(104);
+            getDescriptor().getMessageTypes().get(106);
           internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultRequest_descriptor,
               new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_GetProcedureResultResponse_descriptor =
-            getDescriptor().getMessageTypes().get(105);
+            getDescriptor().getMessageTypes().get(107);
           internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultResponse_descriptor,
               new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
           internal_static_hbase_pb_AbortProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(106);
+            getDescriptor().getMessageTypes().get(108);
           internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureRequest_descriptor,
               new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
           internal_static_hbase_pb_AbortProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(107);
+            getDescriptor().getMessageTypes().get(109);
           internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureResponse_descriptor,
               new java.lang.String[] { "IsProcedureAborted", });
           internal_static_hbase_pb_ListProceduresRequest_descriptor =
-            getDescriptor().getMessageTypes().get(108);
+            getDescriptor().getMessageTypes().get(110);
           internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_ListProceduresResponse_descriptor =
-            getDescriptor().getMessageTypes().get(109);
+            getDescriptor().getMessageTypes().get(111);
           internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresResponse_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_SetQuotaRequest_descriptor =
-            getDescriptor().getMessageTypes().get(110);
+            getDescriptor().getMessageTypes().get(112);
           internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaRequest_descriptor,
               new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
           internal_static_hbase_pb_SetQuotaResponse_descriptor =
-            getDescriptor().getMessageTypes().get(111);
+            getDescriptor().getMessageTypes().get(113);
           internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
-            getDescriptor().getMessageTypes().get(112);
+            getDescriptor().getMessageTypes().get(114);
           internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
-            getDescriptor().getMessageTypes().get(113);
+            getDescriptor().getMessageTypes().get(115);
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
               new java.lang.String[] { "Region", });
           internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
-            getDescriptor().getMessageTypes().get(114);
+            getDescriptor().getMessageTypes().get(116);
           internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
               new java.lang.String[] { "CompactionTimestamp", });
           internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
-            getDescriptor().getMessageTypes().get(115);
+            getDescriptor().getMessageTypes().get(117);
           internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
-            getDescriptor().getMessageTypes().get(116);
+            getDescriptor().getMessageTypes().get(118);
           internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
               new java.lang.String[] { "Capabilities", });
           internal_static_hbase_pb_ClearDeadServersRequest_descriptor =
-            getDescriptor().getMessageTypes().get(117);
+            getDescriptor().getMessageTypes().get(119);
           internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersRequest_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_ClearDeadServersResponse_descriptor =
-            getDescriptor().getMessageTypes().get(118);
+            getDescriptor().getMessageTypes().get(120);
           internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersResponse_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor =
-            getDescriptor().getMessageTypes().get(119);
+            getDescriptor().getMessageTypes().get(121);
           internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor,
               new java.lang.String[] { "Enabled", "Synchronous", });
           internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor =
-            getDescriptor().getMessageTypes().get(120);
+            getDescriptor().getMessageTypes().get(122);
           internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor,
               new java.lang.String[] { "PrevSnapshotCleanup", });
           internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(121);
+            getDescriptor().getMessageTypes().get(123);
           internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(122);
+            getDescriptor().getMessageTypes().get(124);
           internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
           internal_static_hbase_pb_GetClusterIdRequest_descriptor =
-            getDescriptor().getMessageTypes().get(123);
+            getDescriptor().getMessageTypes().get(125);
           internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterIdRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetClusterIdResponse_descriptor =
-            getDescriptor().getMessageTypes().get(124);
+            getDescriptor().getMessageTypes().get(126);
           internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterIdResponse_descriptor,
               new java.lang.String[] { "ClusterId", });
           internal_static_hbase_pb_GetActiveMasterRequest_descriptor =
-            getDescriptor().getMessageTypes().get(125);
+            getDescriptor().getMessageTypes().get(127);
           internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetActiveMasterRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetActiveMasterResponse_descriptor =
-            getDescriptor().getMessageTypes().get(126);
+            getDescriptor().getMessageTypes().get(128);
           internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetActiveMasterResponse_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor =
-            getDescriptor().getMessageTypes().get(127);
+            getDescriptor().getMessageTypes().get(129);
           internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(128);
+            getDescriptor().getMessageTypes().get(130);
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor,
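
The regenerated MasterProtos above now carries the ClientMetaService descriptor
(GetClusterId, GetActiveMaster, GetMetaRegionLocations), which is what the
master-based registry talks to. A minimal sketch of one call over a blocking
stub follows; the class and method names here are illustrative only, the channel
wiring is assumed, and MasterRegistry itself handles the real fan-out across
masters.

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    final class ClientMetaSketch {
      // Asks whichever master backs the given channel for the cluster id.
      static String fetchClusterId(BlockingRpcChannel channel) throws ServiceException {
        MasterProtos.ClientMetaService.BlockingInterface stub =
            MasterProtos.ClientMetaService.newBlockingStub(channel);
        return stub.getClusterId(null,
            MasterProtos.GetClusterIdRequest.getDefaultInstance()).getClusterId();
      }
    }
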
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index fc181a8..e872f4c 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -4419,12 +4419,12 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask)
   }
 
-  public interface TableOrBuilder
+  public interface DeprecatedTableStateOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
+    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4434,7 +4434,7 @@ public final class ZooKeeperProtos {
      */
     boolean hasState();
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4442,32 +4442,33 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
      */
-    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState();
+    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState();
   }
   /**
-   * Protobuf type {@code hbase.pb.Table}
+   * Protobuf type {@code hbase.pb.DeprecatedTableState}
    *
    * <pre>
    **
    * The znode that holds state of table.
+   * Deprecated: table state is stored in the table descriptor on HDFS.
    * </pre>
    */
-  public static final class Table extends
+  public static final class DeprecatedTableState extends
       com.google.protobuf.GeneratedMessage
-      implements TableOrBuilder {
-    // Use Table.newBuilder() to construct.
-    private Table(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements DeprecatedTableStateOrBuilder {
+    // Use DeprecatedTableState.newBuilder() to construct.
+    private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final Table defaultInstance;
-    public static Table getDefaultInstance() {
+    private static final DeprecatedTableState defaultInstance;
+    public static DeprecatedTableState getDefaultInstance() {
       return defaultInstance;
     }
 
-    public Table getDefaultInstanceForType() {
+    public DeprecatedTableState getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -4477,7 +4478,7 @@ public final class ZooKeeperProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private Table(
+    private DeprecatedTableState(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -4502,7 +4503,7 @@ public final class ZooKeeperProtos {
             }
             case 8: {
               int rawValue = input.readEnum();
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue);
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue);
               if (value == null) {
                 unknownFields.mergeVarintField(1, rawValue);
               } else {
@@ -4525,33 +4526,33 @@ public final class ZooKeeperProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<Table> PARSER =
-        new com.google.protobuf.AbstractParser<Table>() {
-      public Table parsePartialFrom(
+    public static com.google.protobuf.Parser<DeprecatedTableState> PARSER =
+        new com.google.protobuf.AbstractParser<DeprecatedTableState>() {
+      public DeprecatedTableState parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Table(input, extensionRegistry);
+        return new DeprecatedTableState(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<Table> getParserForType() {
+    public com.google.protobuf.Parser<DeprecatedTableState> getParserForType() {
       return PARSER;
     }
 
     /**
-     * Protobuf enum {@code hbase.pb.Table.State}
+     * Protobuf enum {@code hbase.pb.DeprecatedTableState.State}
      *
      * <pre>
      * Table's current state
@@ -4629,7 +4630,7 @@ public final class ZooKeeperProtos {
       }
       public static final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0);
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
       }
 
       private static final State[] VALUES = values();
@@ -4651,15 +4652,15 @@ public final class ZooKeeperProtos {
         this.value = value;
       }
 
-      // @@protoc_insertion_point(enum_scope:hbase.pb.Table.State)
+      // @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
     }
 
     private int bitField0_;
-    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
+    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4671,7 +4672,7 @@ public final class ZooKeeperProtos {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4679,12 +4680,12 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() {
+    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
       return state_;
     }
 
     private void initFields() {
-      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4735,10 +4736,10 @@ public final class ZooKeeperProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj;
+      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj;
 
       boolean result = true;
       result = result && (hasState() == other.hasState());
@@ -4768,53 +4769,53 @@ public final class ZooKeeperProtos {
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -4823,7 +4824,7 @@ public final class ZooKeeperProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -4835,29 +4836,30 @@ public final class ZooKeeperProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.Table}
+     * Protobuf type {@code hbase.pb.DeprecatedTableState}
      *
      * <pre>
      **
      * The znode that holds state of table.
+     * Deprecated: table state is stored in the table descriptor on HDFS.
      * </pre>
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -4877,7 +4879,7 @@ public final class ZooKeeperProtos {
 
       public Builder clear() {
         super.clear();
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -4888,23 +4890,23 @@ public final class ZooKeeperProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this);
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -4917,16 +4919,16 @@ public final class ZooKeeperProtos {
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this;
         if (other.hasState()) {
           setState(other.getState());
         }
@@ -4946,11 +4948,11 @@ public final class ZooKeeperProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -4961,10 +4963,10 @@ public final class ZooKeeperProtos {
       }
       private int bitField0_;
 
-      // required .hbase.pb.Table.State state = 1 [default = ENABLED];
-      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+      // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4976,7 +4978,7 @@ public final class ZooKeeperProtos {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4984,11 +4986,11 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() {
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
         return state_;
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4996,7 +4998,7 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
        */
-      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) {
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) {
         if (value == null) {
           throw new NullPointerException();
         }
@@ -5006,7 +5008,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -5016,20 +5018,20 @@ public final class ZooKeeperProtos {
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.Table)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
     }
 
     static {
-      defaultInstance = new Table(true);
+      defaultInstance = new DeprecatedTableState(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.Table)
+    // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
   }
 
   public interface TableCFOrBuilder
@@ -10934,10 +10936,10 @@ public final class ZooKeeperProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Table_descriptor;
+    internal_static_hbase_pb_DeprecatedTableState_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_Table_fieldAccessorTable;
+      internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableCF_descriptor;
   private static
@@ -11001,28 +11003,29 @@ public final class ZooKeeperProtos {
       "\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020" +
       "\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007" +
       "UNKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPL" +
-      "AY\020\002\"w\n\005Table\022-\n\005state\030\001 \002(\0162\025.hbase.pb.",
-      "Table.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED" +
-      "\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABL" +
-      "ING\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.h" +
-      "base.pb.TableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017" +
-      "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" +
-      "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
-      "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
-      "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n" +
-      "\ttable_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tb" +
-      "andwidth\030\006 \001(\003\"g\n\020ReplicationState\022/\n\005st",
-      "ate\030\001 \002(\0162 .hbase.pb.ReplicationState.St" +
-      "ate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"" +
-      "+\n\027ReplicationHLogPosition\022\020\n\010position\030\001" +
-      " \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 " +
-      "\002(\t\"\252\001\n\tTableLock\022\'\n\ntable_name\030\001 \001(\0132\023." +
-      "hbase.pb.TableName\022(\n\nlock_owner\030\002 \001(\0132\024" +
-      ".hbase.pb.ServerName\022\021\n\tthread_id\030\003 \001(\003\022" +
-      "\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013c" +
-      "reate_time\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabl" +
-      "ed\030\001 \001(\010BE\n*org.apache.hadoop.hbase.prot",
-      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "AY\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001",
+      " \002(\0162$.hbase.pb.DeprecatedTableState.Sta" +
+      "te:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DIS" +
+      "ABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"D\n\007" +
+      "TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.T" +
+      "ableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017Replicati" +
+      "onPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicatio" +
+      "nEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(\0132\030.hbas" +
+      "e.pb.BytesBytesPair\022/\n\rconfiguration\030\004 \003" +
+      "(\0132\030.hbase.pb.NameStringPair\022$\n\ttable_cf" +
+      "s\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tbandwidth\030",
+      "\006 \001(\003\"g\n\020ReplicationState\022/\n\005state\030\001 \002(\016" +
+      "2 .hbase.pb.ReplicationState.State\"\"\n\005St" +
+      "ate\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Replic" +
+      "ationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017R" +
+      "eplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tT" +
+      "ableLock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb." +
+      "TableName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb" +
+      ".ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_sha" +
+      "red\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_tim" +
+      "e\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010B",
+      "E\n*org.apache.hadoop.hbase.protobuf.gene" +
+      "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11059,11 +11062,11 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SplitLogTask_descriptor,
               new java.lang.String[] { "State", "ServerName", "Mode", });
-          internal_static_hbase_pb_Table_descriptor =
+          internal_static_hbase_pb_DeprecatedTableState_descriptor =
             getDescriptor().getMessageTypes().get(5);
-          internal_static_hbase_pb_Table_fieldAccessorTable = new
+          internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_Table_descriptor,
+              internal_static_hbase_pb_DeprecatedTableState_descriptor,
               new java.lang.String[] { "State", });
           internal_static_hbase_pb_TableCF_descriptor =
             getDescriptor().getMessageTypes().get(6);
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index a594ccd..29bec72 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -39,6 +39,27 @@ message TableSchema {
   repeated NameStringPair configuration = 4;
 }
 
+/** Denotes state of the table */
+message TableState {
+  // Table's current state
+  enum State {
+    ENABLED = 0;
+    DISABLED = 1;
+    DISABLING = 2;
+    ENABLING = 3;
+  }
+  // This is the table's state.
+  required State state = 1;
+  required TableName table = 2;
+  optional uint64 timestamp = 3;
+}
+
+/** On-HDFS representation of table state. */
+message TableDescriptor {
+  required TableSchema schema = 1;
+  optional TableState.State state = 2 [ default = ENABLED ];
+}
+
 /**
  * Column Family Schema
 * Inspired by the rest ColumnSchemaMessage
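
The two new HBase.proto messages above give the client-visible table state and
its on-HDFS pairing with the schema. A small sketch of building one in Java,
assuming the regenerated HBaseProtos classes; the namespace and qualifier
values are placeholders.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    final class TableStateProtoSketch {
      // Builds a DISABLED TableState for an illustrative table default:t1.
      static HBaseProtos.TableState disabledState() {
        HBaseProtos.TableName name = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        return HBaseProtos.TableState.newBuilder()
            .setState(HBaseProtos.TableState.State.DISABLED)
            .setTable(name)
            .build();
      }
    }
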
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 27b5d75..b2fd3f8 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -456,6 +456,14 @@ message GetTableNamesResponse {
   repeated TableName table_names = 1;
 }
 
+message GetTableStateRequest {
+  required TableName table_name = 1;
+}
+
+message GetTableStateResponse {
+  required TableState table_state = 1;
+}
+
 message GetClusterStatusRequest {
 }
 
@@ -901,7 +909,9 @@ service MasterService {
   rpc IsSnapshotCleanupEnabled (IsSnapshotCleanupEnabledRequest)
     returns (IsSnapshotCleanupEnabledResponse);
 
-
+  /** Returns table state */
+  rpc GetTableState (GetTableStateRequest)
+    returns (GetTableStateResponse);
 }
 
 /** Request and response to get the clusterID for this cluster */
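
With the GetTableState rpc added above, a client holding a MasterService
blocking stub can fetch table state straight from the master instead of reading
ZooKeeper. A hedged sketch (the wrapper name is mine; ProtobufUtil.toProtoTableName
is the existing converter):

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    final class GetTableStateSketch {
      // One round trip to the active master for the table's current state.
      static HBaseProtos.TableState fetch(
          MasterProtos.MasterService.BlockingInterface master,
          TableName tableName) throws ServiceException {
        MasterProtos.GetTableStateRequest request =
            MasterProtos.GetTableStateRequest.newBuilder()
                .setTableName(ProtobufUtil.toProtoTableName(tableName))
                .build();
        return master.getTableState(null, request).getTableState();
      }
    }
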
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 1638bf7..ad740f3 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -105,8 +105,9 @@ message SplitLogTask {
 
 /**
  * The znode that holds state of table.
+ * Deprecated: table state is stored in the table descriptor on HDFS.
  */
-message Table {
+message DeprecatedTableState {
   // Table's current state
   enum State {
     ENABLED = 0;
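
Renaming Table to DeprecatedTableState keeps old znode payloads parseable while
state migrates to the descriptor on HDFS. A sketch of decoding a legacy znode
during migration, assuming the usual PB-magic framing (the helper class is
illustrative):

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    final class LegacyTableStateSketch {
      // Decodes the deprecated table-state znode payload, if any.
      static ZooKeeperProtos.DeprecatedTableState.State parse(byte[] data)
          throws InvalidProtocolBufferException {
        int offset = ProtobufUtil.isPBMagicPrefix(data)
            ? ProtobufUtil.lengthOfPBMagic() : 0;
        return ZooKeeperProtos.DeprecatedTableState.newBuilder()
            .mergeFrom(data, offset, data.length - offset)
            .build().getState();
      }
    }
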
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 2b12f81..49f2e3c 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Service to support Region Server Grouping (HBASE-6721)
@@ -269,8 +269,8 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     }
     for(TableName table: tables) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(table,
-          ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED,
+          TableState.State.DISABLING)) {
         LOG.debug("Skipping move regions because the table" + table + " is disabled.");
         continue;
       }
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6799e69..41a83a5 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -61,11 +60,13 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.net.Address;
@@ -74,7 +75,6 @@ import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -646,7 +646,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
                     if (sn == null) {
                       found.set(false);
                     } else if (tsm.isTableState(RSGROUP_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
@@ -670,7 +670,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
                     if (sn == null) {
                       nsFound.set(false);
                     } else if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
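
Both rsgroup classes now consult the master-side TableStateManager with the
client TableState.State enum instead of the ZooKeeper protobuf enum. The
migrated check boils down to the varargs call below (the wrapper name is mine):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class TableStateCheckSketch {
      // True when the table is disabled or in the process of disabling.
      static boolean isGoingOffline(TableStateManager tsm, TableName table) {
        return tsm.isTableState(table,
            TableState.State.DISABLED, TableState.State.DISABLING);
      }
    }
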
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
index bdb202d..b4c808c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
@@ -55,12 +55,4 @@ public interface CoordinatedStateManager {
    * @return instance of Server coordinated state manager runs within
    */
   Server getServer();
-
-  /**
-   * Returns implementation of TableStateManager.
-   * @throws InterruptedException if operation is interrupted
-   * @throws CoordinatedStateException if error happens in underlying coordination mechanism
-   */
-  TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
 }
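
getTableStateManager is gone from CoordinatedStateManager because table state
no longer lives in the coordination layer. Per the RSGroupAdminServer hunk
earlier in this patch, callers reach the new manager through the master
instead; roughly:

    import org.apache.hadoop.hbase.master.MasterServices;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class TsmLookupSketch {
      // The lookup path RSGroupAdminServer uses in this patch.
      static TableStateManager tableStateManager(MasterServices master) {
        return master.getAssignmentManager().getTableStateManager();
      }
    }
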
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
new file mode 100644
index 0000000..5db0f69
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.InvalidProtocolBufferException;
+/**
+ * Represents table state on HDFS.
+ */
+@InterfaceAudience.Private
+public class TableDescriptor {
+  private HTableDescriptor hTableDescriptor;
+  private TableState.State tableState;
+
+  /**
+   * Creates TableDescriptor with all fields.
+   * @param hTableDescriptor HTableDescriptor to use
+   * @param tableState table state
+   */
+  public TableDescriptor(HTableDescriptor hTableDescriptor,
+      TableState.State tableState) {
+    this.hTableDescriptor = hTableDescriptor;
+    this.tableState = tableState;
+  }
+
+  /**
+   * Creates TableDescriptor with Enabled table.
+   * @param hTableDescriptor HTableDescriptor to use
+   */
+  @VisibleForTesting
+  public TableDescriptor(HTableDescriptor hTableDescriptor) {
+    this(hTableDescriptor, TableState.State.ENABLED);
+  }
+
+  /**
+   * Associated HTableDescriptor
+   * @return instance of HTableDescriptor
+   */
+  public HTableDescriptor getHTableDescriptor() {
+    return hTableDescriptor;
+  }
+
+  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
+    this.hTableDescriptor = hTableDescriptor;
+  }
+
+  public TableState.State getTableState() {
+    return tableState;
+  }
+
+  public void setTableState(TableState.State tableState) {
+    this.tableState = tableState;
+  }
+
+  /**
+   * Convert to PB.
+   */
+  public HBaseProtos.TableDescriptor convert() {
+    return HBaseProtos.TableDescriptor.newBuilder()
+        .setSchema(hTableDescriptor.convert())
+        .setState(tableState.convert())
+        .build();
+  }
+
+  /**
+   * Convert from PB
+   */
+  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
+    TableState.State state = TableState.State.convert(proto.getState());
+    return new TableDescriptor(hTableDescriptor, state);
+  }
+
+  /**
+   * @return This instance serialized with pb with pb magic prefix
+   * @see #parseFrom(byte[])
+   */
+  public byte [] toByteArray() {
+    return ProtobufUtil.prependPBMagic(convert().toByteArray());
+  }
+
+  /**
+   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
+   * @see #toByteArray()
+   */
+  public static TableDescriptor parseFrom(final byte [] bytes)
+      throws DeserializationException, IOException {
+    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Expected PB encoded TableDescriptor");
+    }
+    int pblen = ProtobufUtil.lengthOfPBMagic();
+    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
+    HBaseProtos.TableDescriptor ts;
+    try {
+      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return convert(ts);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    TableDescriptor that = (TableDescriptor) o;
+
+    if (hTableDescriptor != null ?
+        !hTableDescriptor.equals(that.hTableDescriptor) :
+        that.hTableDescriptor != null){
+      return false;
+    }
+    if (tableState != that.tableState) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
+    result = 31 * result + (tableState != null ? tableState.hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "TableDescriptor{" +
+        "hTableDescriptor=" + hTableDescriptor +
+        ", tableState=" + tableState +
+        '}';
+  }
+}
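
For illustration, a minimal sketch of round-tripping the new TableDescriptor
through its pb encoding, based only on the methods added above; the table
name "t1" is a hypothetical placeholder, not part of the patch:

    // Sketch: serialize a TableDescriptor and parse it back.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    TableDescriptor td = new TableDescriptor(htd, TableState.State.DISABLED);
    byte[] bytes = td.toByteArray();            // pb magic prefix + message
    TableDescriptor parsed = TableDescriptor.parseFrom(bytes);
    assert td.equals(parsed);                   // equals() compares schema and state
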
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 33ae1d5..c7bfd03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -37,6 +37,14 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * @param tableName table to fetch the descriptor for
+   * @return TableDescriptor for the given table name
+   * @throws IOException
+   */
+  TableDescriptor getDescriptor(final TableName tableName)
+      throws IOException;
+
+  /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
    * @throws IOException
@@ -54,6 +62,15 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * Get Map of all TableDescriptors. Populates the descriptor cache as a
+   * side effect.
+   * @return Map of all descriptors.
+   * @throws IOException
+   */
+  Map<String, TableDescriptor> getAllDescriptors()
+      throws IOException;
+
+  /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
    * @throws IOException
@@ -62,6 +79,14 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
+   */
+  void add(final TableDescriptor htd)
+      throws IOException;
+
+  /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
    * @throws IOException
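
A short usage sketch of the two new TableDescriptors methods; 'master' is a
hypothetical MasterServices reference, and hbase:meta is used only as an
example table:

    // Sketch: read one state-aware descriptor, then all of them.
    TableDescriptors tds = master.getTableDescriptors();
    TableDescriptor metaDesc = tds.getDescriptor(TableName.META_TABLE_NAME);
    LOG.info("meta state: " + metaDesc.getTableState());
    // getAllDescriptors() also populates the descriptor cache as a side effect.
    Map<String, TableDescriptor> all = tds.getAllDescriptors();
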
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
deleted file mode 100644
index 21c09b8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.InterruptedIOException;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Helper class for table state management for operations running inside
- * RegionServer or HMaster.
- * Depending on implementation, fetches information from HBase system table,
- * local data store, ZooKeeper ensemble or somewhere else.
- * Code running on client side (with no coordinated state context) shall instead use
- * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader}
- */
-@InterfaceAudience.Private
-public interface TableStateManager {
-
-  /**
-   * Sets the table into desired state. Fails silently if the table is already in this state.
-   * @param tableName table to process
-   * @param state new state of this table
-   * @throws CoordinatedStateException if error happened when trying to set table state
-   */
-  void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is already in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                  ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is NOT in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should NOT be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                     ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states);
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, boolean checkSource,
-      ZooKeeperProtos.Table.State... states);
-
-  /**
-   * Mark table as deleted. Fails silently if the table is not currently marked as disabled.
-   * @param tableName table to be deleted
-   * @throws CoordinatedStateException if error happened while performing operation
-   */
-  void setDeletedTable(TableName tableName) throws CoordinatedStateException;
-
-  /**
-   * Checks if table is present.
-   *
-   * @param tableName table we're checking
-   * @return true if the table is present, false otherwise
-   */
-  boolean isTablePresent(TableName tableName);
-
-  /**
-   * @return set of tables which are in any one of the listed states, empty Set if none
-   */
-  Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException;
-
-  /**
-   * If the table is found in the given state the in-memory state is removed. This
-   * helps in cases where CreateTable is to be retried by the client in case of
-   * failures.  If deletePermanentState is true - the flag kept permanently is
-   * also reset.
-   *
-   * @param tableName table we're working on
-   * @param states if table isn't in any one of these states, operation aborts
-   * @param deletePermanentState if true, reset the permanent flag
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                            boolean deletePermanentState)
-    throws CoordinatedStateException;
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index f79e5d8..03762ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
 
 /**
  * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations.
@@ -49,9 +47,6 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan
     return null;
   }
 
-  @Override
-  public abstract TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
   /**
    * Method to retrieve coordination for split log worker
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 2f739be..7222b0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -20,13 +20,9 @@ package org.apache.hadoop.hbase.coordination;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
@@ -61,16 +57,6 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
   }
 
   @Override
-  public TableStateManager getTableStateManager() throws InterruptedException,
-      CoordinatedStateException {
-    try {
-      return new ZKTableStateManager(server.getZooKeeper());
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
-  @Override
   public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
     return splitLogWorkerCoordination;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
index 812bbe2..b54740a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -309,7 +309,7 @@ public class ZkOpenRegionCoordination implements OpenRegionCoordination {
     }
     if (!openedNodeDeleted) {
       if (assignmentManager.getTableStateManager().isTableState(regionInfo.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         debugLog(regionInfo, "Opened region "
           + regionInfo.getShortNameToLog() + " but "
           + "this table is disabled, triggering close of region");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index de4edbb..842ce85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -64,7 +63,6 @@ import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
@@ -77,6 +75,7 @@ import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination.SplitTr
 import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
 import org.apache.hadoop.hbase.coordination.ZkRegionMergeCoordination;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -92,12 +91,12 @@ import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
@@ -286,14 +285,11 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param service Executor service
    * @param metricsMaster metrics manager
    * @param tableLockManager TableLock manager
-   * @throws KeeperException
-   * @throws IOException
    */
   public AssignmentManager(MasterServices server, ServerManager serverManager,
       final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager) throws KeeperException,
-        IOException, CoordinatedStateException {
+      final TableLockManager tableLockManager, final TableStateManager tableStateManager) {
     super(server.getZooKeeper());
     this.server = server;
     this.serverManager = serverManager;
@@ -306,15 +302,9 @@ public class AssignmentManager extends ZooKeeperListener {
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
            HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
            FavoredNodeLoadBalancer.class);
-    try {
-      if (server.getCoordinatedStateManager() != null) {
-        this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
-      } else {
-        this.tableStateManager = null;
-      }
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    }
+
+    this.tableStateManager = tableStateManager;
+
     // This is the max attempts, not retries, so it should be at least 1.
     this.maximumAttempts = Math.max(1,
       this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
@@ -392,7 +382,7 @@ public class AssignmentManager extends ZooKeeperListener {
   }
 
   /**
-   * @return Instance of ZKTableStateManager.
+   * @return Instance of TableStateManager.
    */
   public TableStateManager getTableStateManager() {
     // These are 'expensive' to make involving trip to zk ensemble so allow
@@ -516,10 +506,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
-   * @throws CoordinatedStateException
    */
   void joinCluster() throws IOException,
-      KeeperException, InterruptedException, CoordinatedStateException {
+      KeeperException, CoordinatedStateException {
     long startTime = System.currentTimeMillis();
     // Concurrency note: In the below the accesses on regionsInTransition are
     // outside of a synchronization block where usually all accesses to RIT are
@@ -560,7 +549,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws InterruptedException
    */
   boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServers)
-  throws KeeperException, IOException, InterruptedException, CoordinatedStateException {
+      throws KeeperException, IOException {
     List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
 
     if (useZKForAssignment && nodes == null) {
@@ -568,7 +557,6 @@ public class AssignmentManager extends ZooKeeperListener {
       server.abort(errorMessage, new IOException(errorMessage));
       return true; // Doesn't matter in this case
     }
-
     boolean failover = !serverManager.getDeadServers().isEmpty();
     if (failover) {
       // This may not be a failover actually, especially if meta is on this master.
@@ -689,7 +677,11 @@ public class AssignmentManager extends ZooKeeperListener {
     if (!failover) {
       // Fresh cluster startup.
       LOG.info("Clean cluster startup. Assigning user regions");
-      assignAllUserRegions(allRegions);
+      try {
+        assignAllUserRegions(allRegions);
+      } catch (InterruptedException ie) {
+        ExceptionUtil.rethrowIfInterrupt(ie);
+      }
     }
     // unassign replicas of the split parents and the merged regions
     // the daughter replicas are opened in assignAllUserRegions if it was
@@ -707,11 +699,10 @@ public class AssignmentManager extends ZooKeeperListener {
    * locations are returned.
    */
   private Map<HRegionInfo, ServerName> getUserRegionsToAssign()
-      throws InterruptedIOException, CoordinatedStateException {
+      throws IOException {
     Set<TableName> disabledOrDisablingOrEnabling =
-        tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLING);
-
+        tableStateManager.getTablesInStates(TableState.State.DISABLED,
+          TableState.State.DISABLING, TableState.State.ENABLING);
     // Clean re/start, mark all user regions closed before reassignment
     return regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
   }
@@ -739,7 +730,7 @@ public class AssignmentManager extends ZooKeeperListener {
         try {
           // Assign the regions
           assignAllUserRegions(getUserRegionsToAssign());
-        } catch (CoordinatedStateException | IOException | InterruptedException e) {
+        } catch (IOException | InterruptedException e) {
           LOG.error("Exception occured while assigning user regions.", e);
         }
       };
@@ -1482,7 +1473,7 @@ public class AssignmentManager extends ZooKeeperListener {
             LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
 
             boolean disabled = getTableStateManager().isTableState(regionInfo.getTable(),
-                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
+                TableState.State.DISABLED, TableState.State.DISABLING);
 
             ServerName serverName = rs.getServerName();
             if (serverManager.isServerOnline(serverName)) {
@@ -2269,7 +2260,7 @@ public class AssignmentManager extends ZooKeeperListener {
             // will not be in ENABLING or ENABLED state.
             TableName tableName = region.getTable();
             if (!tableStateManager.isTableState(tableName,
-              ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) {
+              TableState.State.ENABLED, TableState.State.ENABLING)) {
               LOG.debug("Setting table " + tableName + " to ENABLED state.");
               setEnabledTable(tableName);
             }
@@ -2495,8 +2486,8 @@ public class AssignmentManager extends ZooKeeperListener {
 
   private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
     if (this.tableStateManager.isTableState(region.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
+            TableState.State.DISABLED,
+            TableState.State.DISABLING) || replicasToClose.contains(region)) {
       LOG.info("Table " + region.getTable() + " is disabled or disabling;"
         + " skipping assign of " + region.getRegionNameAsString());
       offlineDisabledRegion(region);
@@ -3127,7 +3118,7 @@ public class AssignmentManager extends ZooKeeperListener {
     for (HRegionInfo hri : regionsFromMetaScan) {
       TableName tableName = hri.getTable();
       if (!tableStateManager.isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+              TableState.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -3194,14 +3185,14 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   Set<ServerName> rebuildUserRegions() throws
-      IOException, KeeperException, CoordinatedStateException {
+          IOException, KeeperException {
     Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED, TableState.State.ENABLING);
 
     Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED,
-      ZooKeeperProtos.Table.State.DISABLING,
-      ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED,
+            TableState.State.DISABLING,
+            TableState.State.ENABLING);
 
     // Region assignment from META
     List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
@@ -3253,7 +3244,7 @@ public class AssignmentManager extends ZooKeeperListener {
         ServerName lastHost = hrl.getServerName();
         ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
         if (tableStateManager.isTableState(regionInfo.getTable(),
-             ZooKeeperProtos.Table.State.DISABLED)) {
+             TableState.State.DISABLED)) {
           // force region to forget it hosts for disabled/disabling tables.
           // see HBASE-13326
           lastHost = null;
@@ -3283,7 +3274,7 @@ public class AssignmentManager extends ZooKeeperListener {
         // this will be used in rolling restarts
         if (!disabledOrDisablingOrEnabling.contains(tableName)
           && !getTableStateManager().isTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLED)) {
+                TableState.State.ENABLED)) {
           setEnabledTable(tableName);
         }
       }
@@ -3300,9 +3291,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   private void recoverTableInDisablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> disablingTables =
-      tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
+            tableStateManager.getTablesInStates(TableState.State.DISABLING);
     if (disablingTables.size() != 0) {
       for (TableName tableName : disablingTables) {
         // Recover by calling DisableTableHandler
@@ -3324,9 +3315,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   private void recoverTableInEnablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> enablingTables = tableStateManager.
-      getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
+            getTablesInStates(TableState.State.ENABLING);
     if (enablingTables.size() != 0) {
       for (TableName tableName : enablingTables) {
         // Recover by calling EnableTableHandler
@@ -3398,9 +3389,9 @@ public class AssignmentManager extends ZooKeeperListener {
         LOG.info("Server " + serverName + " isn't online. SSH will handle this");
         continue;
       }
+      RegionState.State state = regionState.getState();
       HRegionInfo regionInfo = regionState.getRegion();
-      State state = regionState.getState();
-
+      LOG.info("Processing " + regionState);
       switch (state) {
       case CLOSED:
         invokeAssign(regionInfo);
@@ -3790,7 +3781,7 @@ public class AssignmentManager extends ZooKeeperListener {
             server.abort("Unexpected ZK exception deleting node " + hri, ke);
           }
           if (tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+                  TableState.State.DISABLED, TableState.State.DISABLING)) {
             regionStates.regionOffline(hri);
             it.remove();
             continue;
@@ -3813,7 +3804,7 @@ public class AssignmentManager extends ZooKeeperListener {
     HRegionInfo hri = plan.getRegionInfo();
     TableName tableName = hri.getTable();
     if (tableStateManager.isTableState(tableName,
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
       LOG.info("Ignored moving region of disabling/disabled table "
         + tableName);
       return;
@@ -3861,8 +3852,8 @@ public class AssignmentManager extends ZooKeeperListener {
   protected void setEnabledTable(TableName tableName) {
     try {
       this.tableStateManager.setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
+              TableState.State.ENABLED);
+    } catch (IOException e) {
       // here we can abort as it is the start up flow
       String errorMsg = "Unable to ensure that the table " + tableName
           + " will be" + " enabled because of a ZooKeeper issue";
@@ -3967,8 +3958,8 @@ public class AssignmentManager extends ZooKeeperListener {
         // When there are more than one region server a new RS is selected as the
         // destination and the same is updated in the region plan. (HBASE-5546)
         if (getTableStateManager().isTableState(hri.getTable(),
-            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-            replicasToClose.contains(hri)) {
+                TableState.State.DISABLED, TableState.State.DISABLING) ||
+                replicasToClose.contains(hri)) {
           offlineDisabledRegion(hri);
           return;
         }
@@ -3996,15 +3987,14 @@ public class AssignmentManager extends ZooKeeperListener {
     // reset the count, if any
     failedOpenTracker.remove(hri.getEncodedName());
     if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     }
   }
 
   private void onRegionClosed(final HRegionInfo hri) {
-    if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-        replicasToClose.contains(hri)) {
+    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED,
+        TableState.State.DISABLING) || replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return;
     }
@@ -4050,7 +4040,7 @@ public class AssignmentManager extends ZooKeeperListener {
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(p);
     }
     return null;
@@ -4076,7 +4066,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         invokeUnAssign(a);
         invokeUnAssign(b);
       } else {
@@ -4130,7 +4120,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         invokeUnAssign(p);
       } else {
         Callable<Object> mergeReplicasCallable = new Callable<Object>() {
@@ -4170,7 +4160,7 @@ public class AssignmentManager extends ZooKeeperListener {
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     }
@@ -4291,7 +4281,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(p);
       }
     }
@@ -4421,7 +4411,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(hri_a);
         unassign(hri_b);
       }
@@ -4692,7 +4682,7 @@ public class AssignmentManager extends ZooKeeperListener {
         errorMsg = hri.getShortNameToLog()
           + " is not pending close on " + serverName;
       } else {
-        onRegionClosed(hri);
+          onRegionClosed(hri);
       }
       break;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 71c79be..1e03d44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
@@ -139,7 +140,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -167,6 +167,7 @@ import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -385,6 +386,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   private long splitPlanCount;
   private long mergePlanCount;
 
+  // handle table states
+  private TableStateManager tableStateManager;
+
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
 
@@ -694,9 +698,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager);
+      this.tableLockManager, tableStateManager);
     zooKeeper.registerListenerFirst(assignmentManager);
-
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
         this.serverManager);
     this.regionServerTracker.start();
@@ -728,6 +731,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
+
+    // migrating existent table state from zk
+    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
+        .queryForTableStates(getZooKeeper()).entrySet()) {
+      LOG.info("Converting state from zk to new states:" + entry);
+      tableStateManager.setTableState(entry.getKey(), entry.getValue());
+    }
+    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
   }
 
   /**
@@ -792,6 +803,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
 
+    this.tableStateManager = new TableStateManager(this);
+    this.tableStateManager.start();
+
     status.setStatus("Initializing ZK system trackers");
     initializeZKBasedSystemTrackers();
 
@@ -1186,8 +1200,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   }
 
   private void enableMeta(TableName metaTableName) {
-    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+    if (!this.tableStateManager.isTableState(metaTableName,
+            TableState.State.ENABLED)) {
       this.assignmentManager.setEnabledTable(metaTableName);
     }
   }
@@ -1231,6 +1245,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return tableNamespaceManager;
   }
 
+  @Override
+  public TableStateManager getTableStateManager() {
+    return tableStateManager;
+  }
+
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1663,7 +1682,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       // Don't run the normalizer concurrently
       List<TableName> allEnabledTables = new ArrayList<>(
         this.assignmentManager.getTableStateManager().getTablesInStates(
-          ZooKeeperProtos.Table.State.ENABLED));
+          TableState.State.ENABLED));
 
       Collections.shuffle(allEnabledTables);
 
@@ -2508,7 +2527,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
-        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+        isTableState(tableName, TableState.State.DISABLED)) {
       throw new TableNotDisabledException(tableName);
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 6ca0ad5..c5e8101 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -547,7 +547,6 @@ public class MasterFileSystem {
       fsd.createTableDescriptor(
           new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
     }
-
     return rd;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 07ec9fc..4af4560 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -40,10 +40,12 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -952,13 +954,11 @@ public class MasterRpcServices extends RSRpcServices
   public GetTableNamesResponse getTableNames(RpcController controller,
       GetTableNamesRequest req) throws ServiceException {
     try {
-      master.checkInitialized();
-
+      master.checkServiceStarted();
       final String regex = req.hasRegex() ? req.getRegex() : null;
       final String namespace = req.hasNamespace() ? req.getNamespace() : null;
       List<TableName> tableNames = master.listTableNames(namespace, regex,
           req.getIncludeSysTables());
-
       GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
       if (tableNames != null && tableNames.size() > 0) {
         // Add the table names to the response
@@ -973,6 +973,26 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
+      MasterProtos.GetTableStateRequest request) throws ServiceException {
+    try {
+      master.checkServiceStarted();
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      TableState.State state = master.getTableStateManager()
+              .getTableState(tableName);
+      if (state == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      MasterProtos.GetTableStateResponse.Builder builder =
+              MasterProtos.GetTableStateResponse.newBuilder();
+      builder.setTableState(new TableState(tableName, state).convert());
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
       IsCatalogJanitorEnabledRequest req) throws ServiceException {
     return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
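
For reference, a hedged sketch of how a client could drive the new
getTableState endpoint; 'masterStub' is a hypothetical blocking stub for the
master service, ServiceException handling is elided, and the State.convert()
call mirrors the conversion used by TableDescriptor above:

    // Sketch: build the request and decode the table state from the response.
    MasterProtos.GetTableStateRequest request =
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(tableName))
            .build();
    MasterProtos.GetTableStateResponse response =
        masterStub.getTableState(null, request);
    TableState.State state =
        TableState.State.convert(response.getTableState().getState());
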
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index be6fb12..d20b764 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -84,6 +84,11 @@ public interface MasterServices extends Server {
   TableLockManager getTableLockManager();
 
   /**
+   * @return Master's instance of {@link TableStateManager}
+   */
+  TableStateManager getTableStateManager();
+
+  /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
   MasterCoprocessorHost getMasterCoprocessorHost();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index e31868e..b8b49d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -31,6 +31,8 @@ import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,14 +44,13 @@ import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -59,9 +60,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 /**
  * Region state accountant. It holds the states of all regions in the memory.
  * In normal scenario, it should match the meta table and the true region states.
@@ -720,7 +718,7 @@ public class RegionStates {
       if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
         if (newState == State.MERGED || newState == State.SPLIT
             || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
           // Offline the region only if it's merged/split, or the table is disabled/disabling.
           // Otherwise, offline it from this server only when it is online on a different server.
           LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
@@ -1295,8 +1293,8 @@ public class RegionStates {
    * Update a region state. It will be put in transition if not already there.
    */
   private RegionState updateRegionState(final HRegionInfo hri,
-      final State state, final ServerName serverName, long openSeqNum) {
-    if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) {
+      final RegionState.State state, final ServerName serverName, long openSeqNum) {
+    if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) {
       LOG.warn("Failed to open/close " + hri.getShortNameToLog()
         + " on " + serverName + ", set to " + state);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 5929f26..e576934 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -228,7 +229,7 @@ public class TableNamespaceManager {
     }
 
     // Now check if the table is assigned, if not then fail fast
-    if (isTableAssigned()) {
+    if (isTableAssigned() && isTableEnabled()) {
       try {
         boolean initGoodSofar = true;
         nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
@@ -297,6 +298,12 @@ public class TableNamespaceManager {
     return false;
   }
 
+  private boolean isTableEnabled() throws IOException {
+    // Comparing on the constant avoids an NPE if the state is unknown (null).
+    return TableState.State.ENABLED.equals(masterServices.getTableStateManager()
+        .getTableState(TableName.NAMESPACE_TABLE_NAME));
+  }
+
   private boolean isTableAssigned() {
     return !masterServices.getAssignmentManager().getRegionStates().
         getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
new file mode 100644
index 0000000..4ba3d10
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+
+/**
+ * This is a helper class used to manage table states.
+ * States are persisted in the tableinfo file and cached internally.
+ */
+@InterfaceAudience.Private
+public class TableStateManager {
+  private static final Log LOG = LogFactory.getLog(TableStateManager.class);
+  private final TableDescriptors descriptors;
+
+  private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap();
+
+  public TableStateManager(MasterServices master) {
+    this.descriptors = master.getTableDescriptors();
+  }
+
+  public void start() throws IOException {
+    Map<String, TableDescriptor> all = descriptors.getAllDescriptors();
+    for (TableDescriptor table : all.values()) {
+      TableName tableName = table.getHTableDescriptor().getTableName();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding table state: " + tableName
+            + ": " + table.getTableState());
+      }
+      tableStates.put(tableName, table.getTableState());
+    }
+  }
+
+  /**
+   * Sets the table to the provided state.
+   * The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @throws IOException
+   */
+  public void setTableState(TableName tableName, TableState.State newState) throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (descriptor.getTableState() != newState) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+      }
+    }
+  }
+
+  /**
+   * Sets the table to the provided state, but only if the table is currently
+   * in one of the specified states. The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @throws IOException
+   */
+  public boolean setTableStateIfInStates(TableName tableName,
+                                         TableState.State newState,
+                                         TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+
+  /**
+   * Sets the table to the provided state, but only if the table is NOT currently
+   * in any of the specified states. The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @throws IOException
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName,
+                                            TableState.State newState,
+                                            TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  public boolean isTableState(TableName tableName, TableState.State... states) {
+    TableState.State tableState = null;
+    try {
+      tableState = getTableState(tableName);
+    } catch (IOException e) {
+      LOG.error("Unable to get table state, probably table not exists");
+      return false;
+    }
+    return tableState != null && TableState.isInStates(tableState, states);
+  }
+
+  public void setDeletedTable(TableName tableName) throws IOException {
+    TableState.State remove = tableStates.remove(tableName);
+    if (remove == null) {
+      LOG.warn("Moving table " + tableName + " state to deleted but was " +
+              "already deleted");
+    }
+  }
+
+  public boolean isTablePresent(TableName tableName) throws IOException {
+    return getTableState(tableName) != null;
+  }
+
+  /**
+   * Return all tables in given states.
+   *
+   * @param states filter by states
+   * @return tables in given states
+   * @throws IOException
+   */
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+    Set<TableName> rv = Sets.newHashSet();
+    for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) {
+      if (TableState.isInStates(entry.getValue(), states)) {
+        rv.add(entry.getKey());
+      }
+    }
+    return rv;
+  }
+
+  public TableState.State getTableState(TableName tableName) throws IOException {
+    TableState.State tableState = tableStates.get(tableName);
+    if (tableState == null) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor != null) {
+        tableState = descriptor.getTableState();
+      }
+    }
+    return tableState;
+  }
+
+  /**
+   * Write the descriptor in place and update the cached state.
+   * The write lock should be held by the caller.
+   *
+   * @param descriptor what to write
+   */
+  private void writeDescriptor(TableDescriptor descriptor) throws IOException {
+    TableName tableName = descriptor.getHTableDescriptor().getTableName();
+    TableState.State state = descriptor.getTableState();
+    descriptors.add(descriptor);
+    LOG.debug("Table " + tableName + " written descriptor for state " + state);
+    tableStates.put(tableName, state);
+    LOG.debug("Table " + tableName + " updated state to " + state);
+  }
+
+  /**
+   * Read the current descriptor for the table and update the cached state.
+   *
+   * @param tableName table to read the descriptor for
+   * @return the descriptor, or null if the table was not found
+   * @throws IOException if the descriptor cannot be read
+   */
+  private TableDescriptor readDescriptor(TableName tableName) throws IOException {
+    TableDescriptor descriptor = descriptors.getDescriptor(tableName);
+    if (descriptor == null) {
+      tableStates.remove(tableName);
+    } else {
+      tableStates.put(tableName, descriptor.getTableState());
+    }
+    return descriptor;
+  }
+}
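
For illustration, a minimal sketch of how callers drive the compare-and-set
style API above (the TableStateManager is assumed to come from the master's
AssignmentManager, as the handlers further down obtain it; the helper name is
illustrative, not part of this change):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableNotEnabledException;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    public final class TableStateGuardExample {
      /**
       * Move a table to DISABLING, but only if it is currently ENABLED.
       * Mirrors the guard the disable handler below uses.
       */
      static void beginDisable(TableStateManager tsm, TableName table)
          throws IOException {
        if (!tsm.setTableStateIfInStates(table, TableState.State.DISABLING,
            TableState.State.ENABLED)) {
          // Some other request already moved the table out of ENABLED.
          throw new TableNotEnabledException(table);
        }
      }
    }
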
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
index 389a738..3be3316 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
@@ -23,11 +23,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Handles CLOSED region event on Master.
@@ -93,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
     // Check if this table is being disabled or not
     if (this.assignmentManager.getTableStateManager().isTableState(this.regionInfo.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+        TableState.State.DISABLED, TableState.State.DISABLING) ||
         assignmentManager.getReplicasToClose().contains(regionInfo)) {
       assignmentManager.offlineDisabledRegion(regionInfo);
       return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 79e2493..09569b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -30,14 +30,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -48,7 +50,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -119,13 +120,6 @@ public class CreateTableHandler extends EventHandler {
       if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
-
-      // During master initialization, the ZK state could be inconsistent from failed DDL
-      // in the past. If we fail here, it would prevent master to start.  We should force
-      // setting the system table state regardless the table state.
-      boolean skipTableStateCheck =
-          !((HMaster) this.server).isInitialized() && tableName.isSystemTable();
-      checkAndSetEnablingTable(assignmentManager, tableName, skipTableStateCheck);
       success = true;
     } finally {
       if (!success) {
@@ -135,52 +129,6 @@ public class CreateTableHandler extends EventHandler {
     return this;
   }
 
-  static void checkAndSetEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName, boolean skipTableStateCheck) throws IOException {
-    // If we have multiple client threads trying to create the table at the
-    // same time, given the async nature of the operation, the table
-    // could be in a state where hbase:meta table hasn't been updated yet in
-    // the process() function.
-    // Use enabling state to tell if there is already a request for the same
-    // table in progress. This will introduce a new zookeeper call. Given
-    // createTable isn't a frequent operation, that should be ok.
-    // TODO: now that we have table locks, re-evaluate above -- table locks are not enough.
-    // We could have cleared the hbase.rootdir and not zk.  How can we detect this case?
-    // Having to clean zk AND hdfs is awkward.
-    try {
-      if (skipTableStateCheck) {
-        assignmentManager.getTableStateManager().setTableState(
-          tableName,
-          ZooKeeperProtos.Table.State.ENABLING);
-      } else if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(
-        tableName,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLED)) {
-        throw new TableExistsException(tableName);
-      }
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that the table will be" +
-        " enabling because of a ZooKeeper issue", e);
-    }
-  }
-
-  static void removeEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) {
-    // Try deleting the enabling node in case of error
-    // If this does not happen then if the client tries to create the table
-    // again with the same Active master
-    // It will block the creation saying TableAlreadyExists.
-    try {
-      assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING, false);
-    } catch (CoordinatedStateException e) {
-      // Keeper exception should not happen here
-      LOG.error("Got a keeper exception while removing the ENABLING table znode "
-          + tableName, e);
-    }
-  }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -228,9 +176,6 @@ public class CreateTableHandler extends EventHandler {
     releaseTableLock();
     LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
         (exception == null ? "successful" : "failed. " + exception));
-    if (exception != null) {
-      removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName());
-    }
   }
 
   /**
@@ -253,9 +198,12 @@ public class CreateTableHandler extends EventHandler {
     FileSystem fs = fileSystemManager.getFileSystem();
 
     // 1. Create Table Descriptor
+    // Using a copy of the descriptor, the table is first created in ENABLING state
+    TableDescriptor underConstruction = new TableDescriptor(
+        this.hTableDescriptor, TableState.State.ENABLING);
     Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
     new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-      tempTableDir, this.hTableDescriptor, false);
+      tempTableDir, underConstruction, false);
     Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);
 
     // 2. Create Regions
@@ -280,24 +228,18 @@ public class CreateTableHandler extends EventHandler {
       // 7. Trigger immediate assignment of the regions in round-robin fashion
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }
-
-    // 8. Set table enabled flag up in zk.
-    try {
-      assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that " + tableName + " will be" +
-        " enabled because of a ZooKeeper issue", e);
-    }
-
     // 8. Update the tabledescriptor cache.
     ((HMaster) this.server).getTableDescriptors().get(tableName);
+
+    // 9. Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+            TableState.State.ENABLED);
   }
 
   /**
    * Create any replicas for the regions (the default replicas that was
    * already created is passed to the method)
-   * @param hTableDescriptor
+   * @param hTableDescriptor descriptor to use
    * @param regions default replicas
    * @return the combined list of default and non-default replicas
    */
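
Condensed, the new create path above boils down to: persist the descriptor
carrying the ENABLING state, build and assign the regions, then flip the flag
to ENABLED. A hedged sketch of that ordering (the region steps are
placeholders, and the helper class is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    final class CreateFlowSketch {
      static void create(Configuration conf, Path tempTableDir,
          HTableDescriptor htd, TableStateManager tsm) throws IOException {
        // 1. The descriptor is written first, carrying the ENABLING state.
        TableDescriptor underConstruction =
            new TableDescriptor(htd, TableState.State.ENABLING);
        new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
            tempTableDir, underConstruction, false);
        // 2. ... create regions, add them to hbase:meta, assign ...
        // 3. Only once regions are assigned is the table flipped to ENABLED.
        tsm.setTableState(htd.getTableName(), TableState.State.ENABLED);
      }
    }
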
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index 76f603f..e9b764e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -25,13 +25,13 @@ import java.util.concurrent.ExecutorService;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -39,11 +39,10 @@ import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.htrace.Trace;
 
 /**
@@ -91,16 +90,11 @@ public class DisableTableHandler extends EventHandler {
       // DISABLED or ENABLED.
       //TODO: reevaluate this since we have table locks now
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-            this.tableName, ZooKeeperProtos.Table.State.DISABLING,
-            ZooKeeperProtos.Table.State.ENABLED)) {
-            LOG.info("Table " + tableName + " isn't enabled; skipping disable");
-            throw new TableNotEnabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " disabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+          this.tableName, TableState.State.DISABLING,
+          TableState.State.ENABLED)) {
+          LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+          throw new TableNotEnabledException(this.tableName);
         }
       }
       success = true;
@@ -139,8 +133,6 @@ public class DisableTableHandler extends EventHandler {
       }
     } catch (IOException e) {
       LOG.error("Error trying to disable table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to disable table " + this.tableName, e);
     } finally {
       releaseTableLock();
     }
@@ -156,10 +148,10 @@ public class DisableTableHandler extends EventHandler {
     }
   }
 
-  private void handleDisableTable() throws IOException, CoordinatedStateException {
+  private void handleDisableTable() throws IOException {
     // Set table disabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     boolean done = false;
     while (true) {
       // Get list of online regions that are of this table.  Regions that are
@@ -188,7 +180,7 @@ public class DisableTableHandler extends EventHandler {
     }
     // Flip the table to disabled if success.
     if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     LOG.info("Disabled table, " + this.tableName + ", is done=" + done);
   }
 
@@ -208,7 +200,7 @@ public class DisableTableHandler extends EventHandler {
       RegionStates regionStates = assignmentManager.getRegionStates();
       for (HRegionInfo region: regions) {
         if (regionStates.isRegionInTransition(region)
-            && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) {
+            && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
           continue;
         }
         final HRegionInfo hri = region;
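
The handler above now drives the whole disable through two descriptor-backed
state writes: DISABLING up front, DISABLED only once every region is offline.
A sketch of that shape (the region-closing step is a stand-in for the real
bulk-unassign logic):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class DisableFlowSketch {
      static void disable(TableStateManager tsm, TableName table)
          throws IOException {
        tsm.setTableState(table, TableState.State.DISABLING); // mark intent
        boolean allOffline = closeRegions(table); // stand-in for BulkDisabler
        if (allOffline) {
          // Flip only on success; otherwise the table stays DISABLING.
          tsm.setTableState(table, TableState.State.DISABLED);
        }
      }

      private static boolean closeRegions(TableName table) {
        return true; // placeholder; the handler unassigns region by region
      }
    }
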
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 2e6a10a..0b914d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -26,15 +26,15 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
@@ -97,14 +97,9 @@ public class EnableTableHandler extends EventHandler {
         if (!this.skipTableStateCheck) {
           throw new TableNotFoundException(tableName);
         }
-        try {
-          this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLING, true);
-          throw new TableNotFoundException(tableName);
-        } catch (CoordinatedStateException e) {
-          // TODO : Use HBCK to clear such nodes
-          LOG.warn("Failed to delete the ENABLING node for the table " + tableName
-              + ".  The table will remain unusable. Run HBCK to manually fix the problem.");
+        TableStateManager tsm = assignmentManager.getTableStateManager();
+        if (tsm.isTableState(tableName, TableState.State.ENABLING)) {
+          tsm.setDeletedTable(tableName);
         }
       }
 
@@ -113,16 +108,11 @@ public class EnableTableHandler extends EventHandler {
       // After that, no other requests can be accepted until the table reaches
       // DISABLED or ENABLED.
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-              this.tableName, ZooKeeperProtos.Table.State.ENABLING,
-              ZooKeeperProtos.Table.State.DISABLED)) {
-            LOG.info("Table " + tableName + " isn't disabled; skipping enable");
-            throw new TableNotDisabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " enabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+            this.tableName, TableState.State.ENABLING,
+            TableState.State.DISABLED)) {
+          LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+          throw new TableNotDisabledException(this.tableName);
         }
       }
       success = true;
@@ -158,11 +148,7 @@ public class EnableTableHandler extends EventHandler {
       if (cpHost != null) {
         cpHost.postEnableTableHandler(this.tableName, null);
       }
-    } catch (IOException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (InterruptedException e) {
+    } catch (IOException | InterruptedException e) {
       LOG.error("Error trying to enable the table " + this.tableName, e);
     } finally {
       releaseTableLock();
@@ -179,14 +165,13 @@ public class EnableTableHandler extends EventHandler {
     }
   }
 
-  private void handleEnableTable() throws IOException, CoordinatedStateException,
+  private void handleEnableTable() throws IOException,
       InterruptedException {
     // I could check table is disabling and if so, not enable but require
     // that user first finish disabling but that might be obnoxious.
 
-    // Set table enabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.ENABLING);
+      TableState.State.ENABLING);
     boolean done = false;
     ServerManager serverManager = ((HMaster)this.server).getServerManager();
     // Get the regions of this table. We're done when all listed
@@ -251,7 +236,7 @@ public class EnableTableHandler extends EventHandler {
     if (done) {
       // Flip the table to enabled.
       this.assignmentManager.getTableStateManager().setTableState(
-        this.tableName, ZooKeeperProtos.Table.State.ENABLED);
+        this.tableName, TableState.State.ENABLED);
       LOG.info("Table '" + this.tableName
       + "' was successfully enabled. Status: done=" + done);
     } else {
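
One behavioural detail worth noting in the hunk above: when the table is
missing from hbase:meta but still marked ENABLING (a half-finished create),
the handler now simply drops the stale state via setDeletedTable instead of
asking the operator to run HBCK. A sketch of that recovery check, under the
same assumptions as the earlier sketches:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class StaleStateCleanupSketch {
      static void cleanupIfHalfCreated(TableStateManager tsm, TableName table)
          throws IOException {
        if (tsm.isTableState(table, TableState.State.ENABLING)) {
          tsm.setDeletedTable(table); // drop the leftover ENABLING marker
        }
      }
    }
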
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index 43a0f65..0081f16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -29,6 +29,8 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -36,16 +38,15 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -137,7 +138,7 @@ public abstract class TableEventHandler extends EventHandler {
       handleTableOperation(hris);
       if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
           getAssignmentManager().getTableStateManager().isTableState(
-          tableName, ZooKeeperProtos.Table.State.ENABLED)) {
+          tableName, TableState.State.ENABLED)) {
         if (reOpenAllRegions(hris)) {
           LOG.info("Completed table operation " + eventType + " on table " +
               tableName);
@@ -236,10 +237,10 @@ public abstract class TableEventHandler extends EventHandler {
    * @throws FileNotFoundException
    * @throws IOException
    */
-  public HTableDescriptor getTableDescriptor()
+  public TableDescriptor getTableDescriptor()
   throws FileNotFoundException, IOException {
-    HTableDescriptor htd =
-      this.masterServices.getTableDescriptors().get(tableName);
+    TableDescriptor htd =
+      this.masterServices.getTableDescriptors().getDescriptor(tableName);
     if (htd == null) {
       throw new IOException("HTableDescriptor missing for " + tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index a3dc1a4..c9df56e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -336,7 +336,7 @@ public class AddColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only run when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 152af45..a8459f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -33,20 +33,21 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -299,8 +300,8 @@ public class CreateTableProcedure
         !(env.getMasterServices().isInitialized()) && tableName.isSystemTable();
     if (!skipTableStateCheck) {
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (tsm.isTableState(tableName, true, ZooKeeperProtos.Table.State.ENABLING,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+      if (tsm.isTableState(tableName, TableState.State.ENABLING,
+          TableState.State.ENABLED)) {
         LOG.warn("The table " + tableName + " does not exist in meta but has a znode. " +
                "run hbck to fix inconsistencies.");
         setFailure("master-create-table", new TableExistsException(getTableName()));
@@ -375,7 +376,7 @@ public class CreateTableProcedure
     // using a copy of descriptor, table will be created enabling first
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
     new FSTableDescriptors(env.getMasterConfiguration()).createTableDescriptorForTableDirectory(
-      tempTableDir, hTableDescriptor, false);
+      tempTableDir, new TableDescriptor(hTableDescriptor), false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
@@ -448,14 +449,14 @@ public class CreateTableProcedure
 
     // Mark the table as Enabling
     assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING);
+        TableState.State.ENABLING);
 
     // Trigger immediate assignment of the regions in round-robin fashion
     ModifyRegionUtils.assignRegions(assignmentManager, regions);
 
     // Enable table
     assignmentManager.getTableStateManager()
-      .setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
+      .setTableState(tableName, TableState.State.ENABLED);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index 5b1a69c..3e6568b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -357,7 +357,7 @@ public class DeleteColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only run when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index bec599c..7fe2a89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Trace;
@@ -286,8 +286,8 @@ public class DisableTableProcedure
       // this issue.
       TableStateManager tsm =
         env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.setTableStateIfInStates(tableName, ZooKeeperProtos.Table.State.DISABLING,
-            ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLED)) {
+      if (!tsm.setTableStateIfInStates(tableName, TableState.State.DISABLING,
+            TableState.State.DISABLING, TableState.State.ENABLED)) {
         LOG.info("Table " + tableName + " isn't enabled; skipping disable");
         setFailure("master-disable-table", new TableNotEnabledException(tableName));
         canTableBeDisabled = false;
@@ -311,7 +311,7 @@ public class DisableTableProcedure
       try {
         // If the state was changed, undo it.
         if (env.getMasterServices().getAssignmentManager().getTableStateManager().isTableState(
-            tableName, ZooKeeperProtos.Table.State.DISABLING)) {
+            tableName, TableState.State.DISABLING)) {
           EnableTableProcedure.setTableStateToEnabled(env, tableName);
         }
       } catch (Exception e) {
@@ -344,7 +344,7 @@ public class DisableTableProcedure
     // Set table disabling flag up in zk.
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
   }
 
   /**
@@ -435,7 +435,7 @@ public class DisableTableProcedure
     // Flip the table to disabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     LOG.info("Disabled table, " + tableName + ", is completed.");
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index f4a4538..c06bb07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
@@ -45,11 +45,11 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -307,7 +307,7 @@ public class EnableTableProcedure
       // was implemented. With table lock, there is no need to set the state here (it will
       // set the state later on). A quick state check should be enough for us to move forward.
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+      if (!tsm.isTableState(tableName, TableState.State.DISABLED)) {
         LOG.info("Table " + tableName + " isn't disabled; skipping enable");
         setFailure("master-enable-table", new TableNotDisabledException(this.tableName));
         canTableBeEnabled = false;
@@ -344,8 +344,7 @@ public class EnableTableProcedure
     // Set table disabling flag up in zk.
     LOG.info("Attempting to enable the table " + tableName);
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName,
-      ZooKeeperProtos.Table.State.ENABLING);
+      tableName, TableState.State.ENABLING);
   }
 
   /**
@@ -490,8 +489,7 @@ public class EnableTableProcedure
       final TableName tableName) throws HBaseException, IOException {
     // Flip the table to Enabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName,
-      ZooKeeperProtos.Table.State.ENABLED);
+      tableName, TableState.State.ENABLED);
     LOG.info("Table '" + tableName + "' was successfully enabled.");
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index 2e8499f..c6ff1b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -78,7 +78,7 @@ public final class MasterDDLOperationHelper {
 
     // We only execute this procedure with table online if online schema change config is set.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)
+        .isTableState(tableName, TableState.State.DISABLED)
         && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
       throw new TableNotDisabledException(tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index 5a6b592..590e4ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -316,7 +316,7 @@ public class ModifyColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only run when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index e785684..fa9746f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -42,11 +42,11 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
@@ -294,7 +294,7 @@ public class ModifyTableProcedure
         env.getMasterServices().getTableDescriptors().get(getTableName());
 
     if (env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       // We only execute this procedure with table online if online schema change config is set.
       if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
         throw new TableNotDisabledException(getTableName());
@@ -432,7 +432,7 @@ public class ModifyTableProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only run when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index b6e7a7c..ef04cfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ServerCrashState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -526,7 +526,7 @@ implements ServerProcedureInterface {
           } else if (rit != null) {
             if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                 && am.getTableStateManager().isTableState(hri.getTable(),
-                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+                TableState.State.DISABLED, TableState.State.DISABLING) ||
                 am.getReplicasToClose().contains(hri)) {
               // If the table was partially disabled and the RS went down, we should clear the
               // RIT and remove the node for the region.
@@ -713,7 +713,7 @@ implements ServerProcedureInterface {
     }
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     if (disabled){
       LOG.info("The table " + hri.getTable() + " was disabled.  Hence not proceeding.");
       return false;
@@ -725,7 +725,7 @@ implements ServerProcedureInterface {
       return false;
     }
     boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     if (disabling) {
       LOG.info("The table " + hri.getTable() + " is disabled.  Hence not assigning region" +
         hri.getEncodedName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 98018f0..5874c59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -622,7 +622,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
     AssignmentManager assignmentMgr = master.getAssignmentManager();
     if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+      TableState.State.ENABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table enabled, starting distributed snapshot for "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -634,7 +634,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
     // For disabled table, snapshot is created by the master
     else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.DISABLED)) {
+        TableState.State.DISABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table is disabled, running snapshot entirely on master "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -801,7 +801,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     // Execute the restore/clone operation
     if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(
-          TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
+          TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +
             TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
             "perform a restore operation" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 6da05cd..8a1c11a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -39,12 +39,14 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -373,7 +375,7 @@ public class NamespaceUpgrade implements Tool {
       HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
       newDesc.setName(newTableName);
       new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-        newTablePath, newDesc, true);
+        newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true);
     }
 
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 0b483d9..37528b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -111,13 +112,14 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-        compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
+        compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
+            path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        compactRegion(tableDir, htd, path, compactOnce, major);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
       } else {
@@ -128,9 +130,9 @@ public class CompactionTool extends Configured implements Tool {
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-        compactRegion(tableDir, htd, regionDir, compactOnce, major);
+        compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 11b6120..15360d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -90,6 +90,7 @@ public class WALCellCodec implements Codec {
    * Fully prepares the codec for use.
    * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    *          uses a {@link WALCellCodec}.
+   * @param cellCodecClsName name of the codec class to use
    * @param compression compression the codec should use
    * @return a {@link WALCellCodec} ready for use.
    * @throws UnsupportedOperationException if the codec cannot be instantiated
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index c76a3a9..0b54c4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -305,7 +307,8 @@ public final class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir)
+            .getHTableDescriptor();
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
@@ -410,7 +413,8 @@ public final class SnapshotManifest {
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
       new FSTableDescriptors(conf, workingDirFs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+        .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
+            htd, TableState.State.ENABLED), false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest for " + this.desc.getName());
       convertToV2SingleManifest();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7e161ca..8a163a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -38,7 +38,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -88,15 +90,10 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, HTableDescriptor> cache =
-    new ConcurrentHashMap<TableName, HTableDescriptor>();
+  private final Map<TableName, TableDescriptor> cache =
+    new ConcurrentHashMap<TableName, TableDescriptor>();
 
   /**
-   * Table descriptor for <code>hbase:meta</code> catalog table
-   */
-   private final HTableDescriptor metaTableDescriptor;
-
-   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
    * This instance can do write operations (is not read only).
@@ -121,7 +118,6 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
   @Override
@@ -148,12 +144,12 @@ public class FSTableDescriptors implements TableDescriptors {
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public HTableDescriptor get(final TableName tablename)
+  public TableDescriptor getDescriptor(final TableName tablename)
   throws IOException {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return metaTableDescriptor;
+      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
     }
     // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -163,74 +159,101 @@ public class FSTableDescriptors implements TableDescriptors {
 
     if (usecache) {
       // Look in cache of descriptors.
-      HTableDescriptor cachedtdm = this.cache.get(tablename);
+      TableDescriptor cachedtdm = this.cache.get(tablename);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
     }
-    HTableDescriptor tdmt = null;
+    TableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, e);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (TableInfoMissingException e) {
       // ignore. This is regular operation
-    } catch (IOException ioe) {
+    } catch (NullPointerException | IOException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, ioe);
+          + tablename, e);
     }
     // last HTD written wins
     if (usecache && tdmt != null) {
       this.cache.put(tablename, tdmt);
     }
-
     return tdmt;
   }
 
   /**
+   * Get the current table descriptor for the given table, or null if none exists.
+   *
+   * Uses a local cache of the descriptor but still checks the filesystem on each call
+   * to see if a newer file has been created since the cached one was read.
+   */
+  @Override
+  public HTableDescriptor get(TableName tableName) throws IOException {
+    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) {
+      cachehits++;
+      return HTableDescriptor.META_TABLEDESC;
+    }
+    TableDescriptor descriptor = getDescriptor(tableName);
+    return descriptor == null ? null : descriptor.getHTableDescriptor();
+  }
+
+  /**
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, HTableDescriptor> getAll()
+  public Map<String, TableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
 
     if (fsvisited && usecache) {
-      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
-        htds.put(entry.getKey().toString(), entry.getValue());
+      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
+        tds.put(entry.getKey().toString(), entry.getValue());
       }
       // add hbase:meta to the response
-      htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
-        HTableDescriptor.META_TABLEDESC);
+      tds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
+          new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED));
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        HTableDescriptor htd = null;
+        TableDescriptor td = null;
         try {
-          htd = get(FSUtils.getTableName(d));
+          td = getDescriptor(FSUtils.getTableName(d));
         } catch (FileNotFoundException fnfe) {
           // inability of retrieving one HTD shouldn't stop getting the remaining
           LOG.warn("Trouble retrieving htd", fnfe);
         }
-        if (htd == null) {
+        if (td == null) {
           allvisited = false;
           continue;
         } else {
-          htds.put(htd.getTableName().getNameAsString(), htd);
+          tds.put(td.getHTableDescriptor().getTableName().getNameAsString(), td);
         }
         fsvisited = allvisited;
       }
     }
-    return htds;
+    return tds;
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+  /**
+   * Returns a map from table name to table descriptor for all tables.
    */
   @Override
+  public Map<String, HTableDescriptor> getAll() throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
+    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
+        .entrySet()) {
+      htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
+    }
+    return htds;
+  }
+
+  /**
+   * Find descriptors by namespace.
+   * @see #get(org.apache.hadoop.hbase.TableName)
+   */
+  @Override
   public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
@@ -255,21 +278,51 @@ public class FSTableDescriptors implements TableDescriptors {
    * and updates the local cache with it.
    */
   @Override
-  public void add(HTableDescriptor htd) throws IOException {
+  public void add(TableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
+    TableName tableName = htd.getHTableDescriptor().getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
       throw new NotImplementedException();
     }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
+        "Cannot add a table descriptor for a reserved subdirectory name: "
+            + htd.getHTableDescriptor().getNameAsString());
     }
     updateTableDescriptor(htd);
   }
 
   /**
+   * Adds (or updates) the table descriptor to the FileSystem
+   * and updates the local cache with it.
+   */
+  @Override
+  public void add(HTableDescriptor htd) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+    }
+    TableName tableName = htd.getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
+      throw new NotImplementedException();
+    }
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
+      throw new NotImplementedException(
+          "Cannot add a table descriptor for a reserved subdirectory name: "
+              + htd.getNameAsString());
+    }
+    TableDescriptor descriptor = getDescriptor(htd.getTableName());
+    if (descriptor == null) {
+      descriptor = new TableDescriptor(htd);
+    } else {
+      descriptor.setHTableDescriptor(htd);
+    }
+    updateTableDescriptor(descriptor);
+  }
+
+  /**
    * Removes the table descriptor from the local cache and returns it.
    * If not in read only mode, it also deletes the entire table directory(!)
    * from the FileSystem.
@@ -286,11 +339,11 @@ public class FSTableDescriptors implements TableDescriptors {
         throw new IOException("Failed delete of " + tabledir.toString());
       }
     }
-    HTableDescriptor descriptor = this.cache.remove(tablename);
+    TableDescriptor descriptor = this.cache.remove(tablename);
     if (descriptor == null) {
       return null;
     } else {
-      return descriptor;
+      return descriptor.getHTableDescriptor();
     }
   }
 
@@ -474,8 +527,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-    Path hbaseRootDir, TableName tableName) throws IOException {
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+      Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
@@ -485,37 +538,16 @@ public class FSTableDescriptors implements TableDescriptors {
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-    Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
-    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
-    return getTableDescriptorFromFs(fs, tableDir, rewritePb);
-  }
-  /**
-   * Returns the latest table descriptor for the table located at the given directory
-   * directly from the file system if it exists.
-   * @throws TableInfoMissingException if there is no descriptor
-   */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
     throws IOException {
-    return getTableDescriptorFromFs(fs, tableDir, false);
-  }
-
-  /**
-   * Returns the latest table descriptor for the table located at the given directory
-   * directly from the file system if it exists.
-   * @throws TableInfoMissingException if there is no descriptor
-   */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
-    boolean rewritePb)
-  throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
-    return readTableDescriptor(fs, status, rewritePb);
+    return readTableDescriptor(fs, status, false);
   }
 
-  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
       boolean rewritePb) throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
@@ -525,30 +557,32 @@ public class FSTableDescriptors implements TableDescriptors {
     } finally {
       fsDataInputStream.close();
     }
-    HTableDescriptor htd = null;
+    TableDescriptor td = null;
     try {
-      htd = HTableDescriptor.parseFrom(content);
+      td = TableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
       // we have old HTableDescriptor here
       try {
         HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
         LOG.warn("Found old table descriptor, converting to new format for table " +
           ohtd.getTableName());
-        htd = new HTableDescriptor(ohtd);
-        if (rewritePb) rewriteTableDescriptor(fs, status, htd);
+        td = new TableDescriptor(ohtd);
+        if (rewritePb) {
+          rewriteTableDescriptor(fs, status, td);
+        }
       } catch (DeserializationException e1) {
         throw new IOException("content=" + Bytes.toShort(content), e1);
       }
     }
     if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
       // Convert the file over to be pb before leaving here.
-      rewriteTableDescriptor(fs, status, htd);
+      rewriteTableDescriptor(fs, status, td);
     }
-    return htd;
+    return td;
   }
 
   private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
-    final HTableDescriptor td)
+    final TableDescriptor td)
   throws IOException {
     Path tableInfoDir = status.getPath().getParent();
     Path tableDir = tableInfoDir.getParent();
@@ -560,17 +594,18 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd)
+  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    Path tableDir = getTableDir(htd.getTableName());
-    Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
+    TableName tableName = td.getHTableDescriptor().getTableName();
+    Path tableDir = getTableDir(tableName);
+    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
     if (usecache) {
-      this.cache.put(htd.getTableName(), htd);
+      this.cache.put(td.getHTableDescriptor().getTableName(), td);
     }
     return p;
   }
@@ -621,9 +656,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final HTableDescriptor htd, final Path tableDir,
-    final FileStatus currentDescriptorFile)
-  throws IOException {
+    final TableDescriptor htd, final Path tableDir,
+    final FileStatus currentDescriptorFile) throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
     // This directory is never removed to avoid removing it out from under a concurrent writer.
     Path tmpTableDir = new Path(tableDir, TMP_DIR);
@@ -652,7 +686,7 @@ public class FSTableDescriptors implements TableDescriptors {
       }
       tableInfoDirPath = new Path(tableInfoDir, filename);
       try {
-        writeHTD(fs, tempPath, htd);
+        writeTD(fs, tempPath, htd);
         fs.mkdirs(tableInfoDirPath.getParent());
         if (!fs.rename(tempPath, tableInfoDirPath)) {
           throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
@@ -676,7 +710,7 @@ public class FSTableDescriptors implements TableDescriptors {
     return tableInfoDirPath;
   }
 
-  private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
+  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
@@ -693,20 +727,29 @@ public class FSTableDescriptors implements TableDescriptors {
    * Used by tests.
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
     return createTableDescriptor(htd, false);
   }
 
   /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   * Used by tests.
+   * @return True if we successfully created file.
+   */
+  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+    return createTableDescriptor(new TableDescriptor(htd), false);
+  }
+
+  /**
    * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
    * forceCreation is true then even if previous table descriptor is present it
    * will be overwritten
    *
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDir(htd.getTableName());
+    Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
 
@@ -722,7 +765,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      HTableDescriptor htd, boolean forceCreation) throws IOException {
+      TableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
@@ -743,4 +786,3 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
 }
-
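
Taken together, FSTableDescriptors now serves two views: the state-carrying
TableDescriptor (getDescriptor/getAllDescriptors) and the legacy
HTableDescriptor (get/getAll). A hypothetical caller-side sketch; the
single-argument constructor is assumed from existing branch-1 code and the
table name is made up:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class FsTableDescriptorsExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FSTableDescriptors fstd = new FSTableDescriptors(conf);
        TableName name = TableName.valueOf("example"); // hypothetical table
        TableDescriptor td = fstd.getDescriptor(name); // schema plus table state
        HTableDescriptor htd = fstd.get(name);         // legacy schema-only view
        if (td != null && htd != null) {
          System.out.println(td.getHTableDescriptor().getTableName());
        }
      }
    }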
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 6cb3d20..6ed2b3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -84,6 +83,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -107,13 +107,13 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -128,9 +128,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -1337,9 +1334,9 @@ public class HBaseFsck extends Configured implements Closeable {
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          HTableDescriptor htd =
+          TableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
-          modTInfo.htds.add(htd);
+          modTInfo.htds.add(htd.getHTableDescriptor());
         } catch (IOException ioe) {
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@@ -1394,7 +1391,7 @@ public class HBaseFsck extends Configured implements Closeable {
     for (String columnfamimly : columns) {
       htd.addFamily(new HColumnDescriptor(columnfamimly));
     }
-    fstd.createTableDescriptor(htd, true);
+    fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
     return true;
   }
 
@@ -1442,7 +1439,7 @@ public class HBaseFsck extends Configured implements Closeable {
           if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
-            fstd.createTableDescriptor(htd, true);
+            fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
             j++;
             iter.remove();
           }
@@ -1802,19 +1799,16 @@ public class HBaseFsck extends Configured implements Closeable {
    * @throws IOException
    */
   private void loadDisabledTables()
-  throws ZooKeeperConnectionException, IOException {
+  throws IOException {
     HConnectionManager.execute(new HConnectable<Void>(getConf()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
-        try {
-          for (TableName tableName :
-              ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
-            disabledTables.add(tableName);
+        TableName[] tables = connection.listTableNames();
+        for (TableName table : tables) {
+          if (connection.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disabledTables.add(table);
           }
-        } catch (KeeperException ke) {
-          throw new IOException(ke);
-        } catch (InterruptedException e) {
-          throw new InterruptedIOException();
         }
         return null;
       }
@@ -3546,12 +3540,15 @@ public class HBaseFsck extends Configured implements Closeable {
   /**
    * Check whether a orphaned table ZNode exists and fix it if requested.
    * @throws IOException
-   * @throws KeeperException
-   * @throws InterruptedException
    */
   private void checkAndFixOrphanedTableZNodes()
-      throws IOException, KeeperException, InterruptedException {
-    Set<TableName> enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw);
+      throws IOException {
+    Set<TableName> enablingTables = new HashSet<>();
+    for (TableName tableName: admin.listTableNames()) {
+      if (connection.getTableState(tableName).getState().equals(TableState.State.ENABLING)) {
+        enablingTables.add(tableName);
+      }
+    }
     String msg;
     TableInfo tableInfo;
 
@@ -3570,21 +3567,12 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) {
-      ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw);
-
       for (TableName tableName : orphanedTableZNodes) {
-        try {
-          // Set the table state to be disabled so that if we made mistake, we can trace
-          // the history and figure it out.
-          // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
-          // Both approaches works.
-          zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
-        } catch (CoordinatedStateException e) {
-          // This exception should not happen here
-          LOG.error(
-            "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName,
-            e);
-        }
+        // Set the table state to disabled so that if we made a mistake, we can trace
+        // the history and figure it out.
+        // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
+        // Both approaches work.
+        admin.disableTable(tableName);
       }
     }
   }
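
The pattern above, listing tables through the connection and filtering on
TableState, replaces the direct ZK reads. A self-contained sketch of the same
lookup, assuming an already-open HConnection (obtaining one is omitted):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.TableState;

    public class DisabledTableScan {
      static void printDisabledTables(HConnection connection) throws IOException {
        for (TableName table : connection.listTableNames()) {
          TableState state = connection.getTableState(table);
          // DISABLED and DISABLING tables are both treated as not serving.
          if (state.inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
            System.out.println(table + " is not serving");
          }
        }
      }
    }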
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 7f2c85d..02b5980 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -155,7 +155,8 @@ class HMerge {
 
       this.rootDir = FSUtils.getRootDir(conf);
       Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
+      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
+          .getHTableDescriptor();
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       final Configuration walConf = new Configuration(conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index adab203..1530d28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -153,9 +154,9 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd, meta, info1, info2);
+    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index 57ec87d..82308be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,6 +30,9 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@@ -153,8 +159,9 @@ public class ZKDataMigrator extends Configured implements Tool {
       }
       byte[] data = ZKUtil.getData(zkw, znode);
       if (ProtobufUtil.isPBMagicPrefix(data)) continue;
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
       data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
       ZKUtil.setData(zkw, znode, data);
     }
@@ -232,15 +239,14 @@ public class ZKDataMigrator extends Configured implements Tool {
   }
 
   private void migrateClusterKeyToPB(ZooKeeperWatcher zkw, String peerZnode, byte[] data)
-      throws KeeperException, NoNodeException {
+      throws KeeperException {
     ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder()
         .setClusterkey(Bytes.toString(data)).build();
     ZKUtil.setData(zkw, peerZnode, ProtobufUtil.prependPBMagic(peer.toByteArray()));
   }
 
   private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data,
- String peerStatePath)
-      throws KeeperException, NoNodeException {
+     String peerStatePath) throws KeeperException {
     String state = Bytes.toString(data);
     if (ZooKeeperProtos.ReplicationState.State.ENABLED.name().equals(state)) {
       ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
@@ -249,6 +255,80 @@ public class ZKDataMigrator extends Configured implements Tool {
     }
   }
 
+  /**
+   * Method for table state migration.
+   * Reads table states from ZK so they can be applied to the internal
+   * state and then deleted.
+   * Used by the master to complete the migration from ZK-based table
+   * states to table-descriptor-based states.
+   */
+  @Deprecated
+  public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
+      throws KeeperException, InterruptedException {
+    Map<TableName, TableState.State> rv = new HashMap<>();
+    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
+    if (children == null) {
+      return rv;
+    }
+    for (String child: children) {
+      TableName tableName = TableName.valueOf(child);
+      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
+      TableState.State newState = TableState.State.ENABLED;
+      if (state != null) {
+        switch (state) {
+          case ENABLED:
+            newState = TableState.State.ENABLED;
+            break;
+          case DISABLED:
+            newState = TableState.State.DISABLED;
+            break;
+          case DISABLING:
+            newState = TableState.State.DISABLING;
+            break;
+          case ENABLING:
+            newState = TableState.State.ENABLING;
+            break;
+          default:
+        }
+      }
+      rv.put(tableName, newState);
+    }
+    return rv;
+  }
+
+  /**
+   * Gets table state from ZK.
+   * @param zkw ZooKeeperWatcher instance to use
+   * @param tableName table we're checking
+   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
+   * @throws KeeperException
+   */
+  @Deprecated
+  private static ZooKeeperProtos.DeprecatedTableState.State getTableState(
+      final ZooKeeperWatcher zkw, final TableName tableName)
+      throws KeeperException, InterruptedException {
+    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+    byte [] data = ZKUtil.getData(zkw, znode);
+    if (data == null || data.length <= 0) {
+      return null;
+    }
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data,
+          magicLen, data.length - magicLen).build();
+      return t.getState();
+    } catch (InvalidProtocolBufferException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
   public static void main(String args[]) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args));
   }
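
A sketch of how a master-side caller might drive the migration helper added
above; the surrounding setup (constructing the ZooKeeperWatcher, persisting
the migrated states) is assumed and omitted:

    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.util.ZKDataMigrator;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;

    public class TableStateMigrationSketch {
      static void migrate(ZooKeeperWatcher zkw)
          throws KeeperException, InterruptedException {
        Map<TableName, TableState.State> states =
            ZKDataMigrator.queryForTableStates(zkw);
        for (Map.Entry<TableName, TableState.State> e : states.entrySet()) {
          // Apply e.getValue() to the descriptor-based store here; once all
          // states are persisted, the old ZK znodes can be removed.
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }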
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 9273b6a..bb703ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -50,6 +50,9 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -61,7 +64,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -71,7 +73,6 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
@@ -98,7 +100,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
@@ -123,9 +124,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;
 
 /**
@@ -335,13 +333,14 @@ public class WALSplitter {
         LOG.warn("Nothing to split in log file " + logPath);
         return true;
       }
-      if (csm != null) {
-        try {
-          TableStateManager tsm = csm.getTableStateManager();
-          disablingOrDisabledTables = tsm.getTablesInStates(
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Can't get disabling/disabled tables", e);
+      if (csm != null) {
+        HConnection scc = csm.getServer().getConnection();
+        TableName[] tables = scc.listTableNames();
+        for (TableName table : tables) {
+          if (scc.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disablingOrDisabledTables.add(table);
+          }
         }
       }
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
deleted file mode 100644
index db00c14..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Implementation of TableStateManager which reads, caches and sets state
- * up in ZooKeeper.  If multiple read/write clients, will make for confusion.
- * Code running on client side without consensus context should use
- * {@link ZKTableStateClientSideReader} instead.
- *
- * <p>To save on trips to the zookeeper ensemble, internally we cache table
- * state.
- */
-@InterfaceAudience.Private
-public class ZKTableStateManager implements TableStateManager {
-  // A znode will exist under the table directory if it is in any of the
-  // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING},
-  // or {@link TableState#DISABLED}.  If {@link TableState#ENABLED}, there will
-  // be no entry for a table in zk.  Thats how it currently works.
-
-  private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class);
-  private final ZooKeeperWatcher watcher;
-
-  /**
-   * Cache of what we found in zookeeper so we don't have to go to zk ensemble
-   * for every query.  Synchronize access rather than use concurrent Map because
-   * synchronization needs to span query of zk.
-   */
-  private final Map<TableName, ZooKeeperProtos.Table.State> cache =
-    new HashMap<TableName, ZooKeeperProtos.Table.State>();
-
-  public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException,
-      InterruptedException {
-    super();
-    this.watcher = zkw;
-    populateTableStates();
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @throws KeeperException, InterruptedException
-   */
-  private void populateTableStates() throws KeeperException, InterruptedException {
-    synchronized (this.cache) {
-      List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
-      if (children == null) return;
-      for (String child: children) {
-        TableName tableName = TableName.valueOf(child);
-        ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName);
-        if (state != null) this.cache.put(tableName, state);
-      }
-    }
-  }
-
-  /**
-   * Sets table state in ZK. Sets no watches.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      LOG.info("Moving table " + tableName + " state from " + this.cache.get(tableName)
-        + " to " + state);
-      try {
-        setTableStateInZK(tableName, state);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfInStates(TableName tableName,
-                                         ZooKeeperProtos.Table.State newState,
-                                         ZooKeeperProtos.Table.State... states)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      // Transition ENABLED->DISABLING has to be performed with a hack, because
-      // we treat empty state as enabled in this case because 0.92- clusters.
-      if (
-          (newState == ZooKeeperProtos.Table.State.DISABLING) &&
-               this.cache.get(tableName) != null && !isTableState(tableName, states) ||
-          (newState != ZooKeeperProtos.Table.State.DISABLING &&
-               !isTableState(tableName, states) )) {
-        return false;
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfNotInStates(TableName tableName,
-                                            ZooKeeperProtos.Table.State newState,
-                                            ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        // If the table is in the one of the states from the states list, the cache
-        // might be out-of-date, try to find it out from the master source (zookeeper server).
-        //
-        // Note: this adds extra zookeeper server calls and might have performance impact.
-        // However, this is not the happy path so we should not reach here often. Therefore,
-        // the performance impact should be minimal to none.
-        try {
-          ZooKeeperProtos.Table.State curstate = getTableState(watcher, tableName);
-
-          if (isTableInState(Arrays.asList(states), curstate)) {
-            return false;
-          }
-        } catch (KeeperException e) {
-          throw new CoordinatedStateException(e);
-        } catch (InterruptedException e) {
-          throw new CoordinatedStateException(e);
-        }
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  private void setTableStateInZK(final TableName tableName,
-                                 final ZooKeeperProtos.Table.State state)
-      throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString());
-    if (ZKUtil.checkExists(this.watcher, znode) == -1) {
-      ZKUtil.createAndFailSilent(this.watcher, znode);
-    }
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(state);
-      byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
-      ZKUtil.setData(this.watcher, znode, data);
-      this.cache.put(tableName, state);
-    }
-  }
-
-  /**
-   * Checks if table is marked in specified state in ZK (using cache only). {@inheritDoc}
-   */
-  @Override
-  public boolean isTableState(final TableName tableName,
-      final ZooKeeperProtos.Table.State... states) {
-    return isTableState(tableName, false, states); // only check cache
-  }
-
-  /**
-   * Checks if table is marked in specified state in ZK. {@inheritDoc}
-   */
-  @Override
-  public boolean isTableState(final TableName tableName, final boolean checkSource,
-      final ZooKeeperProtos.Table.State... states) {
-    boolean isTableInSpecifiedState;
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State currentState = this.cache.get(tableName);
-      if (checkSource) {
-        // The cache might be out-of-date, try to find it out from the master source (zookeeper
-        // server) and update the cache.
-        try {
-          ZooKeeperProtos.Table.State stateInZK = getTableState(watcher, tableName);
-
-          if (currentState != stateInZK) {
-            if (stateInZK != null) {
-              this.cache.put(tableName, stateInZK);
-            } else {
-              this.cache.remove(tableName);
-            }
-            currentState = stateInZK;
-          }
-        } catch (KeeperException | InterruptedException e) {
-          // Contacting zookeeper failed.  Let us just trust the value in cache.
-        }
-      }
-      return isTableInState(Arrays.asList(states), currentState);
-    }
-  }
-
-  /**
-   * Deletes the table in zookeeper. Fails silently if the table is not currently disabled in
-   * zookeeper. Sets no watches. {@inheritDoc}
-   */
-  @Override
-  public void setDeletedTable(final TableName tableName)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (this.cache.remove(tableName) == null) {
-        LOG.warn("Moving table " + tableName + " state to deleted but was already deleted");
-      }
-      try {
-        ZKUtil.deleteNodeFailSilent(this.watcher,
-          ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * check if table is present.
-   *
-   * @param tableName table we're working on
-   * @return true if the table is present
-   */
-  @Override
-  public boolean isTablePresent(final TableName tableName) {
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State state = this.cache.get(tableName);
-      return !(state == null);
-    }
-  }
-
-  /**
-   * Gets a list of all the tables set as disabling in zookeeper.
-   * @return Set of disabling tables, empty Set if none
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  @Override
-  public Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException {
-    try {
-      return getAllTables(states);
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                                       boolean deletePermanentState)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        this.cache.remove(tableName);
-        if (deletePermanentState) {
-          try {
-            ZKUtil.deleteNodeFailSilent(this.watcher,
-                ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-          } catch (KeeperException e) {
-            throw new CoordinatedStateException(e);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets a list of all the tables of specified states in zookeeper.
-   * @return Set of tables of specified states, empty Set if none
-   * @throws KeeperException
-   */
-  Set<TableName> getAllTables(final ZooKeeperProtos.Table.State... states)
-      throws KeeperException, InterruptedIOException {
-
-    Set<TableName> allTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode);
-    if(children == null) return allTables;
-    for (String child: children) {
-      TableName tableName = TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state;
-      try {
-        state = getTableState(watcher, tableName);
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException();
-      }
-      for (ZooKeeperProtos.Table.State expectedState: states) {
-        if (state == expectedState) {
-          allTables.add(tableName);
-          break;
-        }
-      }
-    }
-    return allTables;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   */
-  private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-                                                   final TableName tableName)
-    throws KeeperException, InterruptedException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) return null;
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-      return builder.getState();
-    } catch (IOException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-
-  /**
-   * @return true if current state isn't null and is contained
-   * in the list of expected states.
-   */
-  private boolean isTableInState(final List<ZooKeeperProtos.Table.State> expectedStates,
-                       final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && expectedStates.contains(currentState);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ec1e32c..a37c55d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3390,6 +3390,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
   }
 
+
   /**
    * Make sure that at least the specified number of region servers
    * are running
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
index 946b812..5b7ba49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -54,6 +55,7 @@ import java.util.Set;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
 
 
 /**
@@ -98,70 +100,72 @@ public class TestDrainingServer {
     final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
 
-    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "zkWatcher-Test", abortable, true);
+    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+      "zkWatcher-Test", abortable, true)) {
 
-    Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
+      Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
 
-    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
-    onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
+      onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
+      onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
 
-    Mockito.when(server.getConfiguration()).thenReturn(conf);
-    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-    Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
+      Mockito.when(server.getConfiguration()).thenReturn(conf);
+      Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+      Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+      Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
 
-    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-    cp.initialize(server);
-    cp.start();
+      CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+      cp.initialize(server);
+      cp.start();
 
-    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+      Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
 
-    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-    Mockito.when(serverManager.getOnlineServersList())
-    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+      Mockito.when(serverManager.getOnlineServersList())
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
 
-    Mockito.when(serverManager.createDestinationServersList())
-        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(null))
-        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList())
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(null))
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
 
-    for (ServerName sn : onlineServers.keySet()) {
-      Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
-      .thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
-      .thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
-    }
+      for (ServerName sn : onlineServers.keySet()) {
+        Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
+            .thenReturn(RegionOpeningState.OPENED);
+        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
+            .thenReturn(RegionOpeningState.OPENED);
+        Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
+      }
 
-    Mockito.when(master.getServerManager()).thenReturn(serverManager);
+      Mockito.when(master.getServerManager()).thenReturn(serverManager);
 
-    am = new AssignmentManager(server, serverManager,
-        balancer, startupMasterExecutor("mockExecutorService"), null, null);
+      TableStateManager tsm = mock(TableStateManager.class);
+      am = new AssignmentManager(server, serverManager,
+          balancer, startupMasterExecutor("mockExecutorService"), null, null, tsm);
 
-    Mockito.when(master.getAssignmentManager()).thenReturn(am);
-    Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
+      Mockito.when(master.getAssignmentManager()).thenReturn(am);
+      Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
 
-    am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
+      am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
 
-    zkWatcher.registerListenerFirst(am);
+      zkWatcher.registerListenerFirst(am);
 
-    addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
+      addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
 
-    am.assign(REGIONINFO, true);
+      am.assign(REGIONINFO, true);
 
-    setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
-    setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
+      setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
+      setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
 
-    am.waitForAssignment(REGIONINFO);
+      am.waitForAssignment(REGIONINFO);
 
-    assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
-    assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
+      assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
+      assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
+    }
   }
 
   @Test
@@ -207,80 +211,82 @@ public class TestDrainingServer {
     bulk.put(REGIONINFO_D, SERVERNAME_D);
     bulk.put(REGIONINFO_E, SERVERNAME_E);
 
-    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-        "zkWatcher-BulkAssignTest", abortable, true);
-
-    Mockito.when(server.getConfiguration()).thenReturn(conf);
-    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-
-    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-    cp.initialize(server);
-    cp.start();
-
-    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
-
-    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-    Mockito.when(serverManager.getOnlineServersList()).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-
-    Mockito.when(serverManager.createDestinationServersList()).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-
-    for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
-      Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(entry.getValue(),
-        entry.getKey(), -1)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
-        entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
-    }
-
-    Mockito.when(master.getServerManager()).thenReturn(serverManager);
-
-    drainedServers.add(SERVERNAME_A);
-    drainedServers.add(SERVERNAME_B);
-    drainedServers.add(SERVERNAME_C);
-    drainedServers.add(SERVERNAME_D);
-
-    am = new AssignmentManager(server, serverManager,
-      balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
-
-    Mockito.when(master.getAssignmentManager()).thenReturn(am);
-
-    zkWatcher.registerListener(am);
-
-    for (ServerName drained : drainedServers) {
-      addServerToDrainedList(drained, onlineServers, serverManager);
-    }
-
-    am.assign(bulk);
-
-    Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
-    for (RegionState rs : regionsInTransition) {
-      setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
-    }
-
-    am.waitForAssignment(REGIONINFO_A);
-    am.waitForAssignment(REGIONINFO_B);
-    am.waitForAssignment(REGIONINFO_C);
-    am.waitForAssignment(REGIONINFO_D);
-    am.waitForAssignment(REGIONINFO_E);
-
-    Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
-    for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
-      LOG.info("Region Assignment: "
-          + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
-      bunchServersAssigned.add(entry.getValue());
-    }
-
-    for (ServerName sn : drainedServers) {
-      assertFalse(bunchServersAssigned.contains(sn));
-    }
+    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+        "zkWatcher-BulkAssignTest", abortable, true)) {
+
+      Mockito.when(server.getConfiguration()).thenReturn(conf);
+      Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+      Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+
+      CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+      cp.initialize(server);
+      cp.start();
+
+      Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+
+      Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+      Mockito.when(serverManager.getOnlineServersList()).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
+
+      Mockito.when(serverManager.createDestinationServersList()).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
+
+      for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
+        Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
+        Mockito.when(serverManager.sendRegionClose(entry.getValue(),
+            entry.getKey(), -1)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
+            entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
+        Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
+      }
+
+      Mockito.when(master.getServerManager()).thenReturn(serverManager);
+
+      drainedServers.add(SERVERNAME_A);
+      drainedServers.add(SERVERNAME_B);
+      drainedServers.add(SERVERNAME_C);
+      drainedServers.add(SERVERNAME_D);
+
+      TableStateManager tsm = mock(TableStateManager.class);
+      am = new AssignmentManager(server, serverManager, balancer,
+          startupMasterExecutor("mockExecutorServiceBulk"), null, null, tsm);
+
+      Mockito.when(master.getAssignmentManager()).thenReturn(am);
+
+      zkWatcher.registerListener(am);
+
+      for (ServerName drained : drainedServers) {
+        addServerToDrainedList(drained, onlineServers, serverManager);
+      }
+
+      am.assign(bulk);
+
+      Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
+      for (RegionState rs : regionsInTransition) {
+        setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
+      }
+
+      am.waitForAssignment(REGIONINFO_A);
+      am.waitForAssignment(REGIONINFO_B);
+      am.waitForAssignment(REGIONINFO_C);
+      am.waitForAssignment(REGIONINFO_D);
+      am.waitForAssignment(REGIONINFO_E);
+
+      Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
+      for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
+        LOG.info("Region Assignment: "
+            + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
+        bunchServersAssigned.add(entry.getValue());
+      }
+
+      for (ServerName sn : drainedServers) {
+        assertFalse(bunchServersAssigned.contains(sn));
+      }
+    }
   }
 
   private void addServerToDrainedList(ServerName serverName,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index f963461..9d5259a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -42,8 +42,8 @@ public class TestFSTableDescriptorForceCreation {
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-
-    assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
+    assertTrue("Should create new table descriptor",
+        fstd.createTableDescriptor(new TableDescriptor(htd), false));
   }
 
   @Test
@@ -56,7 +56,8 @@ public class TestFSTableDescriptorForceCreation {
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     fstd.add(htd);
-    assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
+    assertFalse("Should not create new table descriptor",
+        fstd.createTableDescriptor(new TableDescriptor(htd), false));
   }
 
   @Test
@@ -67,9 +68,10 @@ public class TestFSTableDescriptorForceCreation {
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-    fstd.createTableDescriptor(htd, false);
+    TableDescriptor td = new TableDescriptor(htd);
+    fstd.createTableDescriptor(td, false);
     assertTrue("Should create new table descriptor",
-        fstd.createTableDescriptor(htd, true));
+        fstd.createTableDescriptor(td, true));
   }
 
 }
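
Editor's note: the hunks above track the FSTableDescriptors API change from HBASE-7767, where createTableDescriptor now takes the TableDescriptor wrapper (an HTableDescriptor plus a table state) instead of a bare HTableDescriptor. A minimal sketch of the new call shape, assuming a local filesystem and a made-up scratch directory and table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class CreateDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.getLocal(conf);       // any FileSystem works
        Path rootdir = new Path("/tmp/hbase-demo-root"); // hypothetical scratch dir
        FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);

        // The descriptor is handed over wrapped in a TableDescriptor; the
        // single-argument wrapper constructor is the one the tests above use.
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
        boolean created = fstd.createTableDescriptor(new TableDescriptor(htd), false);
        System.out.println("created: " + created);       // false if one already existed
      }
    }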
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 4660bbb..8d0e418 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -160,8 +160,8 @@ public class TestHColumnDescriptorDefaultVersions {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    hcds = htd.getColumnFamilies();
+    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    hcds = td.getHTableDescriptor().getColumnFamilies();
     verifyHColumnDescriptor(expected, hcds, tableName, families);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
new file mode 100644
index 0000000..19c1136
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test setting values in the descriptor
+ */
+@Category(SmallTests.class)
+public class TestTableDescriptor {
+  final static Log LOG = LogFactory.getLog(TestTableDescriptor.class);
+
+  @Test
+  public void testPb() throws DeserializationException, IOException {
+    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    final int v = 123;
+    htd.setMaxFileSize(v);
+    htd.setDurability(Durability.ASYNC_WAL);
+    htd.setReadOnly(true);
+    htd.setRegionReplication(2);
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
+    byte[] bytes = td.toByteArray();
+    TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes);
+    assertEquals(td, deserializedTd);
+    assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor());
+    assertEquals(td.getTableState(), deserializedTd.getTableState());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index c0b32b8..0a99845 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -49,11 +50,8 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -255,7 +253,7 @@ public class TestAdmin1 {
     this.admin.disableTable(ht.getName());
     assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.DISABLED));
+        ht.getName(), TableState.State.DISABLED));
 
     // Test that table is disabled
     get = new Get(row);
@@ -282,7 +280,7 @@ public class TestAdmin1 {
     this.admin.enableTable(table);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.ENABLED));
+        ht.getName(), TableState.State.ENABLED));
 
     // Test that table is enabled
     try {
@@ -354,7 +352,7 @@ public class TestAdmin1 {
     assertEquals(numTables + 1, tables.length);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED));
+        TableName.valueOf("testCreateTable"), TableState.State.ENABLED));
   }
 
   @Test (timeout=300000)
@@ -1340,11 +1338,9 @@ public class TestAdmin1 {
 
   @Test (timeout=300000)
   public void testEnableDisableAddColumnDeleteColumn() throws Exception {
-	ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
     TableName tableName = TableName.valueOf("testEnableDisableAddColumnDeleteColumn");
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
-    while (!ZKTableStateClientSideReader.isEnabledTable(zkw,
-      TableName.valueOf("testEnableDisableAddColumnDeleteColumn"))) {
+    while (!this.admin.isTableEnabled(tableName)) {
       Thread.sleep(10);
     }
     this.admin.disableTable(tableName);
@@ -1487,16 +1483,4 @@ public class TestAdmin1 {
       this.admin.deleteTable(tableName);
     }
   }
-
-  @Test (timeout=30000)
-  public void testTableNotFoundException() throws Exception {
-    ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
-    TableName table = TableName.valueOf("tableNotExists");
-    try {
-      ZKTableStateClientSideReader.isDisabledTable(zkw, table);
-      fail("Shouldn't be here");
-    } catch (TableNotFoundException e) {
-      // This is expected.
-    }
-  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index db26d37..6258f6d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -71,6 +71,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public TableStateManager getTableStateManager() {
+    return null;
+  }
+
+  @Override
   public MasterCoprocessorHost getMasterCoprocessorHost() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 28f9e83..92c045f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -69,7 +70,6 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -156,10 +156,9 @@ public class TestAssignmentManagerOnCluster {
           Bytes.toBytes(metaServerName.getServerName()));
         master.assignmentManager.waitUntilNoRegionsInTransition(60000);
       }
-      RegionState metaState =
-          MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-        assertEquals("Meta should be not in transition",
-            metaState.getState(), RegionState.State.OPEN);
+      RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper());
+      assertEquals("Meta should be not in transition",
+          metaState.getState(), RegionState.State.OPEN);
       assertNotEquals("Meta should be moved off master",
         metaServerName, master.getServerName());
       cluster.killRegionServer(metaServerName);
@@ -289,7 +288,8 @@ public class TestAssignmentManagerOnCluster {
     String table = "testAssignRegionOnRestartedServer";
     TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20);
     TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
-    TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect
+    // restart the master so that the conf change takes effect
+    TEST_UTIL.getMiniHBaseCluster().startMaster();
 
     ServerName deadServer = null;
     HMaster master = null;
@@ -888,7 +888,7 @@ public class TestAssignmentManagerOnCluster {
         }
       }
 
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLING);
       List<HRegionInfo> toAssignRegions = am.cleanOutCrashedServerReferences(destServerName);
       assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty());
       assertTrue("Regions to be assigned should be empty.", am.getRegionStates()
@@ -897,7 +897,7 @@ public class TestAssignmentManagerOnCluster {
       if (hri != null && serverName != null) {
         am.regionOnline(hri, serverName);
       }
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLED);
       TEST_UTIL.deleteTable(table);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 397d5a8..6b499f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -54,13 +56,13 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
@@ -352,13 +354,18 @@ public class TestCatalogJanitor {
       return new TableDescriptors() {
         @Override
         public HTableDescriptor remove(TableName tablename) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
           return null;
         }
 
         @Override
         public Map<String, HTableDescriptor> getAll() throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+          return null;
+        }
+
+        @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
+          // noop
           return null;
         }
 
@@ -369,14 +376,24 @@ public class TestCatalogJanitor {
         }
 
         @Override
+        public TableDescriptor getDescriptor(TableName tablename)
+            throws IOException {
+          return createTableDescriptor();
+        }
+
+        @Override
         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
           return null;
         }
 
         @Override
         public void add(HTableDescriptor htd) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+        }
 
+        @Override
+        public void add(TableDescriptor htd) throws IOException {
+          // noop
         }
         @Override
         public void setCacheOn() throws IOException {
@@ -541,6 +558,11 @@ public class TestCatalogJanitor {
     }
 
     @Override
+    public TableStateManager getTableStateManager() {
+      return null;
+    }
+
+    @Override
     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
         boolean forcible, User user) throws IOException {
     }
@@ -1169,6 +1191,11 @@ public class TestCatalogJanitor {
     return htd;
   }
 
+  private TableDescriptor createTableDescriptor() {
+    TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED);
+    return htd;
+  }
+
   private MultiResponse buildMultiResponse(MultiRequest req) {
     MultiResponse.Builder builder = MultiResponse.newBuilder();
     RegionActionResult.Builder regionActionResultBuilder =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 34715aa..80e05e00 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
@@ -84,7 +84,7 @@ public class TestMaster {
 
     try (HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
       assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME,
-        ZooKeeperProtos.Table.State.ENABLED));
+          TableState.State.ENABLED));
       TEST_UTIL.loadTable(ht, FAMILYNAME, false);
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 2228188..fcbe0a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -43,20 +43,19 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -71,10 +70,8 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.data.Stat;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -302,8 +299,8 @@ public class TestMasterFailover {
     log("Beginning to mock scenarios");
 
     // Disable the disabledTable in ZK
-    TableStateManager zktable = new ZKTableStateManager(zkw);
-    zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED);
+    TableStateManager tsm = master.getTableStateManager();
+    tsm.setTableState(disabledTable, TableState.State.DISABLED);
 
     /*
      *  ZK = OFFLINE
@@ -619,7 +616,7 @@ public class TestMasterFailover {
 
     assertTrue(" Table must be enabled.", master.getAssignmentManager()
         .getTableStateManager().isTableState(TableName.valueOf("enabledTable"),
-        ZooKeeperProtos.Table.State.ENABLED));
+        TableState.State.ENABLED));
     // we also need regions assigned out on the dead server
     List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
     enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6));
@@ -679,13 +676,11 @@ public class TestMasterFailover {
     log("Beginning to mock scenarios");
 
     // Disable the disabledTable in ZK
-    TableStateManager zktable = new ZKTableStateManager(zkw);
-    zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED);
+    TableStateManager tsm = master.getTableStateManager();
+    tsm.setTableState(disabledTable, TableState.State.DISABLED);
 
     assertTrue(" The enabled table should be identified on master fail over.",
-        zktable.isTableState(TableName.valueOf("enabledTable"),
-          ZooKeeperProtos.Table.State.ENABLED));
-
+        tsm.isTableState(TableName.valueOf("enabledTable"), TableState.State.ENABLED));
     /*
      * ZK = CLOSING
      */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index a2ecfb4..5af7b47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -102,8 +102,8 @@ public class TestMasterRestartAfterDisablingTable {
 
     assertTrue("The table should not be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING));
+        TableName.valueOf("tableRestart"), TableState.State.DISABLED,
+        TableState.State.DISABLING));
     log("Enabling table\n");
     // Need a new Admin, the previous one is on the old master
     Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
@@ -118,7 +118,7 @@ public class TestMasterRestartAfterDisablingTable {
           6, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager()
-        .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
+        .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED));
     ht.close();
     TEST_UTIL.shutdownMiniCluster();
   }
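
Editor's note: across these test hunks the table state checks move off ZooKeeperProtos.Table.State onto the client-side TableState.State enum, with the TableStateManager now reached through the master. A minimal sketch of the new check, assuming you already hold a handle to a running HMaster (the method and variable names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.master.TableStateManager;

    public class TableStateCheckSketch {
      // Mirrors the assertion above: true while the table is disabled or
      // still being disabled. isTableState accepts a varargs list of states.
      static boolean isDisabledOrDisabling(HMaster master, TableName table)
          throws Exception {
        TableStateManager tsm =
            master.getAssignmentManager().getTableStateManager();
        return tsm.isTableState(table,
            TableState.State.DISABLED, TableState.State.DISABLING);
      }
    }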
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
index 9ecac42..c1affd5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -140,7 +140,10 @@ public class TestOpenedRegionHandler {
       // create a node with OPENED state
       zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
           region, server.getServerName());
-      when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw));
+      MasterServices masterServices = Mockito.mock(MasterServices.class);
+      when(masterServices.getTableDescriptors()).thenReturn(new FSTableDescriptors(conf));
+      TableStateManager tsm = new TableStateManager(masterServices);
+      when(am.getTableStateManager()).thenReturn(tsm);
       Stat stat = new Stat();
       String nodeName = ZKAssign.getNodeName(zkw, region.getRegionInfo()
           .getEncodedName());
@@ -171,8 +174,8 @@ public class TestOpenedRegionHandler {
       } catch (Exception e) {
         expectedException = true;
       }
-      assertFalse("The process method should not throw any exception.",
-          expectedException);
+      assertFalse("The process method should not throw any exception. "
+          , expectedException);
       List<String> znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw,
           zkw.assignmentZNode);
       String regionName = znodes.get(0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
index 0410294..a35e359 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
@@ -19,10 +19,8 @@ package org.apache.hadoop.hbase.master;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 16a6450..7e5656b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -36,7 +36,6 @@ import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.exceptions.LockTimeoutException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -388,12 +386,14 @@ public class TestTableLockManager {
     choreService.scheduleChore(alterThread);
     choreService.scheduleChore(splitThread);
     TEST_UTIL.waitTableEnabled(tableName);
+
     while (true) {
       List<HRegionInfo> regions = admin.getTableRegions(tableName);
       LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
       assertEquals(admin.getTableDescriptor(tableName), desc);
       for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        assertEquals(desc, region.getTableDesc());
+        HTableDescriptor regionTableDesc = region.getTableDesc();
+        assertEquals(desc, regionTableDesc);
       }
       if (regions.size() >= 5) {
         break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index ff479d4..86a54e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
@@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -188,13 +188,13 @@ public class MasterProcedureTestingUtility {
   public static void validateTableIsEnabled(final HMaster master, final TableName tableName)
       throws IOException {
     TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
-    assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED));
+    assertTrue(tsm.isTableState(tableName, TableState.State.ENABLED));
   }
 
   public static void validateTableIsDisabled(final HMaster master, final TableName tableName)
       throws IOException {
     TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
-    assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED));
+    assertTrue(tsm.isTableState(tableName, TableState.State.DISABLED));
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
index f27150e..c4ec0ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -45,6 +45,8 @@ public class TestCreateTableProcedure2 {
     TEST_UTIL.shutdownMiniZKCluster();
   }
 
+  /*
+  Note: the fix this test covered was undone by HBASE-7767, so the test is disabled.
   @Test
   public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Exception {
     // Step 1: start mini zk cluster.
@@ -54,8 +56,9 @@ public class TestCreateTableProcedure2 {
     TableName tableName = TableName.valueOf("hbase:namespace");
     ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
     String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-    builder.setState(ZooKeeperProtos.Table.State.ENABLED);
+    HBaseProtos.TableState.Builder builder = HBaseProtos.TableState.newBuilder();
+    builder.setState(HBaseProtos.TableState.State.ENABLED);
+    builder.setTable(ProtobufUtil.toProtoTableName(tableName));
     byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
     ZKUtil.createSetData(zkw, znode, data);
     LOG.info("Create an orphaned Znode " + znode);
@@ -65,4 +68,5 @@ public class TestCreateTableProcedure2 {
     TEST_UTIL.startMiniCluster();
     assertTrue(TEST_UTIL.getHBaseCluster().getLiveMasterThreads().size() == 1);
   }
+  */
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 0b5e83f..d849f02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -273,8 +274,9 @@ public class TestTableDescriptorModificationFromClient {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
... 374 lines suppressed ...


[hbase] 04/09: HBASE-23304: RPCs needed for client meta information lookup (#904)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 18200b09e233107e31ab73fc2e215a6e793a0ff5
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Thu Dec 19 11:29:25 2019 -0800

    HBASE-23304: RPCs needed for client meta information lookup (#904)
    
    * HBASE-23304: RPCs needed for client meta information lookup
    
    This patch implements the RPCs needed for the meta information
    lookup during connection init. New tests were added to cover the RPC
    code paths. HBASE-23305 builds on this to implement the client-side
    logic.
    
    Fixed a bunch of checkstyle nits around the places the patch
    touches.
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit 4f8fbba0c01742f17fa2d85a4b944d7f42b7c2b1)
    (cherry picked from commit 488460e84015647840afca8d02ab279653b11dee)
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |   31 +-
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |    4 +-
 .../hbase/protobuf/generated/HBaseProtos.java      |  902 ++++-
 .../hbase/protobuf/generated/MasterProtos.java     | 3988 ++++++++++++++++++--
 hbase-protocol/src/main/protobuf/HBase.proto       |    6 +
 hbase-protocol/src/main/protobuf/Master.proto      |   44 +
 .../hadoop/hbase/master/MasterRpcServices.java     |   49 +-
 .../hbase/master/MetaRegionLocationCache.java      |    2 +-
 .../hbase/client/TestMetaRegionLocationCache.java  |    5 +-
 .../hbase/master/TestClientMetaServiceRPCs.java    |  158 +
 .../hadoop/hbase/protobuf/TestProtobufUtil.java    |    3 +-
 11 files changed, 4943 insertions(+), 249 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 74319d6..240133a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -3703,15 +3704,17 @@ public final class ProtobufUtil {
   /**
    * Get a ServerName from the passed in data bytes.
    * @param data Data with a serialize server name in it; can handle the old style
-   * servername where servername was host and port.  Works too with data that
-   * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that
-   * has a serialized {@link ServerName} in it.
+   *   servername where servername was host and port.  Works too with data that
+   *   begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that
+   *   has a serialized {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data
-   * to a ServerName instance.
-   * @throws DeserializationException
+   *   to a ServerName instance.
+   * @throws DeserializationException when data cannot be de-serialized as expected.
    */
   public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException {
-    if (data == null || data.length <= 0) return null;
+    if (data == null || data.length <= 0) {
+      return null;
+    }
     if (isPBMagicPrefix(data)) {
       int prefixLen = lengthOfPBMagic();
       try {
@@ -3743,4 +3746,20 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
+
+  public static HBaseProtos.RegionLocation toRegionLocation(HRegionLocation loc) {
+    HBaseProtos.RegionLocation.Builder builder = HBaseProtos.RegionLocation.newBuilder();
+    builder.setRegionInfo(HRegionInfo.convert(loc.getRegionInfo()));
+    if (loc.getServerName() != null) {
+      builder.setServerName(toServerName(loc.getServerName()));
+    }
+    builder.setSeqNum(loc.getSeqNum());
+    return builder.build();
+  }
+
+  public static HRegionLocation toRegionLocation(HBaseProtos.RegionLocation proto) {
+    org.apache.hadoop.hbase.HRegionInfo regionInfo = HRegionInfo.convert(proto.getRegionInfo());
+    ServerName serverName = proto.hasServerName() ? toServerName(proto.getServerName()) : null;
+    return new HRegionLocation(regionInfo, serverName, proto.getSeqNum());
+  }
 }
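
Editor's note: the new toRegionLocation helpers above convert in both directions between HRegionLocation and the RegionLocation protobuf message this patch adds to HBase.proto. A minimal round-trip sketch (the table, host, and sequence number are made up):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class RegionLocationRoundTrip {
      public static void main(String[] args) {
        HRegionInfo region = new HRegionInfo(TableName.valueOf("demo_table"),
            HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
        ServerName server = ServerName.valueOf("rs1.example.org", 16020, 1L);
        HRegionLocation loc = new HRegionLocation(region, server, 42L);

        // HRegionLocation -> proto. server_name is optional in the message,
        // so a null ServerName simply leaves the field unset.
        HBaseProtos.RegionLocation proto = ProtobufUtil.toRegionLocation(loc);

        // proto -> HRegionLocation. The region info, server name, and
        // seq_num all survive the round trip.
        HRegionLocation back = ProtobufUtil.toRegionLocation(proto);
        System.out.println(back.getServerName() + " @ seqNum=" + back.getSeqNum());
      }
    }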
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 51b14c8..b180fb9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -589,7 +589,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
    */
   public int getMetaReplicaIdFromZnode(String znode) {
     String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server");
-    if (znode.equals(pattern)) return DEFAULT_REPLICA_ID;
+    if (znode.equals(pattern)) {
+      return DEFAULT_REPLICA_ID;
+    }
     // the non-default replicas are of the pattern meta-region-server-<replicaId>
     String nonDefaultPattern = pattern + "-";
     return Integer.parseInt(znode.substring(nonDefaultPattern.length()));
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index d5a7150..82fcb61 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -18680,6 +18680,878 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotDescription)
   }
 
+  public interface RegionLocationOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.RegionInfo region_info = 1;
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    boolean hasRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+
+    // optional .hbase.pb.ServerName server_name = 2;
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    boolean hasServerName();
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+
+    // required int64 seq_num = 3;
+    /**
+     * <code>required int64 seq_num = 3;</code>
+     */
+    boolean hasSeqNum();
+    /**
+     * <code>required int64 seq_num = 3;</code>
+     */
+    long getSeqNum();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionLocation}
+   */
+  public static final class RegionLocation extends
+      com.google.protobuf.GeneratedMessage
+      implements RegionLocationOrBuilder {
+    // Use RegionLocation.newBuilder() to construct.
+    private RegionLocation(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RegionLocation(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RegionLocation defaultInstance;
+    public static RegionLocation getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RegionLocation getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionLocation(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = regionInfo_.toBuilder();
+              }
+              regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(regionInfo_);
+                regionInfo_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = serverName_.toBuilder();
+              }
+              serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(serverName_);
+                serverName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              seqNum_ = input.readInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RegionLocation> PARSER =
+        new com.google.protobuf.AbstractParser<RegionLocation>() {
+      public RegionLocation parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RegionLocation(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RegionLocation> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.RegionInfo region_info = 1;
+    public static final int REGION_INFO_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public boolean hasRegionInfo() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+      return regionInfo_;
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+      return regionInfo_;
+    }
+
+    // optional .hbase.pb.ServerName server_name = 2;
+    public static final int SERVER_NAME_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    public boolean hasServerName() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+      return serverName_;
+    }
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+      return serverName_;
+    }
+
+    // required int64 seq_num = 3;
+    public static final int SEQ_NUM_FIELD_NUMBER = 3;
+    private long seqNum_;
+    /**
+     * <code>required int64 seq_num = 3;</code>
+     */
+    public boolean hasSeqNum() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required int64 seq_num = 3;</code>
+     */
+    public long getSeqNum() {
+      return seqNum_;
+    }
+
+    private void initFields() {
+      regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      seqNum_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasRegionInfo()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSeqNum()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getRegionInfo().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (hasServerName()) {
+        if (!getServerName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, regionInfo_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, serverName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt64(3, seqNum_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, regionInfo_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, serverName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(3, seqNum_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) obj;
+
+      boolean result = true;
+      result = result && (hasRegionInfo() == other.hasRegionInfo());
+      if (hasRegionInfo()) {
+        result = result && getRegionInfo()
+            .equals(other.getRegionInfo());
+      }
+      result = result && (hasServerName() == other.hasServerName());
+      if (hasServerName()) {
+        result = result && getServerName()
+            .equals(other.getServerName());
+      }
+      result = result && (hasSeqNum() == other.hasSeqNum());
+      if (hasSeqNum()) {
+        result = result && (getSeqNum()
+            == other.getSeqNum());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasRegionInfo()) {
+        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionInfo().hashCode();
+      }
+      if (hasServerName()) {
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerName().hashCode();
+      }
+      if (hasSeqNum()) {
+        hash = (37 * hash) + SEQ_NUM_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getSeqNum());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RegionLocation}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getRegionInfoFieldBuilder();
+          getServerNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        seqNum_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_RegionLocation_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (regionInfoBuilder_ == null) {
+          result.regionInfo_ = regionInfo_;
+        } else {
+          result.regionInfo_ = regionInfoBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (serverNameBuilder_ == null) {
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.seqNum_ = seqNum_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance()) return this;
+        if (other.hasRegionInfo()) {
+          mergeRegionInfo(other.getRegionInfo());
+        }
+        if (other.hasServerName()) {
+          mergeServerName(other.getServerName());
+        }
+        if (other.hasSeqNum()) {
+          setSeqNum(other.getSeqNum());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasRegionInfo()) {
+          
+          return false;
+        }
+        if (!hasSeqNum()) {
+          
+          return false;
+        }
+        if (!getRegionInfo().isInitialized()) {
+          
+          return false;
+        }
+        if (hasServerName()) {
+          if (!getServerName().isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.RegionInfo region_info = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public boolean hasRegionInfo() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_;
+        } else {
+          return regionInfoBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          regionInfo_ = value;
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder setRegionInfo(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = builderForValue.build();
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+            regionInfo_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
+          } else {
+            regionInfo_ = value;
+          }
+          onChanged();
+        } else {
+          regionInfoBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder clearRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+          onChanged();
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getRegionInfoFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+        if (regionInfoBuilder_ != null) {
+          return regionInfoBuilder_.getMessageOrBuilder();
+        } else {
+          return regionInfo_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+          getRegionInfoFieldBuilder() {
+        if (regionInfoBuilder_ == null) {
+          regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+                  regionInfo_,
+                  getParentForChildren(),
+                  isClean());
+          regionInfo_ = null;
+        }
+        return regionInfoBuilder_;
+      }
+
+      // optional .hbase.pb.ServerName server_name = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public boolean hasServerName() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+        if (serverNameBuilder_ == null) {
+          return serverName_;
+        } else {
+          return serverNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          serverName_ = value;
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public Builder setServerName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          serverName_ = builderForValue.build();
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+            serverName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+          } else {
+            serverName_ = value;
+          }
+          onChanged();
+        } else {
+          serverNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getServerNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilder();
+        } else {
+          return serverName_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 2;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+
+      // required int64 seq_num = 3;
+      private long seqNum_ ;
+      /**
+       * <code>required int64 seq_num = 3;</code>
+       */
+      public boolean hasSeqNum() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required int64 seq_num = 3;</code>
+       */
+      public long getSeqNum() {
+        return seqNum_;
+      }
+      /**
+       * <code>required int64 seq_num = 3;</code>
+       */
+      public Builder setSeqNum(long value) {
+        bitField0_ |= 0x00000004;
+        seqNum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 seq_num = 3;</code>
+       */
+      public Builder clearSeqNum() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        seqNum_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionLocation)
+    }
+
+    static {
+      defaultInstance = new RegionLocation(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionLocation)
+  }
+
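// Illustrative sketch (not part of the commit): constructing the new
// RegionLocation message with its generated builder. Per the descriptors
// above, region_info and seq_num are required while server_name is optional
// and may be absent when the location is not yet known. The RegionInfo value
// is assumed to be pre-built elsewhere (e.g. from a meta scan result);
// ServerName is built inline with its standard generated setters.
//
//   HBaseProtos.ServerName serverName = HBaseProtos.ServerName.newBuilder()
//       .setHostName("rs1.example.com")   // hypothetical host
//       .setPort(16020)                   // hypothetical port
//       .build();
//   HBaseProtos.RegionLocation location = HBaseProtos.RegionLocation.newBuilder()
//       .setRegionInfo(regionInfo)        // 'regionInfo' assumed pre-built
//       .setServerName(serverName)        // optional field
//       .setSeqNum(42L)                   // required; illustrative value
//       .build();
//   // Round-trip through the wire format using the generated parser:
//   HBaseProtos.RegionLocation parsed =
//       HBaseProtos.RegionLocation.parseFrom(location.toByteArray());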
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableSchema_descriptor;
   private static
@@ -18795,6 +19667,11 @@ public final class HBaseProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_RegionLocation_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_RegionLocation_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -18857,14 +19734,17 @@ public final class HBaseProtos {
       "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" +
       "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" +
       "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" +
-      "\020\001\022\r\n\tSKIPFLUSH\020\002*r\n\013CompareType\022\010\n\004LESS" +
-      "\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_" +
-      "EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER" +
-      "\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS" +
-      "\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013" +
-      "\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004D" +
-      "AYS\020\007B>\n*org.apache.hadoop.hbase.protobu",
-      "f.generatedB\013HBaseProtosH\001\240\001\001"
+      "\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocation\022)\n\013r" +
+      "egion_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022)" +
+      "\n\013server_name\030\002 \001(\0132\024.hbase.pb.ServerNam" +
+      "e\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareType\022\010\n\004LES" +
+      "S\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT" +
+      "_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATE" +
+      "R\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECOND",
+      "S\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022" +
+      "\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004" +
+      "DAYS\020\007B>\n*org.apache.hadoop.hbase.protob" +
+      "uf.generatedB\013HBaseProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -19009,6 +19889,12 @@ public final class HBaseProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotDescription_descriptor,
               new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", });
+          internal_static_hbase_pb_RegionLocation_descriptor =
+            getDescriptor().getMessageTypes().get(23);
+          internal_static_hbase_pb_RegionLocation_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_RegionLocation_descriptor,
+              new java.lang.String[] { "RegionInfo", "ServerName", "SeqNum", });
           return null;
         }
       };
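// Illustrative note: the hunk above appends the new message at index 23 of
// the file's message types and wires the Java accessors ("RegionInfo",
// "ServerName", "SeqNum") to the proto fields. A sketch of inspecting the
// registered descriptor at runtime with the protobuf 2.5 Descriptors API:
//
//   com.google.protobuf.Descriptors.Descriptor d =
//       HBaseProtos.RegionLocation.getDescriptor();
//   System.out.println(d.getFullName());   // "hbase.pb.RegionLocation"
//   for (com.google.protobuf.Descriptors.FieldDescriptor f : d.getFields()) {
//     // Prints: 1 region_info, 2 server_name, 3 seq_num, with required flags
//     System.out.println(f.getNumber() + " " + f.getName()
//         + " required=" + f.isRequired());
//   }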
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index d6eeb55..76cbbe9 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -64697,6 +64697,3051 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.IsSnapshotCleanupEnabledResponse)
   }
 
+  public interface GetClusterIdRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetClusterIdRequest}
+   *
+   * <pre>
+   ** Request and response to get the clusterID for this cluster 
+   * </pre>
+   */
+  public static final class GetClusterIdRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetClusterIdRequestOrBuilder {
+    // Use GetClusterIdRequest.newBuilder() to construct.
+    private GetClusterIdRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetClusterIdRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetClusterIdRequest defaultInstance;
+    public static GetClusterIdRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetClusterIdRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetClusterIdRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetClusterIdRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetClusterIdRequest>() {
+      public GetClusterIdRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetClusterIdRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetClusterIdRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetClusterIdRequest}
+     *
+     * <pre>
+     ** Request and response to get the clusterID for this cluster 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdRequest)
+    }
+
+    static {
+      defaultInstance = new GetClusterIdRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdRequest)
+  }
+
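// Illustrative sketch (not part of the commit): GetClusterIdRequest carries
// no fields, and cluster_id on the response is optional, so callers must
// check presence before reading it. The response below is built locally with
// the standard generated setter (setClusterId is not shown in this hunk); in
// practice the response would come from the master RPC stub.
//
//   MasterProtos.GetClusterIdRequest request =
//       MasterProtos.GetClusterIdRequest.newBuilder().build();
//   MasterProtos.GetClusterIdResponse response =
//       MasterProtos.GetClusterIdResponse.newBuilder()
//           .setClusterId("d1a2b3c4-hypothetical-cluster-id")  // assumed setter
//           .build();
//   String clusterId = response.hasClusterId() ? response.getClusterId() : null;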
+  public interface GetClusterIdResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional string cluster_id = 1;
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    boolean hasClusterId();
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    java.lang.String getClusterId();
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    com.google.protobuf.ByteString
+        getClusterIdBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetClusterIdResponse}
+   */
+  public static final class GetClusterIdResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetClusterIdResponseOrBuilder {
+    // Use GetClusterIdResponse.newBuilder() to construct.
+    private GetClusterIdResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetClusterIdResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetClusterIdResponse defaultInstance;
+    public static GetClusterIdResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetClusterIdResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetClusterIdResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              clusterId_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetClusterIdResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetClusterIdResponse>() {
+      public GetClusterIdResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetClusterIdResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetClusterIdResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional string cluster_id = 1;
+    public static final int CLUSTER_ID_FIELD_NUMBER = 1;
+    private java.lang.Object clusterId_;
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    public boolean hasClusterId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    public java.lang.String getClusterId() {
+      java.lang.Object ref = clusterId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          clusterId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string cluster_id = 1;</code>
+     *
+     * <pre>
+     ** Not set if cluster ID could not be determined. 
+     * </pre>
+     */
+    public com.google.protobuf.ByteString
+        getClusterIdBytes() {
+      java.lang.Object ref = clusterId_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        clusterId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      clusterId_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getClusterIdBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getClusterIdBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) obj;
+
+      boolean result = true;
+      result = result && (hasClusterId() == other.hasClusterId());
+      if (hasClusterId()) {
+        result = result && getClusterId()
+            .equals(other.getClusterId());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasClusterId()) {
+        hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getClusterId().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetClusterIdResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        clusterId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetClusterIdResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.clusterId_ = clusterId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()) return this;
+        if (other.hasClusterId()) {
+          bitField0_ |= 0x00000001;
+          clusterId_ = other.clusterId_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional string cluster_id = 1;
+      private java.lang.Object clusterId_ = "";
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public boolean hasClusterId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public java.lang.String getClusterId() {
+        java.lang.Object ref = clusterId_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          clusterId_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public com.google.protobuf.ByteString
+          getClusterIdBytes() {
+        java.lang.Object ref = clusterId_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          clusterId_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public Builder setClusterId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public Builder clearClusterId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        clusterId_ = getDefaultInstance().getClusterId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string cluster_id = 1;</code>
+       *
+       * <pre>
+       ** Not set if cluster ID could not be determined. 
+       * </pre>
+       */
+      public Builder setClusterIdBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        clusterId_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetClusterIdResponse)
+    }
+
+    static {
+      defaultInstance = new GetClusterIdResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdResponse)
+  }
+
+  public interface GetActiveMasterRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetActiveMasterRequest}
+   *
+   * <pre>
+   ** Request and response to get the currently active master name for this cluster 
+   * </pre>
+   */
+  public static final class GetActiveMasterRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetActiveMasterRequestOrBuilder {
+    // Use GetActiveMasterRequest.newBuilder() to construct.
+    private GetActiveMasterRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetActiveMasterRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetActiveMasterRequest defaultInstance;
+    public static GetActiveMasterRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetActiveMasterRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetActiveMasterRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetActiveMasterRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetActiveMasterRequest>() {
+      public GetActiveMasterRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetActiveMasterRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetActiveMasterRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetActiveMasterRequest}
+     *
+     * <pre>
+     ** Request and response to get the currently active master name for this cluster 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetActiveMasterRequest)
+    }
+
+    static {
+      defaultInstance = new GetActiveMasterRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetActiveMasterRequest)
+  }
+
+  public interface GetActiveMasterResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional .hbase.pb.ServerName server_name = 1;
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    boolean hasServerName();
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetActiveMasterResponse}
+   */
+  public static final class GetActiveMasterResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetActiveMasterResponseOrBuilder {
+    // Use GetActiveMasterResponse.newBuilder() to construct.
+    private GetActiveMasterResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetActiveMasterResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetActiveMasterResponse defaultInstance;
+    public static GetActiveMasterResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetActiveMasterResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetActiveMasterResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = serverName_.toBuilder();
+              }
+              serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(serverName_);
+                serverName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetActiveMasterResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetActiveMasterResponse>() {
+      public GetActiveMasterResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetActiveMasterResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetActiveMasterResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional .hbase.pb.ServerName server_name = 1;
+    public static final int SERVER_NAME_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    public boolean hasServerName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+      return serverName_;
+    }
+    /**
+     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+     *
+     * <pre>
+     ** Not set if an active master could not be determined. 
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+      return serverName_;
+    }
+
+    private void initFields() {
+      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (hasServerName()) {
+        if (!getServerName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, serverName_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, serverName_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) obj;
+
+      boolean result = true;
+      result = result && (hasServerName() == other.hasServerName());
+      if (hasServerName()) {
+        result = result && getServerName()
+            .equals(other.getServerName());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasServerName()) {
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerName().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetActiveMasterResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getServerNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (serverNameBuilder_ == null) {
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance()) return this;
+        if (other.hasServerName()) {
+          mergeServerName(other.getServerName());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasServerName()) {
+          if (!getServerName().isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional .hbase.pb.ServerName server_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public boolean hasServerName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+        if (serverNameBuilder_ == null) {
+          return serverName_;
+        } else {
+          return serverNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          serverName_ = value;
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public Builder setServerName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          serverName_ = builderForValue.build();
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+            serverName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+          } else {
+            serverName_ = value;
+          }
+          onChanged();
+        } else {
+          serverNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getServerNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilder();
+        } else {
+          return serverName_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
+       *
+       * <pre>
+       ** Not set if an active master could not be determined. 
+       * </pre>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetActiveMasterResponse)
+    }
+
+    static {
+      defaultInstance = new GetActiveMasterResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetActiveMasterResponse)
+  }
+
+  public interface GetMetaRegionLocationsRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest}
+   *
+   * <pre>
+   ** Request and response to get the current list of meta region locations 
+   * </pre>
+   */
+  public static final class GetMetaRegionLocationsRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetMetaRegionLocationsRequestOrBuilder {
+    // Use GetMetaRegionLocationsRequest.newBuilder() to construct.
+    private GetMetaRegionLocationsRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetMetaRegionLocationsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetMetaRegionLocationsRequest defaultInstance;
+    public static GetMetaRegionLocationsRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetMetaRegionLocationsRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetMetaRegionLocationsRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetMetaRegionLocationsRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetMetaRegionLocationsRequest>() {
+      public GetMetaRegionLocationsRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetMetaRegionLocationsRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetMetaRegionLocationsRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetMetaRegionLocationsRequest}
+     *
+     * <pre>
+     ** Request and response to get the current list of meta region locations 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsRequest)
+    }
+
+    static {
+      defaultInstance = new GetMetaRegionLocationsRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsRequest)
+  }
+
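
For context, a minimal sketch of round-tripping the empty request message generated above. It relies only on members visible in this hunk (newBuilder, parseFrom, equals) plus toByteArray() inherited from the protobuf runtime, so it should behave as commented:

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;

    public class GetMetaRegionLocationsRequestSketch {
      public static void main(String[] args) throws Exception {
        // The request declares no fields, so the builder has nothing to set.
        GetMetaRegionLocationsRequest request =
            GetMetaRegionLocationsRequest.newBuilder().build();
        // A field-less message with no unknown fields serializes to zero bytes.
        byte[] wire = request.toByteArray();
        GetMetaRegionLocationsRequest parsed =
            GetMetaRegionLocationsRequest.parseFrom(wire);
        // equals() above compares only unknown fields, so this prints true.
        System.out.println(parsed.equals(request));
      }
    }
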
+  public interface GetMetaRegionLocationsResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated .hbase.pb.RegionLocation meta_locations = 1;
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> 
+        getMetaLocationsList();
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index);
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    int getMetaLocationsCount();
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> 
+        getMetaLocationsOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder(
+        int index);
+  }
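
The OrBuilder interface just closed is implemented by both the immutable message and its Builder, so read-only helpers can accept either view without forcing a build. A short illustration, assuming nothing beyond the accessors declared above:

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponseOrBuilder;

    public class OrBuilderSketch {
      // Works for the message and for its Builder alike.
      static int countLocations(GetMetaRegionLocationsResponseOrBuilder view) {
        return view.getMetaLocationsCount();
      }

      public static void main(String[] args) {
        GetMetaRegionLocationsResponse.Builder builder =
            GetMetaRegionLocationsResponse.newBuilder();
        System.out.println(countLocations(builder));          // 0, via the Builder
        System.out.println(countLocations(builder.build()));  // 0, via the message
      }
    }
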
+  /**
+   * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse}
+   */
+  public static final class GetMetaRegionLocationsResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetMetaRegionLocationsResponseOrBuilder {
+    // Use GetMetaRegionLocationsResponse.newBuilder() to construct.
+    private GetMetaRegionLocationsResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetMetaRegionLocationsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetMetaRegionLocationsResponse defaultInstance;
+    public static GetMetaRegionLocationsResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetMetaRegionLocationsResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetMetaRegionLocationsResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                metaLocations_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              metaLocations_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetMetaRegionLocationsResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetMetaRegionLocationsResponse>() {
+      public GetMetaRegionLocationsResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetMetaRegionLocationsResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetMetaRegionLocationsResponse> getParserForType() {
+      return PARSER;
+    }
+
+    // repeated .hbase.pb.RegionLocation meta_locations = 1;
+    public static final int META_LOCATIONS_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> metaLocations_;
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> getMetaLocationsList() {
+      return metaLocations_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> 
+        getMetaLocationsOrBuilderList() {
+      return metaLocations_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    public int getMetaLocationsCount() {
+      return metaLocations_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) {
+      return metaLocations_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+     *
+     * <pre>
+     ** Not set if meta region locations could not be determined. 
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder(
+        int index) {
+      return metaLocations_.get(index);
+    }
+
+    private void initFields() {
+      metaLocations_ = java.util.Collections.emptyList();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      for (int i = 0; i < getMetaLocationsCount(); i++) {
+        if (!getMetaLocations(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < metaLocations_.size(); i++) {
+        output.writeMessage(1, metaLocations_.get(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < metaLocations_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, metaLocations_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) obj;
+
+      boolean result = true;
+      result = result && getMetaLocationsList()
+          .equals(other.getMetaLocationsList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getMetaLocationsCount() > 0) {
+        hash = (37 * hash) + META_LOCATIONS_FIELD_NUMBER;
+        hash = (53 * hash) + getMetaLocationsList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetMetaRegionLocationsResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getMetaLocationsFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (metaLocationsBuilder_ == null) {
+          metaLocations_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          metaLocationsBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse(this);
+        int from_bitField0_ = bitField0_;
+        if (metaLocationsBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            metaLocations_ = java.util.Collections.unmodifiableList(metaLocations_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.metaLocations_ = metaLocations_;
+        } else {
+          result.metaLocations_ = metaLocationsBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()) return this;
+        if (metaLocationsBuilder_ == null) {
+          if (!other.metaLocations_.isEmpty()) {
+            if (metaLocations_.isEmpty()) {
+              metaLocations_ = other.metaLocations_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureMetaLocationsIsMutable();
+              metaLocations_.addAll(other.metaLocations_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.metaLocations_.isEmpty()) {
+            if (metaLocationsBuilder_.isEmpty()) {
+              metaLocationsBuilder_.dispose();
+              metaLocationsBuilder_ = null;
+              metaLocations_ = other.metaLocations_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              metaLocationsBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getMetaLocationsFieldBuilder() : null;
+            } else {
+              metaLocationsBuilder_.addAllMessages(other.metaLocations_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getMetaLocationsCount(); i++) {
+          if (!getMetaLocations(i).isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // repeated .hbase.pb.RegionLocation meta_locations = 1;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> metaLocations_ =
+        java.util.Collections.emptyList();
+      private void ensureMetaLocationsIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          metaLocations_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation>(metaLocations_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> metaLocationsBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> getMetaLocationsList() {
+        if (metaLocationsBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(metaLocations_);
+        } else {
+          return metaLocationsBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public int getMetaLocationsCount() {
+        if (metaLocationsBuilder_ == null) {
+          return metaLocations_.size();
+        } else {
+          return metaLocationsBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation getMetaLocations(int index) {
+        if (metaLocationsBuilder_ == null) {
+          return metaLocations_.get(index);
+        } else {
+          return metaLocationsBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder setMetaLocations(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) {
+        if (metaLocationsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMetaLocationsIsMutable();
+          metaLocations_.set(index, value);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder setMetaLocations(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) {
+        if (metaLocationsBuilder_ == null) {
+          ensureMetaLocationsIsMutable();
+          metaLocations_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          metaLocationsBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder addMetaLocations(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) {
+        if (metaLocationsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMetaLocationsIsMutable();
+          metaLocations_.add(value);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder addMetaLocations(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation value) {
+        if (metaLocationsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMetaLocationsIsMutable();
+          metaLocations_.add(index, value);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder addMetaLocations(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) {
+        if (metaLocationsBuilder_ == null) {
+          ensureMetaLocationsIsMutable();
+          metaLocations_.add(builderForValue.build());
+          onChanged();
+        } else {
+          metaLocationsBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder addMetaLocations(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder builderForValue) {
+        if (metaLocationsBuilder_ == null) {
+          ensureMetaLocationsIsMutable();
+          metaLocations_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          metaLocationsBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder addAllMetaLocations(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation> values) {
+        if (metaLocationsBuilder_ == null) {
+          ensureMetaLocationsIsMutable();
+          super.addAll(values, metaLocations_);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder clearMetaLocations() {
+        if (metaLocationsBuilder_ == null) {
+          metaLocations_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public Builder removeMetaLocations(int index) {
+        if (metaLocationsBuilder_ == null) {
+          ensureMetaLocationsIsMutable();
+          metaLocations_.remove(index);
+          onChanged();
+        } else {
+          metaLocationsBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder getMetaLocationsBuilder(
+          int index) {
+        return getMetaLocationsFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder getMetaLocationsOrBuilder(
+          int index) {
+        if (metaLocationsBuilder_ == null) {
+          return metaLocations_.get(index);  } else {
+          return metaLocationsBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> 
+           getMetaLocationsOrBuilderList() {
+        if (metaLocationsBuilder_ != null) {
+          return metaLocationsBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(metaLocations_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder() {
+        return getMetaLocationsFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder addMetaLocationsBuilder(
+          int index) {
+        return getMetaLocationsFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLocation meta_locations = 1;</code>
+       *
+       * <pre>
+       ** Not set if meta region locations could not be determined. 
+       * </pre>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder> 
+           getMetaLocationsBuilderList() {
+        return getMetaLocationsFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder> 
+          getMetaLocationsFieldBuilder() {
+        if (metaLocationsBuilder_ == null) {
+          metaLocationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocationOrBuilder>(
+                  metaLocations_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          metaLocations_ = null;
+        }
+        return metaLocationsBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetMetaRegionLocationsResponse)
+    }
+
+    static {
+      defaultInstance = new GetMetaRegionLocationsResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsResponse)
+  }
+
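
A sketch of consuming the repeated meta_locations field defined above. RegionLocation's own members live in HBaseProtos rather than in this hunk, so the example prints each element via the generic Message toString and treats an empty list as "not determined", per the proto comment:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;

    public class GetMetaRegionLocationsResponseSketch {
      public static void main(String[] args) {
        // With no addMetaLocations(...) calls the list stays empty, which the
        // proto comment defines as "locations could not be determined".
        GetMetaRegionLocationsResponse response =
            GetMetaRegionLocationsResponse.newBuilder().build();
        if (response.getMetaLocationsCount() == 0) {
          System.out.println("meta region locations not determined");
        }
        for (RegionLocation location : response.getMetaLocationsList()) {
          System.out.println(location); // generic Message#toString
        }
      }
    }
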
   /**
    * Protobuf service {@code hbase.pb.MasterService}
    */
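
The hunk below renumbers the blocking stub's method indices to make room for the new RPCs. As a rough sketch of how such a stub is driven: the BlockingRpcChannel here is a placeholder supplied by the caller, and getMetaRegionLocations is assumed to be among the newly generated stub methods, which this excerpt does not show:

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public final class MasterServiceStubSketch {
      // channel comes from the surrounding RPC layer; HBase wires in its own.
      static MasterProtos.GetMetaRegionLocationsResponse fetchMetaLocations(
          BlockingRpcChannel channel) throws ServiceException {
        MasterProtos.MasterService.BlockingInterface stub =
            MasterProtos.MasterService.newBlockingStub(channel);
        // Assumed generated method for the new RPC (not shown in this hunk).
        return stub.getMetaRegionLocations(
            null, // RpcController; HBase substitutes its own implementation
            MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance());
      }
    }
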
@@ -69662,324 +72707,733 @@ public final class MasterProtos {
           getDescriptor().getMethods().get(37),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(38),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(39),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(40),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(41),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(42),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(43),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(44),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(45),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(46),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(47),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(48),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(49),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(50),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(51),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(52),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(53),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(54),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(38),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(55),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse deleteSnapshot(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(39),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(56),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse isSnapshotDone(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(40),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(57),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse restoreSnapshot(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(41),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(58),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(42),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(59),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(43),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(60),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedureWithRet(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(44),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(61),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse isProcedureDone(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(45),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(62),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse modifyNamespace(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(46),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(63),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance());
+      }
+
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
+  }
+
+  /**
+   * Protobuf service {@code hbase.pb.ClientMetaService}
+   *
+   * <pre>
+   **
+   * Implements all the RPCs clients need to look up cluster meta information for connection establishment.
+   * </pre>
+   */
+  public static abstract class ClientMetaService
+      implements com.google.protobuf.Service {
+    protected ClientMetaService() {}
+
+    public interface Interface {
+      /**
+       * <code>rpc GetClusterId(.hbase.pb.GetClusterIdRequest) returns (.hbase.pb.GetClusterIdResponse);</code>
+       *
+       * <pre>
+       **
+       * Get Cluster ID for this cluster.
+       * </pre>
+       */
+      public abstract void getClusterId(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done);
+
+      /**
+       * <code>rpc GetActiveMaster(.hbase.pb.GetActiveMasterRequest) returns (.hbase.pb.GetActiveMasterResponse);</code>
+       *
+       * <pre>
+       **
+       * Get active master server name for this cluster.
+       * </pre>
+       */
+      public abstract void getActiveMaster(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done);
+
+      /**
+       * <code>rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse);</code>
+       *
+       * <pre>
+       **
+       * Get current meta replicas' region locations.
+       * </pre>
+       */
+      public abstract void getMetaRegionLocations(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done);
+
+    }
+
+    public static com.google.protobuf.Service newReflectiveService(
+        final Interface impl) {
+      return new ClientMetaService() {
+        @java.lang.Override
+        public  void getClusterId(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done) {
+          impl.getClusterId(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getActiveMaster(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done) {
+          impl.getActiveMaster(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void getMetaRegionLocations(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done) {
+          impl.getMetaRegionLocations(controller, request, done);
+        }
+
+      };
+    }
+
+    public static com.google.protobuf.BlockingService
+        newReflectiveBlockingService(final BlockingInterface impl) {
+      return new com.google.protobuf.BlockingService() {
+        public final com.google.protobuf.Descriptors.ServiceDescriptor
+            getDescriptorForType() {
+          return getDescriptor();
+        }
+
+        public final com.google.protobuf.Message callBlockingMethod(
+            com.google.protobuf.Descriptors.MethodDescriptor method,
+            com.google.protobuf.RpcController controller,
+            com.google.protobuf.Message request)
+            throws com.google.protobuf.ServiceException {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.callBlockingMethod() given method descriptor for " +
+              "wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return impl.getClusterId(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)request);
+            case 1:
+              return impl.getActiveMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)request);
+            case 2:
+              return impl.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request);
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getRequestPrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getRequestPrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance();
+            case 1:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+            case 2:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getResponsePrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getResponsePrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance();
+            case 1:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+            case 2:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+      };
+    }
+
+    /**
+     * <code>rpc GetClusterId(.hbase.pb.GetClusterIdRequest) returns (.hbase.pb.GetClusterIdResponse);</code>
+     *
+     * <pre>
+     **
+     * Get Cluster ID for this cluster.
+     * </pre>
+     */
+    public abstract void getClusterId(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done);
+
+    /**
+     * <code>rpc GetActiveMaster(.hbase.pb.GetActiveMasterRequest) returns (.hbase.pb.GetActiveMasterResponse);</code>
+     *
+     * <pre>
+     **
+     * Get active master server name for this cluster.
+     * </pre>
+     */
+    public abstract void getActiveMaster(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done);
+
+    /**
+     * <code>rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse);</code>
+     *
+     * <pre>
+     **
+     * Get current meta replicas' region locations.
+     * </pre>
+     */
+    public abstract void getMetaRegionLocations(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done);
+
+    public static final
+        com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getServices().get(1);
+    }
+    public final com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public final void callMethod(
+        com.google.protobuf.Descriptors.MethodDescriptor method,
+        com.google.protobuf.RpcController controller,
+        com.google.protobuf.Message request,
+        com.google.protobuf.RpcCallback<
+          com.google.protobuf.Message> done) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.callMethod() given method descriptor for wrong " +
+          "service type.");
       }
-
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(47),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance());
+      switch(method.getIndex()) {
+        case 0:
+          this.getClusterId(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse>specializeCallback(
+              done));
+          return;
+        case 1:
+          this.getActiveMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse>specializeCallback(
+              done));
+          return;
+        case 2:
+          this.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse>specializeCallback(
+              done));
+          return;
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
       }
+    }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse deleteNamespace(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(48),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance());
+    public final com.google.protobuf.Message
+        getRequestPrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getRequestPrototype() given method " +
+          "descriptor for wrong service type.");
       }
-
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(49),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance());
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance();
+        case 1:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+        case 2:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
       }
+    }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(50),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance());
+    public final com.google.protobuf.Message
+        getResponsePrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getResponsePrototype() given method " +
+          "descriptor for wrong service type.");
       }
-
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(51),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance());
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance();
+        case 1:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+        case 2:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
       }
+    }
 
+    public static Stub newStub(
+        com.google.protobuf.RpcChannel channel) {
+      return new Stub(channel);
+    }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(52),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
+    public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService implements Interface {
+      private Stub(com.google.protobuf.RpcChannel channel) {
+        this.channel = channel;
       }
 
+      private final com.google.protobuf.RpcChannel channel;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(53),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
+      public com.google.protobuf.RpcChannel getChannel() {
+        return channel;
       }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+      public  void getClusterId(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(54),
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(0),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()));
       }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+      public  void getActiveMaster(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(55),
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(1),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance()));
       }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult(
+      public  void getMetaRegionLocations(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(56),
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(2),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()));
       }
+    }
 
+    public static BlockingInterface newBlockingStub(
+        com.google.protobuf.BlockingRpcChannel channel) {
+      return new BlockingStub(channel);
+    }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getSecurityCapabilities(
+    public interface BlockingInterface {
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getClusterId(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(57),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
-      }
-
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request)
+          throws com.google.protobuf.ServiceException;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getActiveMaster(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(58),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
-      }
-
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request)
+          throws com.google.protobuf.ServiceException;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse listProcedures(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getMetaRegionLocations(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(59),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
-      }
-
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request)
+          throws com.google.protobuf.ServiceException;
+    }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse clearDeadServers(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(60),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance());
+    private static final class BlockingStub implements BlockingInterface {
+      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+        this.channel = channel;
       }
 
+      private final com.google.protobuf.BlockingRpcChannel channel;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse listNamespaces(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse getClusterId(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(61),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(0),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getActiveMaster(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(62),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(1),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance());
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getMetaRegionLocations(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(63),
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(2),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance());
       }
 
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
+    // @@protoc_insertion_point(class_scope:hbase.pb.ClientMetaService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
@@ -70597,6 +74051,36 @@ public final class MasterProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetClusterIdRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetClusterIdResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -70811,158 +74295,172 @@ public final class MasterProtos {
       "shotCleanupResponse\022\035\n\025prev_snapshot_cle" +
       "anup\030\001 \002(\010\"!\n\037IsSnapshotCleanupEnabledRe" +
       "quest\"3\n IsSnapshotCleanupEnabledRespons" +
-      "e\022\017\n\007enabled\030\001 \002(\010*(\n\020MasterSwitchType\022\t" +
+      "e\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetClusterIdReques" +
+      "t\"*\n\024GetClusterIdResponse\022\022\n\ncluster_id\030" +
+      "\001 \001(\t\"\030\n\026GetActiveMasterRequest\"D\n\027GetAc" +
+      "tiveMasterResponse\022)\n\013server_name\030\001 \001(\0132" +
+      "\024.hbase.pb.ServerName\"\037\n\035GetMetaRegionLo",
+      "cationsRequest\"R\n\036GetMetaRegionLocations" +
+      "Response\0220\n\016meta_locations\030\001 \003(\0132\030.hbase" +
+      ".pb.RegionLocation*(\n\020MasterSwitchType\022\t" +
       "\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\241.\n\rMasterService\022e" +
       "\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSch" +
       "emaAlterStatusRequest\032&.hbase.pb.GetSche" +
-      "maAlterStatusResponse\022b\n\023GetTableDescrip",
+      "maAlterStatusResponse\022b\n\023GetTableDescrip" +
       "tors\022$.hbase.pb.GetTableDescriptorsReque" +
       "st\032%.hbase.pb.GetTableDescriptorsRespons" +
-      "e\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNa" +
+      "e\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNa",
       "mesRequest\032\037.hbase.pb.GetTableNamesRespo" +
       "nse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetCl" +
       "usterStatusRequest\032\".hbase.pb.GetCluster" +
       "StatusResponse\022V\n\017IsMasterRunning\022 .hbas" +
       "e.pb.IsMasterRunningRequest\032!.hbase.pb.I" +
       "sMasterRunningResponse\022D\n\tAddColumn\022\032.hb" +
-      "ase.pb.AddColumnRequest\032\033.hbase.pb.AddCo",
+      "ase.pb.AddColumnRequest\032\033.hbase.pb.AddCo" +
       "lumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb." +
       "DeleteColumnRequest\032\036.hbase.pb.DeleteCol" +
-      "umnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.M" +
+      "umnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.M",
       "odifyColumnRequest\032\036.hbase.pb.ModifyColu" +
       "mnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Move" +
       "RegionRequest\032\034.hbase.pb.MoveRegionRespo" +
       "nse\022k\n\026DispatchMergingRegions\022\'.hbase.pb" +
       ".DispatchMergingRegionsRequest\032(.hbase.p" +
       "b.DispatchMergingRegionsResponse\022M\n\014Assi" +
-      "gnRegion\022\035.hbase.pb.AssignRegionRequest\032",
+      "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" +
       "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" +
       "ignRegion\022\037.hbase.pb.UnassignRegionReque" +
-      "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" +
+      "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r",
       "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" +
       "quest\032\037.hbase.pb.OfflineRegionResponse\022J" +
       "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" +
       "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" +
       "uncateTable\022\036.hbase.pb.TruncateTableRequ" +
       "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" +
-      "EnableTable\022\034.hbase.pb.EnableTableReques",
+      "EnableTable\022\034.hbase.pb.EnableTableReques" +
       "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" +
       "bleTable\022\035.hbase.pb.DisableTableRequest\032" +
-      "\036.hbase.pb.DisableTableResponse\022J\n\013Modif" +
+      "\036.hbase.pb.DisableTableResponse\022J\n\013Modif",
       "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" +
       "base.pb.ModifyTableResponse\022J\n\013CreateTab" +
       "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" +
       ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" +
       "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" +
       "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" +
-      "MasterRequest\032\034.hbase.pb.StopMasterRespo",
+      "MasterRequest\032\034.hbase.pb.StopMasterRespo" +
       "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" +
       ".pb.IsInMaintenanceModeRequest\032%.hbase.p" +
-      "b.IsInMaintenanceModeResponse\022>\n\007Balance" +
+      "b.IsInMaintenanceModeResponse\022>\n\007Balance",
       "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" +
       "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" +
       "ase.pb.SetBalancerRunningRequest\032$.hbase" +
       ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" +
       "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" +
       "Request\032#.hbase.pb.IsBalancerEnabledResp" +
-      "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p",
+      "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" +
       "b.SetSplitOrMergeEnabledRequest\032(.hbase." +
       "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS" +
-      "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" +
+      "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM",
       "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" +
       "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" +
       ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" +
       "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" +
       "e.pb.SetNormalizerRunningRequest\032&.hbase" +
       ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" +
-      "rmalizerEnabled\022$.hbase.pb.IsNormalizerE",
+      "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" +
       "nabledRequest\032%.hbase.pb.IsNormalizerEna" +
       "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p" +
-      "b.RunCatalogScanRequest\032 .hbase.pb.RunCa" +
+      "b.RunCatalogScanRequest\032 .hbase.pb.RunCa",
       "talogScanResponse\022e\n\024EnableCatalogJanito" +
       "r\022%.hbase.pb.EnableCatalogJanitorRequest" +
       "\032&.hbase.pb.EnableCatalogJanitorResponse" +
       "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" +
       "sCatalogJanitorEnabledRequest\032).hbase.pb" +
       ".IsCatalogJanitorEnabledResponse\022V\n\017RunC" +
-      "leanerChore\022 .hbase.pb.RunCleanerChoreRe",
+      "leanerChore\022 .hbase.pb.RunCleanerChoreRe" +
       "quest\032!.hbase.pb.RunCleanerChoreResponse" +
       "\022k\n\026SetCleanerChoreRunning\022\'.hbase.pb.Se" +
-      "tCleanerChoreRunningRequest\032(.hbase.pb.S" +
+      "tCleanerChoreRunningRequest\032(.hbase.pb.S",
       "etCleanerChoreRunningResponse\022h\n\025IsClean" +
       "erChoreEnabled\022&.hbase.pb.IsCleanerChore" +
       "EnabledRequest\032\'.hbase.pb.IsCleanerChore" +
       "EnabledResponse\022^\n\021ExecMasterService\022#.h" +
       "base.pb.CoprocessorServiceRequest\032$.hbas" +
       "e.pb.CoprocessorServiceResponse\022A\n\010Snaps" +
-      "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p",
+      "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p" +
       "b.SnapshotResponse\022h\n\025GetCompletedSnapsh" +
       "ots\022&.hbase.pb.GetCompletedSnapshotsRequ" +
-      "est\032\'.hbase.pb.GetCompletedSnapshotsResp" +
+      "est\032\'.hbase.pb.GetCompletedSnapshotsResp",
       "onse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delete" +
       "SnapshotRequest\032 .hbase.pb.DeleteSnapsho" +
       "tResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.I" +
       "sSnapshotDoneRequest\032 .hbase.pb.IsSnapsh" +
       "otDoneResponse\022V\n\017RestoreSnapshot\022 .hbas" +
       "e.pb.RestoreSnapshotRequest\032!.hbase.pb.R" +
-      "estoreSnapshotResponse\022h\n\025IsRestoreSnaps",
+      "estoreSnapshotResponse\022h\n\025IsRestoreSnaps" +
       "hotDone\022&.hbase.pb.IsRestoreSnapshotDone" +
       "Request\032\'.hbase.pb.IsRestoreSnapshotDone" +
-      "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe" +
+      "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe",
       "cProcedureRequest\032\037.hbase.pb.ExecProcedu" +
       "reResponse\022W\n\024ExecProcedureWithRet\022\036.hba" +
       "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" +
       "ecProcedureResponse\022V\n\017IsProcedureDone\022 " +
       ".hbase.pb.IsProcedureDoneRequest\032!.hbase" +
       ".pb.IsProcedureDoneResponse\022V\n\017ModifyNam" +
-      "espace\022 .hbase.pb.ModifyNamespaceRequest",
+      "espace\022 .hbase.pb.ModifyNamespaceRequest" +
       "\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017C" +
       "reateNamespace\022 .hbase.pb.CreateNamespac" +
-      "eRequest\032!.hbase.pb.CreateNamespaceRespo" +
+      "eRequest\032!.hbase.pb.CreateNamespaceRespo",
       "nse\022V\n\017DeleteNamespace\022 .hbase.pb.Delete" +
       "NamespaceRequest\032!.hbase.pb.DeleteNamesp" +
       "aceResponse\022k\n\026GetNamespaceDescriptor\022\'." +
       "hbase.pb.GetNamespaceDescriptorRequest\032(" +
       ".hbase.pb.GetNamespaceDescriptorResponse" +
       "\022q\n\030ListNamespaceDescriptors\022).hbase.pb." +
-      "ListNamespaceDescriptorsRequest\032*.hbase.",
+      "ListNamespaceDescriptorsRequest\032*.hbase." +
       "pb.ListNamespaceDescriptorsResponse\022\206\001\n\037" +
       "ListTableDescriptorsByNamespace\0220.hbase." +
-      "pb.ListTableDescriptorsByNamespaceReques" +
+      "pb.ListTableDescriptorsByNamespaceReques",
       "t\0321.hbase.pb.ListTableDescriptorsByNames" +
       "paceResponse\022t\n\031ListTableNamesByNamespac" +
       "e\022*.hbase.pb.ListTableNamesByNamespaceRe" +
       "quest\032+.hbase.pb.ListTableNamesByNamespa" +
       "ceResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" +
       "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" +
-      "\037getLastMajorCompactionTimestamp\022).hbase",
+      "\037getLastMajorCompactionTimestamp\022).hbase" +
       ".pb.MajorCompactionTimestampRequest\032*.hb" +
       "ase.pb.MajorCompactionTimestampResponse\022" +
-      "\212\001\n(getLastMajorCompactionTimestampForRe" +
+      "\212\001\n(getLastMajorCompactionTimestampForRe",
       "gion\0222.hbase.pb.MajorCompactionTimestamp" +
       "ForRegionRequest\032*.hbase.pb.MajorCompact" +
       "ionTimestampResponse\022_\n\022getProcedureResu" +
       "lt\022#.hbase.pb.GetProcedureResultRequest\032" +
       "$.hbase.pb.GetProcedureResultResponse\022h\n" +
       "\027getSecurityCapabilities\022%.hbase.pb.Secu" +
-      "rityCapabilitiesRequest\032&.hbase.pb.Secur",
+      "rityCapabilitiesRequest\032&.hbase.pb.Secur" +
       "ityCapabilitiesResponse\022S\n\016AbortProcedur" +
       "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba" +
-      "se.pb.AbortProcedureResponse\022S\n\016ListProc" +
+      "se.pb.AbortProcedureResponse\022S\n\016ListProc",
       "edures\022\037.hbase.pb.ListProceduresRequest\032" +
       " .hbase.pb.ListProceduresResponse\022Y\n\020Cle" +
       "arDeadServers\022!.hbase.pb.ClearDeadServer" +
       "sRequest\032\".hbase.pb.ClearDeadServersResp" +
       "onse\022S\n\016ListNamespaces\022\037.hbase.pb.ListNa" +
       "mespacesRequest\032 .hbase.pb.ListNamespace" +
-      "sResponse\022b\n\025SwitchSnapshotCleanup\022#.hba",
+      "sResponse\022b\n\025SwitchSnapshotCleanup\022#.hba" +
       "se.pb.SetSnapshotCleanupRequest\032$.hbase." +
       "pb.SetSnapshotCleanupResponse\022q\n\030IsSnaps" +
-      "hotCleanupEnabled\022).hbase.pb.IsSnapshotC" +
+      "hotCleanupEnabled\022).hbase.pb.IsSnapshotC",
       "leanupEnabledRequest\032*.hbase.pb.IsSnapsh" +
-      "otCleanupEnabledResponseBB\n*org.apache.h" +
-      "adoop.hbase.protobuf.generatedB\014MasterPr" +
-      "otosH\001\210\001\001\240\001\001"
+      "otCleanupEnabledResponse2\247\002\n\021ClientMetaS" +
+      "ervice\022M\n\014GetClusterId\022\035.hbase.pb.GetClu" +
+      "sterIdRequest\032\036.hbase.pb.GetClusterIdRes" +
+      "ponse\022V\n\017GetActiveMaster\022 .hbase.pb.GetA" +
+      "ctiveMasterRequest\032!.hbase.pb.GetActiveM" +
+      "asterResponse\022k\n\026GetMetaRegionLocations\022" +
+      "\'.hbase.pb.GetMetaRegionLocationsRequest" +
+      "\032(.hbase.pb.GetMetaRegionLocationsRespon" +
+      "seBB\n*org.apache.hadoop.hbase.protobuf.g",
+      "eneratedB\014MasterProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -71707,6 +75205,42 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
+          internal_static_hbase_pb_GetClusterIdRequest_descriptor =
+            getDescriptor().getMessageTypes().get(123);
+          internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetClusterIdRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_GetClusterIdResponse_descriptor =
+            getDescriptor().getMessageTypes().get(124);
+          internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetClusterIdResponse_descriptor,
+              new java.lang.String[] { "ClusterId", });
+          internal_static_hbase_pb_GetActiveMasterRequest_descriptor =
+            getDescriptor().getMessageTypes().get(125);
+          internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetActiveMasterRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_GetActiveMasterResponse_descriptor =
+            getDescriptor().getMessageTypes().get(126);
+          internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetActiveMasterResponse_descriptor,
+              new java.lang.String[] { "ServerName", });
+          internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor =
+            getDescriptor().getMessageTypes().get(127);
+          internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor =
+            getDescriptor().getMessageTypes().get(128);
+          internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor,
+              new java.lang.String[] { "MetaLocations", });
           return null;
         }
       };
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 44b722d..a594ccd 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -234,3 +234,9 @@ message SnapshotDescription {
   optional UsersAndPermissions users_and_permissions = 7;
   optional int64 ttl = 8 [default = 0];
 }
+
+message RegionLocation {
+  required RegionInfo region_info = 1;
+  optional ServerName server_name = 2;
+  required int64 seq_num = 3;
+}
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index f43c4e0..27b5d75 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -903,3 +903,47 @@ service MasterService {
 
 
 }
+
+/** Request and response to get the cluster ID for this cluster */
+message GetClusterIdRequest {
+}
+message GetClusterIdResponse {
+  /** Not set if cluster ID could not be determined. */
+  optional string cluster_id = 1;
+}
+
+/** Request and response to get the currently active master name for this cluster */
+message GetActiveMasterRequest {
+}
+message GetActiveMasterResponse {
+  /** Not set if an active master could not be determined. */
+  optional ServerName server_name = 1;
+}
+
+/** Request and response to get the current list of meta region locations */
+message GetMetaRegionLocationsRequest {
+}
+message GetMetaRegionLocationsResponse {
+  /** Not set if meta region locations could not be determined. */
+  repeated RegionLocation meta_locations = 1;
+}
+
+/**
+ * Implements all the RPCs clients need to look up cluster meta information for connection establishment.
+ */
+service ClientMetaService {
+  /**
+   * Get Cluster ID for this cluster.
+   */
+  rpc GetClusterId(GetClusterIdRequest) returns(GetClusterIdResponse);
+
+  /**
+   * Get active master server name for this cluster.
+   */
+  rpc GetActiveMaster(GetActiveMasterRequest) returns(GetActiveMasterResponse);
+
+  /**
+   * Get current meta replicas' region locations.
+   */
+  rpc GetMetaRegionLocations(GetMetaRegionLocationsRequest) returns(GetMetaRegionLocationsResponse);
+}
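
For illustration, a minimal client-side sketch of calling the new service over a
blocking stub, mirroring how TestClientMetaServiceRPCs (further below) exercises
it; the master host, port, and timeout here are placeholder assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.ipc.RpcClient;
    import org.apache.hadoop.hbase.ipc.RpcClientFactory;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
    import org.apache.hadoop.hbase.security.User;

    public class ClientMetaServiceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder end point; any master (active or standby) serves these RPCs.
        ServerName master = ServerName.valueOf("master1.example.com", 16000,
            System.currentTimeMillis());
        RpcClient rpcClient = RpcClientFactory.createClient(conf, null);
        try {
          ClientMetaService.BlockingInterface stub = ClientMetaService.newBlockingStub(
              rpcClient.createBlockingRpcChannel(master, User.getCurrent(), 60000));
          GetClusterIdResponse resp = stub.getClusterId(
              RpcControllerFactory.instantiate(conf).newController(),
              GetClusterIdRequest.getDefaultInstance());
          // cluster_id is optional: it can be unset if the master could not determine it.
          System.out.println(resp.hasClusterId() ? resp.getClusterId() : "<unknown>");
        } finally {
          rpcClient.close();
        }
      }
    }
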
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 82b456e..07ec9fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -71,6 +72,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
@@ -93,10 +95,16 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableReques
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
@@ -221,7 +229,8 @@ import com.google.protobuf.ServiceException;
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 public class MasterRpcServices extends RSRpcServices
-    implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface {
+    implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
+    ClientMetaService.BlockingInterface {
   private static final Log LOG = LogFactory.getLog(MasterRpcServices.class.getName());
 
   private final HMaster master;
@@ -330,6 +339,9 @@ public class MasterRpcServices extends RSRpcServices
     bssi.add(new BlockingServiceAndInterface(
       RegionServerStatusService.newReflectiveBlockingService(this),
       RegionServerStatusService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+        ClientMetaService.newReflectiveBlockingService(this),
+        ClientMetaService.BlockingInterface.class));
     bssi.addAll(super.getServices());
     return bssi;
   }
@@ -1757,4 +1769,39 @@ public class MasterRpcServices extends RSRpcServices
     }
     return null;
   }
+
+  @Override
+  public GetClusterIdResponse getClusterId(RpcController rpcController, GetClusterIdRequest request)
+      throws ServiceException {
+    GetClusterIdResponse.Builder resp = GetClusterIdResponse.newBuilder();
+    String clusterId = master.getClusterId();
+    if (clusterId != null) {
+      resp.setClusterId(clusterId);
+    }
+    return resp.build();
+  }
+
+  @Override
+  public GetActiveMasterResponse getActiveMaster(RpcController rpcController,
+      GetActiveMasterRequest request) throws ServiceException {
+    GetActiveMasterResponse.Builder resp = GetActiveMasterResponse.newBuilder();
+    ServerName serverName = master.getActiveMaster();
+    if (serverName != null) {
+      resp.setServerName(ProtobufUtil.toServerName(serverName));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController,
+     GetMetaRegionLocationsRequest request) throws ServiceException {
+    GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder();
+    List<HRegionLocation> metaLocations =
+        master.getMetaRegionLocationCache().getMetaRegionLocations();
+    for (HRegionLocation location: metaLocations) {
+      response.addMetaLocations(ProtobufUtil.toRegionLocation(location));
+    }
+    return response.build();
+  }
+
 }
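
On the client side, each repeated RegionLocation entry converts back to an
HRegionLocation via the new ProtobufUtil helper; a brief sketch, reusing a stub
obtained as in the example above:

    import java.util.ArrayList;
    import java.util.List;
    import com.google.protobuf.RpcController;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLocation;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;

    public class MetaLocationsSketch {
      static List<HRegionLocation> fetchMetaLocations(
          ClientMetaService.BlockingInterface stub, RpcController controller)
          throws Exception {
        GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations(
            controller, GetMetaRegionLocationsRequest.getDefaultInstance());
        List<HRegionLocation> result = new ArrayList<>();
        for (RegionLocation location : resp.getMetaLocationsList()) {
          result.add(ProtobufUtil.toRegionLocation(location));
        }
        return result;
      }
    }
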
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
index b0fd8fe..821cb18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master;
 
 import static org.apache.hadoop.hbase.zookeeper.ZKUtil.joinZNode;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ConcurrentNavigableMap;
@@ -35,7 +36,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * A cache of meta region location metadata. Registers a listener on ZK to track changes to the
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
index c254e56..e9fa26d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
@@ -18,16 +18,15 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
new file mode 100644
index 0000000..458f891
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
@@ -0,0 +1,158 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
+import static org.junit.Assert.assertEquals;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcClientFactory;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
+
+@Category({MediumTests.class, MasterTests.class})
+public class TestClientMetaServiceRPCs {
+
+  // Total number of masters (active + standby) for the purpose of this test.
+  private static final int MASTER_COUNT = 3;
+  private static final int RS_COUNT = 3;
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static Configuration conf;
+  private static int rpcTimeout;
+  private static RpcClient rpcClient;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Start the mini cluster with standby masters.
+    TEST_UTIL.startMiniCluster(MASTER_COUNT, RS_COUNT);
+    conf = TEST_UTIL.getConfiguration();
+    rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS.toNanos(
+        conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
+    rpcClient = RpcClientFactory.createClient(conf,
+        TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId());
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (rpcClient != null) {
+      rpcClient.close();
+    }
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private static ClientMetaService.BlockingInterface getMasterStub(ServerName server)
+      throws IOException {
+    return ClientMetaService.newBlockingStub(
+        rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout));
+  }
+
+  private static HBaseRpcController getRpcController() {
+    return RpcControllerFactory.instantiate(conf).newController();
+  }
+
+  /**
+   * Verifies the cluster ID from all running masters.
+   */
+  @Test public void TestClusterID() throws Exception {
+    HBaseRpcController rpcController = getRpcController();
+    String clusterID = TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId();
+    int rpcCount = 0;
+    for (JVMClusterUtil.MasterThread masterThread:
+        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+      ClientMetaService.BlockingInterface stub =
+          getMasterStub(masterThread.getMaster().getServerName());
+      GetClusterIdResponse resp =
+          stub.getClusterId(rpcController, GetClusterIdRequest.getDefaultInstance());
+      assertEquals(clusterID, resp.getClusterId());
+      rpcCount++;
+    }
+    assertEquals(MASTER_COUNT, rpcCount);
+  }
+
+  /**
+   * Verifies the active master ServerName as seen by all masters.
+   */
+  @Test public void TestActiveMaster() throws Exception {
+    HBaseRpcController rpcController = getRpcController();
+    ServerName activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName();
+    int rpcCount = 0;
+    for (JVMClusterUtil.MasterThread masterThread:
+        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+      ClientMetaService.BlockingInterface stub =
+          getMasterStub(masterThread.getMaster().getServerName());
+      GetActiveMasterResponse resp =
+          stub.getActiveMaster(rpcController, GetActiveMasterRequest.getDefaultInstance());
+      assertEquals(activeMaster, ProtobufUtil.toServerName(resp.getServerName()));
+      rpcCount++;
+    }
+    assertEquals(MASTER_COUNT, rpcCount);
+  }
+
+  /**
+   * Verifies that the meta region locations RPC returns consistent results across all masters.
+   */
+  @Test public void TestMetaLocations() throws Exception {
+    HBaseRpcController rpcController = getRpcController();
+    List<HRegionLocation> metaLocations = TEST_UTIL.getMiniHBaseCluster().getMaster()
+        .getMetaRegionLocationCache().getMetaRegionLocations();
+    Collections.sort(metaLocations);
+    int rpcCount = 0;
+    for (JVMClusterUtil.MasterThread masterThread:
+      TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+      ClientMetaService.BlockingInterface stub =
+          getMasterStub(masterThread.getMaster().getServerName());
+      GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations(
+          rpcController, GetMetaRegionLocationsRequest.getDefaultInstance());
+      List<HRegionLocation> result = new ArrayList<>();
+      for (HBaseProtos.RegionLocation location: resp.getMetaLocationsList()) {
+        result.add(ProtobufUtil.toRegionLocation(location));
+      }
+      Collections.sort(result);
+      assertEquals(metaLocations, result);
+      rpcCount++;
+    }
+    assertEquals(MASTER_COUNT, rpcCount);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
index 89b74b3..ab6b275 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
@@ -43,13 +43,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Col
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
-
 
 /**
  * Class to test ProtobufUtil.


[hbase] 08/09: HBASE-24765: Dynamic master discovery (#2314)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3e1450d8b305856335010b305f565e0b0c823a74
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Wed Aug 26 09:13:34 2020 -0700

    HBASE-24765: Dynamic master discovery (#2314)
    
    This patch adds the ability to discover newly added masters
    dynamically on the master registry side. The re-fetch is triggered
    either periodically (every 5 minutes) or by any registry RPC
    failure. Master server information is cached in the masters to
    avoid repeated ZK lookups.
    
    Updates the client side connection metrics to maintain a counter
    per RPC type so that clients have visibility into counts grouped
    by RPC method name.
    
    I didn't add the method to the ZK registry interface since
    there is an ongoing design discussion in the splittable meta
    doc. We can add it later if needed.
    
    Signed-off-by: Nick Dimiduk <nd...@apache.org>
    Signed-off-by: Viraj Jasani <vj...@apache.org>
    Signed-off-by: Duo Zhang <zh...@apache.org>
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit 275a38e1533eafa1d4bd1d50c13bcecd9a397ea8)
    (cherry picked from commit bb9121da77c7b881a3cc4c389029a610fc2b0925)
---
 .../hbase/client/MasterAddressRefresher.java       |  125 ++
 .../apache/hadoop/hbase/client/MasterRegistry.java |   67 +-
 .../hadoop/hbase/client/MetricsConnection.java     |   11 +-
 .../hbase/zookeeper/MasterAddressTracker.java      |   55 +
 .../hadoop/hbase/client/TestMetricsConnection.java |    6 +
 .../hbase/protobuf/generated/MasterProtos.java     | 1788 ++++++++++++++------
 hbase-protocol/src/main/protobuf/Master.proto      |   18 +-
 .../hadoop/hbase/master/ActiveMasterManager.java   |   42 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   66 +-
 .../hadoop/hbase/master/MasterRpcServices.java     |   20 +-
 .../hbase/client/TestMasterAddressRefresher.java   |  176 ++
 .../hadoop/hbase/client/TestMasterRegistry.java    |  103 ++
 .../hbase/master/TestActiveMasterManager.java      |  284 ++--
 .../hbase/master/TestClientMetaServiceRPCs.java    |   21 -
 .../hadoop/hbase/master/TestMasterFailover.java    |   11 +-
 .../regionserver/TestMasterAddressTracker.java     |   37 +-
 16 files changed, 2087 insertions(+), 743 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java
new file mode 100644
index 0000000..08b5e9b
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAddressRefresher.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
+
+/**
+ * Thread safe utility that keeps master end points used by {@link MasterRegistry} up to date. This
+ * uses the RPC {@link ClientMetaService#getMasters} to fetch the latest list of registered masters.
+ * By default the refresh happens periodically (configured via
+ * {@link #PERIODIC_REFRESH_INTERVAL_SECS}). The refresh can also be triggered on demand via
+ * {@link #refreshNow()}. To prevent a flood of on-demand refreshes we expect that any two
+ * attempts should be spaced at least {@link #MIN_SECS_BETWEEN_REFRESHES} seconds apart.
+ */
+@InterfaceAudience.Private
+public class MasterAddressRefresher implements Closeable {
+  private static final Logger LOG = LoggerFactory.getLogger(MasterAddressRefresher.class);
+  public static final String PERIODIC_REFRESH_INTERVAL_SECS =
+      "hbase.client.master_registry.refresh_interval_secs";
+  private static final int PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT = 300;
+  public static final String MIN_SECS_BETWEEN_REFRESHES =
+      "hbase.client.master_registry.min_secs_between_refreshes";
+  private static final int MIN_SECS_BETWEEN_REFRESHES_DEFAULT = 60;
+
+  private final ExecutorService pool;
+  private final MasterRegistry registry;
+  private final long periodicRefreshMs;
+  private final long timeBetweenRefreshesMs;
+  private final Object refreshMasters = new Object();
+
+  @Override
+  public void close() {
+    pool.shutdownNow();
+  }
+
+  /**
+   * Thread that refreshes the master end points until it is interrupted via {@link #close()}.
+   * Multiple callers attempting to refresh at the same time synchronize on {@link #refreshMasters}.
+   */
+  private class RefreshThread implements Runnable {
+    @Override
+    public void run() {
+      long lastRpcTs = 0;
+      while (!Thread.interrupted()) {
+        try {
+          // Spurious wake ups are okay, worst case we make an extra RPC call to refresh. We won't
+          // have duplicate refreshes because once the thread is past the wait(), notify()s are
+          // ignored until the thread is back to the waiting state.
+          synchronized (refreshMasters) {
+            refreshMasters.wait(periodicRefreshMs);
+          }
+          long currentTs = EnvironmentEdgeManager.currentTime();
+          if (lastRpcTs != 0 && currentTs - lastRpcTs <= timeBetweenRefreshesMs) {
+            continue;
+          }
+          lastRpcTs = currentTs;
+          LOG.debug("Attempting to refresh master address end points.");
+          Set<ServerName> newMasters = new HashSet<>(registry.getMasters());
+          registry.populateMasterStubs(newMasters);
+          LOG.debug("Finished refreshing master end points. {}", newMasters);
+        } catch (InterruptedException e) {
+          LOG.debug("Interrupted during wait, aborting refresh-masters-thread.", e);
+          break;
+        } catch (IOException e) {
+          LOG.debug("Error populating latest list of masters.", e);
+        }
+      }
+      LOG.info("Master end point refresher loop exited.");
+    }
+  }
+
+  MasterAddressRefresher(Configuration conf, MasterRegistry registry) {
+    pool = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
+        .setNameFormat("master-registry-refresh-end-points").setDaemon(true).build());
+    periodicRefreshMs = TimeUnit.SECONDS.toMillis(conf.getLong(PERIODIC_REFRESH_INTERVAL_SECS,
+        PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT));
+    timeBetweenRefreshesMs = TimeUnit.SECONDS.toMillis(conf.getLong(MIN_SECS_BETWEEN_REFRESHES,
+        MIN_SECS_BETWEEN_REFRESHES_DEFAULT));
+    Preconditions.checkArgument(periodicRefreshMs > 0);
+    Preconditions.checkArgument(timeBetweenRefreshesMs < periodicRefreshMs);
+    this.registry = registry;
+    pool.submit(new RefreshThread());
+  }
+
+  /**
+   * Notifies the refresher thread to refresh the configuration. This does not guarantee a refresh.
+   * See class comment for details.
+   */
+  void refreshNow() {
+    synchronized (refreshMasters) {
+      refreshMasters.notify();
+    }
+  }
+}
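
Both refresh knobs above are plain configuration properties. A small sketch of
tuning them (values are illustrative; the minimum gap must stay below the
periodic interval or the Preconditions checks in the constructor fail):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegistryRefreshTuning {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Refresh master end points every 2 minutes instead of the 300s default.
        conf.setLong("hbase.client.master_registry.refresh_interval_secs", 120);
        // Allow failure-triggered refreshes at most once every 30 seconds.
        conf.setLong("hbase.client.master_registry.min_secs_between_refreshes", 30);
        return conf;
      }
    }
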
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
index 882173f..877049c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.util.DNS.getMasterHostname;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.net.HostAndPort;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -51,10 +53,11 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.security.User;
 
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest;
@@ -69,13 +72,15 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRespo
 public class MasterRegistry implements ConnectionRegistry {
   private static final String MASTER_ADDRS_CONF_SEPARATOR = ",";
 
-  private ImmutableMap<String, ClientMetaService.Interface> masterAddr2Stub;
+  private volatile ImmutableMap<String, ClientMetaService.Interface> masterAddr2Stub;
 
   // RPC client used to talk to the masters.
   private RpcClient rpcClient;
   private RpcControllerFactory rpcControllerFactory;
   private int rpcTimeoutMs;
 
+  protected MasterAddressRefresher masterAddressRefresher;
+
   @Override
   public void init(Connection connection) throws IOException {
     Configuration conf = connection.getConfiguration();
@@ -87,13 +92,15 @@ public class MasterRegistry implements ConnectionRegistry {
     rpcClient = RpcClientFactory.createClient(conf, null);
     rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     populateMasterStubs(parseMasterAddrs(conf));
+    masterAddressRefresher = new MasterAddressRefresher(conf, this);
   }
 
-  private interface Callable <T extends Message> {
+  protected interface Callable <T extends Message> {
     T call(ClientMetaService.Interface stub, RpcController controller) throws IOException;
   }
 
-  private <T extends Message> T doCall(Callable<T> callable) throws MasterRegistryFetchException {
+  protected <T extends Message> T doCall(Callable<T> callable)
+      throws MasterRegistryFetchException {
     Exception lastException = null;
     Set<String> masters = masterAddr2Stub.keySet();
     List<ClientMetaService.Interface> stubs = new ArrayList<>(masterAddr2Stub.values());
@@ -102,14 +109,16 @@ public class MasterRegistry implements ConnectionRegistry {
       HBaseRpcController controller = rpcControllerFactory.newController();
       try {
         T resp = callable.call(stub, controller);
-        if (controller.failed()) {
-          lastException = controller.getFailed();
-          continue;
+        if (!controller.failed()) {
+          return resp;
         }
-        return resp;
+        lastException = controller.getFailed();
       } catch (Exception e) {
         lastException = e;
       }
+      if (ClientExceptionsUtil.isConnectionException(lastException)) {
+        masterAddressRefresher.refreshNow();
+      }
     }
     // rpcs to all masters failed.
     throw new MasterRegistryFetchException(masters, lastException);
@@ -117,19 +126,37 @@ public class MasterRegistry implements ConnectionRegistry {
 
   @Override
   public ServerName getActiveMaster() throws IOException {
-    GetActiveMasterResponse resp = doCall(new Callable<GetActiveMasterResponse>() {
+    GetMastersResponseEntry activeMaster = null;
+    for (GetMastersResponseEntry entry: getMastersInternal().getMasterServersList()) {
+      if (entry.getIsActive()) {
+        activeMaster = entry;
+        break;
+      }
+    }
+    if (activeMaster == null) {
+      throw new HBaseIOException("No active master found");
+    }
+    return ProtobufUtil.toServerName(activeMaster.getServerName());
+  }
+
+  List<ServerName> getMasters() throws IOException {
+    List<ServerName> result = new ArrayList<>();
+    for (GetMastersResponseEntry entry: getMastersInternal().getMasterServersList()) {
+      result.add(ProtobufUtil.toServerName(entry.getServerName()));
+    }
+    return result;
+  }
+
+  private GetMastersResponse getMastersInternal() throws IOException {
+    return doCall(new Callable<GetMastersResponse>() {
       @Override
-      public GetActiveMasterResponse call(
+      public GetMastersResponse call(
           ClientMetaService.Interface stub, RpcController controller) throws IOException {
-        BlockingRpcCallback<GetActiveMasterResponse> cb = new BlockingRpcCallback<>();
-        stub.getActiveMaster(controller, GetActiveMasterRequest.getDefaultInstance(), cb);
+        BlockingRpcCallback<GetMastersResponse> cb = new BlockingRpcCallback<>();
+        stub.getMasters(controller, GetMastersRequest.getDefaultInstance(), cb);
         return cb.get();
       }
     });
-    if (!resp.hasServerName() || resp.getServerName() == null) {
-      throw new HBaseIOException("No active master found");
-    }
-    return ProtobufUtil.toServerName(resp.getServerName());
   }
 
   @Override
@@ -230,4 +257,10 @@ public class MasterRegistry implements ConnectionRegistry {
     }
     masterAddr2Stub = builder.build();
   }
+
+  @InterfaceAudience.Private
+  ImmutableSet<String> getParsedMasterServers() {
+    return masterAddr2Stub.keySet();
+  }
+
 }
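
MasterRegistry seeds masterAddr2Stub from the client configuration (via
parseMasterAddrs, not shown in this hunk) and MasterAddressRefresher keeps the
set current afterwards. A sketch of seeding that list; the "hbase.masters" key
name is an assumption taken from the usual master-registry setup, not from this
diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterRegistrySeed {
      public static Configuration seed() {
        Configuration conf = HBaseConfiguration.create();
        // Comma-separated seed list (see MASTER_ADDRS_CONF_SEPARATOR); masters added
        // to the cluster later are discovered by the refresher.
        conf.set("hbase.masters",
            "master1.example.com:16000,master2.example.com:16000,master3.example.com:16000");
        return conf;
      }
    }
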
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
index 6328d7f..0bce8eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
@@ -56,6 +56,7 @@ public class MetricsConnection implements StatisticTrackable {
   /** Set this key to {@code true} to enable metrics collection of client requests. */
   public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable";
 
+  private static final String CNT_BASE = "rpcCount_";
   private static final String DRTN_BASE = "rpcCallDurationMs_";
   private static final String REQ_BASE = "rpcCallRequestSizeBytes_";
   private static final String RESP_BASE = "rpcCallResponseSizeBytes_";
@@ -303,6 +304,8 @@ public class MetricsConnection implements StatisticTrackable {
           LOAD_FACTOR, CONCURRENCY_LEVEL);
   private final ConcurrentMap<String, Counter> cacheDroppingExceptions =
     new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL);
+  @VisibleForTesting protected final ConcurrentMap<String, Counter> rpcCounters =
+      new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL);
 
   public MetricsConnection(final ConnectionManager.HConnectionImplementation conn) {
     this.scope = conn.toString();
@@ -450,8 +453,7 @@ public class MetricsConnection implements StatisticTrackable {
   }
 
   /** Update call stats for non-critical-path methods */
-  private void updateRpcGeneric(MethodDescriptor method, CallStats stats) {
-    final String methodName = method.getService().getName() + "_" + method.getName();
+  private void updateRpcGeneric(String methodName, CallStats stats) {
     getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory)
         .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
     getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory)
@@ -466,6 +468,9 @@ public class MetricsConnection implements StatisticTrackable {
     if (callsPerServer > 0) {
       concurrentCallsPerServerHist.update(callsPerServer);
     }
+    // Update the counter that tracks RPCs by type.
+    final String methodName = method.getService().getName() + "_" + method.getName();
+    getMetric(CNT_BASE + methodName, rpcCounters, counterFactory).inc();
     // this implementation is tied directly to protobuf implementation details. would be better
     // if we could dispatch based on something static, ie, request Message type.
     if (method.getService() == ClientService.getDescriptor()) {
@@ -518,7 +523,7 @@ public class MetricsConnection implements StatisticTrackable {
       }
     }
     // Fallback to dynamic registry lookup for DDL methods.
-    updateRpcGeneric(method, stats);
+    updateRpcGeneric(methodName, stats);
   }
 
   public void incrCacheDroppingExceptions(Object exception) {
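
With this change each RPC type gets its own counter, keyed as
rpcCount_<ServiceName>_<MethodName> (CNT_BASE above). A short sketch of opting
in so the counters are populated; the key format mirrors what
TestMetricsConnection below asserts:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClientRpcCounterSketch {
      public static Configuration enableClientMetrics() {
        Configuration conf = HBaseConfiguration.create();
        // Opt in to client-side metrics collection (CLIENT_SIDE_METRICS_ENABLED_KEY).
        // A Get against ClientService then increments "rpcCount_ClientService_Get".
        conf.setBoolean("hbase.client.metrics.enable", true);
        return conf;
      }
    }
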
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
index 311202c..c34d294 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.zookeeper;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
@@ -68,6 +72,57 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker {
   }
 
   /**
+   * @param watcher ZooKeeperWatcher instance to use for querying ZK.
+   * @return current list of backup masters.
+   */
+  public static List<ServerName> getBackupMastersAndRenewWatch(
+      ZooKeeperWatcher watcher) {
+    // Build Set of backup masters from ZK nodes
+    List<String> backupMasterStrings;
+    try {
+      backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren(
+          watcher, watcher.backupMasterAddressesZNode);
+    } catch (KeeperException e) {
+      LOG.warn(watcher.prefix("Unable to list backup servers"), e);
+      backupMasterStrings = null;
+    }
+
+    List<ServerName> backupMasters = new ArrayList<>();
+    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
+      for (String s: backupMasterStrings) {
+        try {
+          byte [] bytes;
+          try {
+            bytes = ZKUtil.getData(watcher, ZKUtil.joinZNode(
+               watcher.backupMasterAddressesZNode, s));
+          } catch (InterruptedException e) {
+            throw new InterruptedIOException("Thread interrupted.");
+          }
+          if (bytes != null) {
+            ServerName sn;
+            try {
+              sn = ServerName.parseFrom(bytes);
+            } catch (DeserializationException e) {
+              LOG.warn("Failed parse, skipping registering backup server", e);
+              continue;
+            }
+            backupMasters.add(sn);
+          }
+        } catch (KeeperException | InterruptedIOException e) {
+          LOG.warn(watcher.prefix("Unable to get information about " +
+              "backup servers"), e);
+        }
+      }
+      Collections.sort(backupMasters, new Comparator<ServerName>() {
+        @Override
+        public int compare(ServerName s1, ServerName s2) {
+          return s1.getServerName().compareTo(s2.getServerName());
+        }});
+    }
+    return backupMasters;
+  }
+
+  /**
    * Get the address of the current master if one is available.  Returns null
    * if no current master.
    * @return Server name or null if timed out.
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
index f1be81b..9638c72 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import com.google.protobuf.ByteString;
 import com.yammer.metrics.util.RatioGauge;
@@ -129,6 +130,11 @@ public class TestMetricsConnection {
               .build(),
           MetricsConnection.newCallStats());
     }
+    for (String method: new String[]{"Get", "Scan", "Mutate"}) {
+      final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method;
+      final long metricVal = METRICS.rpcCounters.get(metricKey).count();
+      assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop);
+    }
     for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] {
       METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker,
       METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 909c144..2eaed11 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -66722,32 +66722,32 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.GetClusterIdResponse)
   }
 
-  public interface GetActiveMasterRequestOrBuilder
+  public interface GetMastersRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
   /**
-   * Protobuf type {@code hbase.pb.GetActiveMasterRequest}
+   * Protobuf type {@code hbase.pb.GetMastersRequest}
    *
    * <pre>
-   ** Request and response to get the currently active master name for this cluster 
+   ** Request and response to get the current list of all registered master servers 
    * </pre>
    */
-  public static final class GetActiveMasterRequest extends
+  public static final class GetMastersRequest extends
       com.google.protobuf.GeneratedMessage
-      implements GetActiveMasterRequestOrBuilder {
-    // Use GetActiveMasterRequest.newBuilder() to construct.
-    private GetActiveMasterRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements GetMastersRequestOrBuilder {
+    // Use GetMastersRequest.newBuilder() to construct.
+    private GetMastersRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private GetActiveMasterRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private GetMastersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final GetActiveMasterRequest defaultInstance;
-    public static GetActiveMasterRequest getDefaultInstance() {
+    private static final GetMastersRequest defaultInstance;
+    public static GetMastersRequest getDefaultInstance() {
       return defaultInstance;
     }
 
-    public GetActiveMasterRequest getDefaultInstanceForType() {
+    public GetMastersRequest getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -66757,7 +66757,7 @@ public final class MasterProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private GetActiveMasterRequest(
+    private GetMastersRequest(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -66793,28 +66793,28 @@ public final class MasterProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<GetActiveMasterRequest> PARSER =
-        new com.google.protobuf.AbstractParser<GetActiveMasterRequest>() {
-      public GetActiveMasterRequest parsePartialFrom(
+    public static com.google.protobuf.Parser<GetMastersRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetMastersRequest>() {
+      public GetMastersRequest parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new GetActiveMasterRequest(input, extensionRegistry);
+        return new GetMastersRequest(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<GetActiveMasterRequest> getParserForType() {
+    public com.google.protobuf.Parser<GetMastersRequest> getParserForType() {
       return PARSER;
     }
 
@@ -66858,10 +66858,10 @@ public final class MasterProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) obj;
 
       boolean result = true;
       result = result &&
@@ -66882,53 +66882,53 @@ public final class MasterProtos {
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -66937,7 +66937,7 @@ public final class MasterProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -66949,28 +66949,28 @@ public final class MasterProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.GetActiveMasterRequest}
+     * Protobuf type {@code hbase.pb.GetMastersRequest}
      *
      * <pre>
-     ** Request and response to get the currently active master name for this cluster 
+     ** Request and response to get the current list of all registered master servers 
      * </pre>
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequestOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequestOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -66999,38 +66999,38 @@ public final class MasterProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersRequest_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest(this);
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest(this);
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance()) return this;
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -67043,11 +67043,11 @@ public final class MasterProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -67057,65 +67057,723 @@ public final class MasterProtos {
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.GetActiveMasterRequest)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersRequest)
     }
 
     static {
-      defaultInstance = new GetActiveMasterRequest(true);
+      defaultInstance = new GetMastersRequest(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.GetActiveMasterRequest)
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersRequest)
   }
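
[Annotation, not part of the diff: the rename above leaves GetMastersRequest as an empty marker message. Like its predecessor GetActiveMasterRequest it carries no fields, so callers just send the default instance. A minimal sketch, assuming only the generated MasterProtos class on the classpath:]

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;

    // GetMastersRequest declares no fields; either form below yields an
    // equivalent, already-initialized request message.
    GetMastersRequest viaBuilder = GetMastersRequest.newBuilder().build();
    GetMastersRequest viaDefault = GetMastersRequest.getDefaultInstance();
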
 
-  public interface GetActiveMasterResponseOrBuilder
+  public interface GetMastersResponseEntryOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // optional .hbase.pb.ServerName server_name = 1;
+    // required .hbase.pb.ServerName server_name = 1;
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
      */
     boolean hasServerName();
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+    /**
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+
+    // required bool is_active = 2;
+    /**
+     * <code>required bool is_active = 2;</code>
+     */
+    boolean hasIsActive();
+    /**
+     * <code>required bool is_active = 2;</code>
+     */
+    boolean getIsActive();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetMastersResponseEntry}
+   */
+  public static final class GetMastersResponseEntry extends
+      com.google.protobuf.GeneratedMessage
+      implements GetMastersResponseEntryOrBuilder {
+    // Use GetMastersResponseEntry.newBuilder() to construct.
+    private GetMastersResponseEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetMastersResponseEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetMastersResponseEntry defaultInstance;
+    public static GetMastersResponseEntry getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetMastersResponseEntry getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetMastersResponseEntry(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = serverName_.toBuilder();
+              }
+              serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(serverName_);
+                serverName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              isActive_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetMastersResponseEntry> PARSER =
+        new com.google.protobuf.AbstractParser<GetMastersResponseEntry>() {
+      public GetMastersResponseEntry parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetMastersResponseEntry(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetMastersResponseEntry> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.ServerName server_name = 1;
+    public static final int SERVER_NAME_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+    /**
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public boolean hasServerName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+      return serverName_;
+    }
+    /**
+     * <code>required .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+      return serverName_;
+    }
+
+    // required bool is_active = 2;
+    public static final int IS_ACTIVE_FIELD_NUMBER = 2;
+    private boolean isActive_;
+    /**
+     * <code>required bool is_active = 2;</code>
+     */
+    public boolean hasIsActive() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required bool is_active = 2;</code>
+     */
+    public boolean getIsActive() {
+      return isActive_;
+    }
+
+    private void initFields() {
+      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      isActive_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasServerName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasIsActive()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getServerName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, serverName_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBool(2, isActive_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, serverName_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(2, isActive_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) obj;
+
+      boolean result = true;
+      result = result && (hasServerName() == other.hasServerName());
+      if (hasServerName()) {
+        result = result && getServerName()
+            .equals(other.getServerName());
+      }
+      result = result && (hasIsActive() == other.hasIsActive());
+      if (hasIsActive()) {
+        result = result && (getIsActive()
+            == other.getIsActive());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasServerName()) {
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerName().hashCode();
+      }
+      if (hasIsActive()) {
+        hash = (37 * hash) + IS_ACTIVE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getIsActive());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetMastersResponseEntry}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getServerNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        isActive_ = false;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponseEntry_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (serverNameBuilder_ == null) {
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.isActive_ = isActive_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance()) return this;
+        if (other.hasServerName()) {
+          mergeServerName(other.getServerName());
+        }
+        if (other.hasIsActive()) {
+          setIsActive(other.getIsActive());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasServerName()) {
+          
+          return false;
+        }
+        if (!hasIsActive()) {
+          
+          return false;
+        }
+        if (!getServerName().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.ServerName server_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public boolean hasServerName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+        if (serverNameBuilder_ == null) {
+          return serverName_;
+        } else {
+          return serverNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          serverName_ = value;
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          serverName_ = builderForValue.build();
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+            serverName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+          } else {
+            serverName_ = value;
+          }
+          onChanged();
+        } else {
+          serverNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getServerNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilder();
+        } else {
+          return serverName_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ServerName server_name = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+
+      // required bool is_active = 2;
+      private boolean isActive_ ;
+      /**
+       * <code>required bool is_active = 2;</code>
+       */
+      public boolean hasIsActive() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required bool is_active = 2;</code>
+       */
+      public boolean getIsActive() {
+        return isActive_;
+      }
+      /**
+       * <code>required bool is_active = 2;</code>
+       */
+      public Builder setIsActive(boolean value) {
+        bitField0_ |= 0x00000002;
+        isActive_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool is_active = 2;</code>
+       */
+      public Builder clearIsActive() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        isActive_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponseEntry)
+    }
+
+    static {
+      defaultInstance = new GetMastersResponseEntry(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponseEntry)
+  }
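
[Annotation, not part of the diff: the entry message above pairs a master's ServerName with an is_active flag. Both fields are required, so isInitialized() fails and build() throws unless both are set and the nested ServerName is itself complete. A hedged sketch of populating one entry through the generated builders; the ServerName setters used here (setHostName/setPort/setStartCode) are the usual HBaseProtos accessors and are assumed rather than shown in this diff, and the host name is hypothetical:]

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;

    ServerName backupMaster = ServerName.newBuilder()
        .setHostName("master-2.example.com")      // hypothetical host
        .setPort(16000)
        .setStartCode(System.currentTimeMillis())
        .build();

    // server_name and is_active are both required; omitting either would
    // make build() throw an UninitializedMessageException.
    GetMastersResponseEntry entry = GetMastersResponseEntry.newBuilder()
        .setServerName(backupMaster)
        .setIsActive(false)                       // a standby master in this sketch
        .build();
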
+
+  public interface GetMastersResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> 
+        getMasterServersList();
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index);
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+     */
+    int getMasterServersCount();
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> 
+        getMasterServersOrBuilderList();
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder(
+        int index);
   }
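
[Annotation, not part of the diff: taken together, the change replaces the old single optional server_name reply with a repeated list of entries, one per master, each flagged active or standby. A client that previously checked hasServerName() would now scan the list for the active entry, roughly as in this sketch; the response object is assumed to have come back from the new GetMasters RPC:]

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;

    ServerName active = null;
    java.util.List<ServerName> standbys = new java.util.ArrayList<ServerName>();
    for (GetMastersResponseEntry entry : response.getMasterServersList()) {
      if (entry.getIsActive()) {
        active = entry.getServerName();   // at most one entry should be active
      } else {
        standbys.add(entry.getServerName());
      }
    }
    // Unlike the old optional server_name, "active master unknown" now shows
    // up as a list with no is_active entry, so callers still need a null check.
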
   /**
-   * Protobuf type {@code hbase.pb.GetActiveMasterResponse}
+   * Protobuf type {@code hbase.pb.GetMastersResponse}
    */
-  public static final class GetActiveMasterResponse extends
+  public static final class GetMastersResponse extends
       com.google.protobuf.GeneratedMessage
-      implements GetActiveMasterResponseOrBuilder {
-    // Use GetActiveMasterResponse.newBuilder() to construct.
-    private GetActiveMasterResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements GetMastersResponseOrBuilder {
+    // Use GetMastersResponse.newBuilder() to construct.
+    private GetMastersResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private GetActiveMasterResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private GetMastersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final GetActiveMasterResponse defaultInstance;
-    public static GetActiveMasterResponse getDefaultInstance() {
+    private static final GetMastersResponse defaultInstance;
+    public static GetMastersResponse getDefaultInstance() {
       return defaultInstance;
     }
 
-    public GetActiveMasterResponse getDefaultInstanceForType() {
+    public GetMastersResponse getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -67125,7 +67783,7 @@ public final class MasterProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private GetActiveMasterResponse(
+    private GetMastersResponse(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -67149,16 +67807,11 @@ public final class MasterProtos {
               break;
             }
             case 10: {
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = serverName_.toBuilder();
-              }
-              serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(serverName_);
-                serverName_ = subBuilder.buildPartial();
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                masterServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry>();
+                mutable_bitField0_ |= 0x00000001;
               }
-              bitField0_ |= 0x00000001;
+              masterServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.PARSER, extensionRegistry));
               break;
             }
           }
@@ -67169,82 +67822,86 @@ public final class MasterProtos {
         throw new com.google.protobuf.InvalidProtocolBufferException(
             e.getMessage()).setUnfinishedMessage(this);
       } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          masterServers_ = java.util.Collections.unmodifiableList(masterServers_);
+        }
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
       }
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<GetActiveMasterResponse> PARSER =
-        new com.google.protobuf.AbstractParser<GetActiveMasterResponse>() {
-      public GetActiveMasterResponse parsePartialFrom(
+    public static com.google.protobuf.Parser<GetMastersResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetMastersResponse>() {
+      public GetMastersResponse parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new GetActiveMasterResponse(input, extensionRegistry);
+        return new GetMastersResponse(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<GetActiveMasterResponse> getParserForType() {
+    public com.google.protobuf.Parser<GetMastersResponse> getParserForType() {
       return PARSER;
     }
 
-    private int bitField0_;
-    // optional .hbase.pb.ServerName server_name = 1;
-    public static final int SERVER_NAME_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+    // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;
+    public static final int MASTER_SERVERS_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> masterServers_;
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
      */
-    public boolean hasServerName() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> getMasterServersList() {
+      return masterServers_;
     }
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
-      return serverName_;
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> 
+        getMasterServersOrBuilderList() {
+      return masterServers_;
     }
     /**
-     * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-     *
-     * <pre>
-     ** Not set if an active master could not be determined. 
-     * </pre>
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
-      return serverName_;
+    public int getMasterServersCount() {
+      return masterServers_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) {
+      return masterServers_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder(
+        int index) {
+      return masterServers_.get(index);
     }
 
     private void initFields() {
-      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      masterServers_ = java.util.Collections.emptyList();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
       if (isInitialized != -1) return isInitialized == 1;
 
-      if (hasServerName()) {
-        if (!getServerName().isInitialized()) {
+      for (int i = 0; i < getMasterServersCount(); i++) {
+        if (!getMasterServers(i).isInitialized()) {
           memoizedIsInitialized = 0;
           return false;
         }
@@ -67256,8 +67913,8 @@ public final class MasterProtos {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, serverName_);
+      for (int i = 0; i < masterServers_.size(); i++) {
+        output.writeMessage(1, masterServers_.get(i));
       }
       getUnknownFields().writeTo(output);
     }
@@ -67268,9 +67925,9 @@ public final class MasterProtos {
       if (size != -1) return size;
 
       size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+      for (int i = 0; i < masterServers_.size(); i++) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, serverName_);
+          .computeMessageSize(1, masterServers_.get(i));
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -67289,17 +67946,14 @@ public final class MasterProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) obj;
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) obj;
 
       boolean result = true;
-      result = result && (hasServerName() == other.hasServerName());
-      if (hasServerName()) {
-        result = result && getServerName()
-            .equals(other.getServerName());
-      }
+      result = result && getMasterServersList()
+          .equals(other.getMasterServersList());
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -67313,62 +67967,62 @@ public final class MasterProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasServerName()) {
-        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getServerName().hashCode();
+      if (getMasterServersCount() > 0) {
+        hash = (37 * hash) + MASTER_SERVERS_FIELD_NUMBER;
+        hash = (53 * hash) + getMasterServersList().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -67377,7 +68031,7 @@ public final class MasterProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -67389,24 +68043,24 @@ public final class MasterProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.GetActiveMasterResponse}
+     * Protobuf type {@code hbase.pb.GetMastersResponse}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponseOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -67418,7 +68072,7 @@ public final class MasterProtos {
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getServerNameFieldBuilder();
+          getMasterServersFieldBuilder();
         }
       }
       private static Builder create() {
@@ -67427,12 +68081,12 @@ public final class MasterProtos {
 
       public Builder clear() {
         super.clear();
-        if (serverNameBuilder_ == null) {
-          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        if (masterServersBuilder_ == null) {
+          masterServers_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
         } else {
-          serverNameBuilder_.clear();
+          masterServersBuilder_.clear();
         }
-        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
 
@@ -67442,59 +68096,81 @@ public final class MasterProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetMastersResponse_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse(this);
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse(this);
         int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (serverNameBuilder_ == null) {
-          result.serverName_ = serverName_;
+        if (masterServersBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            masterServers_ = java.util.Collections.unmodifiableList(masterServers_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.masterServers_ = masterServers_;
         } else {
-          result.serverName_ = serverNameBuilder_.build();
+          result.masterServers_ = masterServersBuilder_.build();
         }
-        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance()) return this;
-        if (other.hasServerName()) {
-          mergeServerName(other.getServerName());
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()) return this;
+        if (masterServersBuilder_ == null) {
+          if (!other.masterServers_.isEmpty()) {
+            if (masterServers_.isEmpty()) {
+              masterServers_ = other.masterServers_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureMasterServersIsMutable();
+              masterServers_.addAll(other.masterServers_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.masterServers_.isEmpty()) {
+            if (masterServersBuilder_.isEmpty()) {
+              masterServersBuilder_.dispose();
+              masterServersBuilder_ = null;
+              masterServers_ = other.masterServers_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              masterServersBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getMasterServersFieldBuilder() : null;
+            } else {
+              masterServersBuilder_.addAllMessages(other.masterServers_);
+            }
+          }
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
 
       public final boolean isInitialized() {
-        if (hasServerName()) {
-          if (!getServerName().isInitialized()) {
+        for (int i = 0; i < getMasterServersCount(); i++) {
+          if (!getMasterServers(i).isInitialized()) {
             
             return false;
           }
@@ -67506,11 +68182,11 @@ public final class MasterProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -67521,168 +68197,255 @@ public final class MasterProtos {
       }
       private int bitField0_;
 
-      // optional .hbase.pb.ServerName server_name = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+      // repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> masterServers_ =
+        java.util.Collections.emptyList();
+      private void ensureMasterServersIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          masterServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry>(masterServers_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> masterServersBuilder_;
+
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public boolean hasServerName() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> getMasterServersList() {
+        if (masterServersBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(masterServers_);
+        } else {
+          return masterServersBuilder_.getMessageList();
+        }
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
-        if (serverNameBuilder_ == null) {
-          return serverName_;
+      public int getMasterServersCount() {
+        if (masterServersBuilder_ == null) {
+          return masterServers_.size();
         } else {
-          return serverNameBuilder_.getMessage();
+          return masterServersBuilder_.getCount();
         }
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry getMasterServers(int index) {
+        if (masterServersBuilder_ == null) {
+          return masterServers_.get(index);
+        } else {
+          return masterServersBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public Builder setMasterServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) {
+        if (masterServersBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          serverName_ = value;
+          ensureMasterServersIsMutable();
+          masterServers_.set(index, value);
           onChanged();
         } else {
-          serverNameBuilder_.setMessage(value);
+          masterServersBuilder_.setMessage(index, value);
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public Builder setServerName(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          serverName_ = builderForValue.build();
+      public Builder setMasterServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) {
+        if (masterServersBuilder_ == null) {
+          ensureMasterServersIsMutable();
+          masterServers_.set(index, builderForValue.build());
           onChanged();
         } else {
-          serverNameBuilder_.setMessage(builderForValue.build());
+          masterServersBuilder_.setMessage(index, builderForValue.build());
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
-            serverName_ =
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
-          } else {
-            serverName_ = value;
+      public Builder addMasterServers(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) {
+        if (masterServersBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
           }
+          ensureMasterServersIsMutable();
+          masterServers_.add(value);
           onChanged();
         } else {
-          serverNameBuilder_.mergeFrom(value);
+          masterServersBuilder_.addMessage(value);
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public Builder clearServerName() {
-        if (serverNameBuilder_ == null) {
-          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      public Builder addMasterServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry value) {
+        if (masterServersBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMasterServersIsMutable();
+          masterServers_.add(index, value);
           onChanged();
         } else {
-          serverNameBuilder_.clear();
+          masterServersBuilder_.addMessage(index, value);
         }
-        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getServerNameFieldBuilder().getBuilder();
+      public Builder addMasterServers(
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) {
+        if (masterServersBuilder_ == null) {
+          ensureMasterServersIsMutable();
+          masterServers_.add(builderForValue.build());
+          onChanged();
+        } else {
+          masterServersBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
-        if (serverNameBuilder_ != null) {
-          return serverNameBuilder_.getMessageOrBuilder();
+      public Builder addMasterServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder builderForValue) {
+        if (masterServersBuilder_ == null) {
+          ensureMasterServersIsMutable();
+          masterServers_.add(index, builderForValue.build());
+          onChanged();
         } else {
-          return serverName_;
+          masterServersBuilder_.addMessage(index, builderForValue.build());
         }
+        return this;
       }
       /**
-       * <code>optional .hbase.pb.ServerName server_name = 1;</code>
-       *
-       * <pre>
-       ** Not set if an active master could not be determined. 
-       * </pre>
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
        */
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-          getServerNameFieldBuilder() {
-        if (serverNameBuilder_ == null) {
-          serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  serverName_,
+      public Builder addAllMasterServers(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry> values) {
+        if (masterServersBuilder_ == null) {
+          ensureMasterServersIsMutable();
+          super.addAll(values, masterServers_);
+          onChanged();
+        } else {
+          masterServersBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public Builder clearMasterServers() {
+        if (masterServersBuilder_ == null) {
+          masterServers_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          masterServersBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public Builder removeMasterServers(int index) {
+        if (masterServersBuilder_ == null) {
+          ensureMasterServersIsMutable();
+          masterServers_.remove(index);
+          onChanged();
+        } else {
+          masterServersBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder getMasterServersBuilder(
+          int index) {
+        return getMasterServersFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder getMasterServersOrBuilder(
+          int index) {
+        if (masterServersBuilder_ == null) {
+          return masterServers_.get(index);  } else {
+          return masterServersBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> 
+           getMasterServersOrBuilderList() {
+        if (masterServersBuilder_ != null) {
+          return masterServersBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(masterServers_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder() {
+        return getMasterServersFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder addMasterServersBuilder(
+          int index) {
+        return getMasterServersFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.GetMastersResponseEntry master_servers = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder> 
+           getMasterServersBuilderList() {
+        return getMasterServersFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder> 
+          getMasterServersFieldBuilder() {
+        if (masterServersBuilder_ == null) {
+          masterServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntryOrBuilder>(
+                  masterServers_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
                   getParentForChildren(),
                   isClean());
-          serverName_ = null;
+          masterServers_ = null;
         }
-        return serverNameBuilder_;
+        return masterServersBuilder_;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.GetActiveMasterResponse)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetMastersResponse)
     }
 
     static {
-      defaultInstance = new GetActiveMasterResponse(true);
+      defaultInstance = new GetMastersResponse(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.GetActiveMasterResponse)
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetMastersResponse)
   }
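
A minimal usage sketch for the generated message above, assuming the standard
protobuf 2.x builder API reflected in this diff; the host name, port, and
start code are hypothetical values, not anything from this commit:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;

    // One entry per registered master; is_active distinguishes the active
    // master from the backups.
    GetMastersResponseEntry entry = GetMastersResponseEntry.newBuilder()
        .setServerName(ServerName.newBuilder()
            .setHostName("master1.example.com")
            .setPort(16000)
            .setStartCode(1L)
            .build())
        .setIsActive(true)
        .build();
    GetMastersResponse response = GetMastersResponse.newBuilder()
        .addMasterServers(entry)
        .build();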
 
   public interface GetMetaRegionLocationsRequestOrBuilder
@@ -75043,17 +75806,18 @@ public final class MasterProtos {
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done);
 
       /**
-       * <code>rpc GetActiveMaster(.hbase.pb.GetActiveMasterRequest) returns (.hbase.pb.GetActiveMasterResponse);</code>
+       * <code>rpc GetMasters(.hbase.pb.GetMastersRequest) returns (.hbase.pb.GetMastersResponse);</code>
        *
        * <pre>
        **
-       * Get active master server name for this cluster.
+       * Get the list of master servers registered with this cluster. The list includes both the
+       * active master and backup masters.
        * </pre>
        */
-      public abstract void getActiveMaster(
+      public abstract void getMasters(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done);
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse> done);
 
       /**
        * <code>rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse);</code>
@@ -75095,11 +75859,11 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
-        public  void getActiveMaster(
+        public  void getMasters(
             com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
-            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done) {
-          impl.getActiveMaster(controller, request, done);
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse> done) {
+          impl.getMasters(controller, request, done);
         }
 
         @java.lang.Override
@@ -75143,7 +75907,7 @@ public final class MasterProtos {
             case 0:
               return impl.getClusterId(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest)request);
             case 1:
-              return impl.getActiveMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)request);
+              return impl.getMasters(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)request);
             case 2:
               return impl.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request);
             case 3:
@@ -75165,7 +75929,7 @@ public final class MasterProtos {
             case 0:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance();
             case 1:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
             case 3:
@@ -75187,7 +75951,7 @@ public final class MasterProtos {
             case 0:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance();
             case 1:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
             case 3:
@@ -75214,17 +75978,18 @@ public final class MasterProtos {
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse> done);
 
     /**
-     * <code>rpc GetActiveMaster(.hbase.pb.GetActiveMasterRequest) returns (.hbase.pb.GetActiveMasterResponse);</code>
+     * <code>rpc GetMasters(.hbase.pb.GetMastersRequest) returns (.hbase.pb.GetMastersResponse);</code>
      *
      * <pre>
      **
-     * Get active master server name for this cluster.
+     * Get the list of master servers registered with this cluster. The list includes both the
+     * active master and backup masters.
      * </pre>
      */
-    public abstract void getActiveMaster(
+    public abstract void getMasters(
         com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
-        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done);
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse> done);
 
     /**
      * <code>rpc GetMetaRegionLocations(.hbase.pb.GetMetaRegionLocationsRequest) returns (.hbase.pb.GetMetaRegionLocationsResponse);</code>
@@ -75280,8 +76045,8 @@ public final class MasterProtos {
               done));
           return;
         case 1:
-          this.getActiveMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse>specializeCallback(
+          this.getMasters(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse>specializeCallback(
               done));
           return;
         case 2:
@@ -75311,7 +76076,7 @@ public final class MasterProtos {
         case 0:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest.getDefaultInstance();
         case 1:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
         case 3:
@@ -75333,7 +76098,7 @@ public final class MasterProtos {
         case 0:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance();
         case 1:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
         case 3:
@@ -75374,19 +76139,19 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse.getDefaultInstance()));
       }
 
-      public  void getActiveMaster(
+      public  void getMasters(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse> done) {
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse> done) {
         channel.callMethod(
           getDescriptor().getMethods().get(1),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance(),
           com.google.protobuf.RpcUtil.generalizeCallback(
             done,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.class,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance()));
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance()));
       }
 
       public  void getMetaRegionLocations(
@@ -75431,9 +76196,9 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest request)
           throws com.google.protobuf.ServiceException;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getActiveMaster(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getMasters(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request)
           throws com.google.protobuf.ServiceException;
 
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse getMetaRegionLocations(
@@ -75466,15 +76231,15 @@ public final class MasterProtos {
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse getActiveMaster(
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse getMasters(
           com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest request)
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse) channel.callBlockingMethod(
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse) channel.callBlockingMethod(
           getDescriptor().getMethods().get(1),
           controller,
           request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance());
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse.getDefaultInstance());
       }
 
 
@@ -76142,15 +76907,20 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_GetActiveMasterRequest_descriptor;
+    internal_static_hbase_pb_GetMastersRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetMastersResponseEntry_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable;
+      internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_GetActiveMasterResponse_descriptor;
+    internal_static_hbase_pb_GetMastersResponse_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable;
+      internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor;
   private static
@@ -76390,174 +77160,176 @@ public final class MasterProtos {
       "anupEnabledRequest\"3\n IsSnapshotCleanupE" +
       "nabledResponse\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetCl" +
       "usterIdRequest\"*\n\024GetClusterIdResponse\022\022",
-      "\n\ncluster_id\030\001 \001(\t\"\030\n\026GetActiveMasterReq" +
-      "uest\"D\n\027GetActiveMasterResponse\022)\n\013serve" +
-      "r_name\030\001 \001(\0132\024.hbase.pb.ServerName\"\037\n\035Ge" +
-      "tMetaRegionLocationsRequest\"R\n\036GetMetaRe" +
-      "gionLocationsResponse\0220\n\016meta_locations\030" +
-      "\001 \003(\0132\030.hbase.pb.RegionLocation\"\025\n\023GetNu" +
-      "mLiveRSRequest\"2\n\024GetNumLiveRSResponse\022\032" +
-      "\n\022num_region_servers\030\001 \002(\005*(\n\020MasterSwit" +
-      "chType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\363.\n\rMasterS" +
-      "ervice\022e\n\024GetSchemaAlterStatus\022%.hbase.p",
-      "b.GetSchemaAlterStatusRequest\032&.hbase.pb" +
-      ".GetSchemaAlterStatusResponse\022b\n\023GetTabl" +
-      "eDescriptors\022$.hbase.pb.GetTableDescript" +
-      "orsRequest\032%.hbase.pb.GetTableDescriptor" +
-      "sResponse\022P\n\rGetTableNames\022\036.hbase.pb.Ge" +
-      "tTableNamesRequest\032\037.hbase.pb.GetTableNa" +
-      "mesResponse\022Y\n\020GetClusterStatus\022!.hbase." +
-      "pb.GetClusterStatusRequest\032\".hbase.pb.Ge" +
-      "tClusterStatusResponse\022V\n\017IsMasterRunnin" +
-      "g\022 .hbase.pb.IsMasterRunningRequest\032!.hb",
-      "ase.pb.IsMasterRunningResponse\022D\n\tAddCol" +
-      "umn\022\032.hbase.pb.AddColumnRequest\032\033.hbase." +
-      "pb.AddColumnResponse\022M\n\014DeleteColumn\022\035.h" +
-      "base.pb.DeleteColumnRequest\032\036.hbase.pb.D" +
-      "eleteColumnResponse\022M\n\014ModifyColumn\022\035.hb" +
-      "ase.pb.ModifyColumnRequest\032\036.hbase.pb.Mo" +
-      "difyColumnResponse\022G\n\nMoveRegion\022\033.hbase" +
-      ".pb.MoveRegionRequest\032\034.hbase.pb.MoveReg" +
-      "ionResponse\022k\n\026DispatchMergingRegions\022\'." +
-      "hbase.pb.DispatchMergingRegionsRequest\032(",
-      ".hbase.pb.DispatchMergingRegionsResponse" +
-      "\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegion" +
-      "Request\032\036.hbase.pb.AssignRegionResponse\022" +
-      "S\n\016UnassignRegion\022\037.hbase.pb.UnassignReg" +
-      "ionRequest\032 .hbase.pb.UnassignRegionResp" +
-      "onse\022P\n\rOfflineRegion\022\036.hbase.pb.Offline" +
-      "RegionRequest\032\037.hbase.pb.OfflineRegionRe" +
-      "sponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteT" +
-      "ableRequest\032\035.hbase.pb.DeleteTableRespon" +
-      "se\022P\n\rtruncateTable\022\036.hbase.pb.TruncateT",
-      "ableRequest\032\037.hbase.pb.TruncateTableResp" +
-      "onse\022J\n\013EnableTable\022\034.hbase.pb.EnableTab" +
-      "leRequest\032\035.hbase.pb.EnableTableResponse" +
-      "\022M\n\014DisableTable\022\035.hbase.pb.DisableTable" +
-      "Request\032\036.hbase.pb.DisableTableResponse\022" +
-      "J\n\013ModifyTable\022\034.hbase.pb.ModifyTableReq" +
-      "uest\032\035.hbase.pb.ModifyTableResponse\022J\n\013C" +
-      "reateTable\022\034.hbase.pb.CreateTableRequest" +
-      "\032\035.hbase.pb.CreateTableResponse\022A\n\010Shutd" +
-      "own\022\031.hbase.pb.ShutdownRequest\032\032.hbase.p",
-      "b.ShutdownResponse\022G\n\nStopMaster\022\033.hbase" +
-      ".pb.StopMasterRequest\032\034.hbase.pb.StopMas" +
-      "terResponse\022h\n\031IsMasterInMaintenanceMode" +
-      "\022$.hbase.pb.IsInMaintenanceModeRequest\032%" +
-      ".hbase.pb.IsInMaintenanceModeResponse\022>\n" +
-      "\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hba" +
-      "se.pb.BalanceResponse\022_\n\022SetBalancerRunn" +
-      "ing\022#.hbase.pb.SetBalancerRunningRequest" +
-      "\032$.hbase.pb.SetBalancerRunningResponse\022\\" +
-      "\n\021IsBalancerEnabled\022\".hbase.pb.IsBalance",
-      "rEnabledRequest\032#.hbase.pb.IsBalancerEna" +
-      "bledResponse\022k\n\026SetSplitOrMergeEnabled\022\'" +
-      ".hbase.pb.SetSplitOrMergeEnabledRequest\032" +
-      "(.hbase.pb.SetSplitOrMergeEnabledRespons" +
-      "e\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.Is" +
-      "SplitOrMergeEnabledRequest\032\'.hbase.pb.Is" +
-      "SplitOrMergeEnabledResponse\022D\n\tNormalize" +
-      "\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb." +
-      "NormalizeResponse\022e\n\024SetNormalizerRunnin" +
-      "g\022%.hbase.pb.SetNormalizerRunningRequest",
-      "\032&.hbase.pb.SetNormalizerRunningResponse" +
-      "\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNor" +
-      "malizerEnabledRequest\032%.hbase.pb.IsNorma" +
-      "lizerEnabledResponse\022S\n\016RunCatalogScan\022\037" +
-      ".hbase.pb.RunCatalogScanRequest\032 .hbase." +
-      "pb.RunCatalogScanResponse\022e\n\024EnableCatal" +
-      "ogJanitor\022%.hbase.pb.EnableCatalogJanito" +
-      "rRequest\032&.hbase.pb.EnableCatalogJanitor" +
-      "Response\022n\n\027IsCatalogJanitorEnabled\022(.hb" +
-      "ase.pb.IsCatalogJanitorEnabledRequest\032).",
-      "hbase.pb.IsCatalogJanitorEnabledResponse" +
-      "\022V\n\017RunCleanerChore\022 .hbase.pb.RunCleane" +
-      "rChoreRequest\032!.hbase.pb.RunCleanerChore" +
-      "Response\022k\n\026SetCleanerChoreRunning\022\'.hba" +
-      "se.pb.SetCleanerChoreRunningRequest\032(.hb" +
-      "ase.pb.SetCleanerChoreRunningResponse\022h\n" +
-      "\025IsCleanerChoreEnabled\022&.hbase.pb.IsClea" +
-      "nerChoreEnabledRequest\032\'.hbase.pb.IsClea" +
-      "nerChoreEnabledResponse\022^\n\021ExecMasterSer" +
-      "vice\022#.hbase.pb.CoprocessorServiceReques",
-      "t\032$.hbase.pb.CoprocessorServiceResponse\022" +
-      "A\n\010Snapshot\022\031.hbase.pb.SnapshotRequest\032\032" +
-      ".hbase.pb.SnapshotResponse\022h\n\025GetComplet" +
-      "edSnapshots\022&.hbase.pb.GetCompletedSnaps" +
-      "hotsRequest\032\'.hbase.pb.GetCompletedSnaps" +
-      "hotsResponse\022S\n\016DeleteSnapshot\022\037.hbase.p" +
-      "b.DeleteSnapshotRequest\032 .hbase.pb.Delet" +
-      "eSnapshotResponse\022S\n\016IsSnapshotDone\022\037.hb" +
-      "ase.pb.IsSnapshotDoneRequest\032 .hbase.pb." +
-      "IsSnapshotDoneResponse\022V\n\017RestoreSnapsho",
-      "t\022 .hbase.pb.RestoreSnapshotRequest\032!.hb" +
-      "ase.pb.RestoreSnapshotResponse\022h\n\025IsRest" +
-      "oreSnapshotDone\022&.hbase.pb.IsRestoreSnap" +
-      "shotDoneRequest\032\'.hbase.pb.IsRestoreSnap" +
-      "shotDoneResponse\022P\n\rExecProcedure\022\036.hbas" +
-      "e.pb.ExecProcedureRequest\032\037.hbase.pb.Exe" +
-      "cProcedureResponse\022W\n\024ExecProcedureWithR" +
-      "et\022\036.hbase.pb.ExecProcedureRequest\032\037.hba" +
-      "se.pb.ExecProcedureResponse\022V\n\017IsProcedu" +
-      "reDone\022 .hbase.pb.IsProcedureDoneRequest",
-      "\032!.hbase.pb.IsProcedureDoneResponse\022V\n\017M" +
-      "odifyNamespace\022 .hbase.pb.ModifyNamespac" +
-      "eRequest\032!.hbase.pb.ModifyNamespaceRespo" +
-      "nse\022V\n\017CreateNamespace\022 .hbase.pb.Create" +
-      "NamespaceRequest\032!.hbase.pb.CreateNamesp" +
-      "aceResponse\022V\n\017DeleteNamespace\022 .hbase.p" +
-      "b.DeleteNamespaceRequest\032!.hbase.pb.Dele" +
-      "teNamespaceResponse\022k\n\026GetNamespaceDescr" +
-      "iptor\022\'.hbase.pb.GetNamespaceDescriptorR" +
-      "equest\032(.hbase.pb.GetNamespaceDescriptor",
-      "Response\022q\n\030ListNamespaceDescriptors\022).h" +
-      "base.pb.ListNamespaceDescriptorsRequest\032" +
-      "*.hbase.pb.ListNamespaceDescriptorsRespo" +
-      "nse\022\206\001\n\037ListTableDescriptorsByNamespace\022" +
-      "0.hbase.pb.ListTableDescriptorsByNamespa" +
-      "ceRequest\0321.hbase.pb.ListTableDescriptor" +
-      "sByNamespaceResponse\022t\n\031ListTableNamesBy" +
-      "Namespace\022*.hbase.pb.ListTableNamesByNam" +
-      "espaceRequest\032+.hbase.pb.ListTableNamesB" +
-      "yNamespaceResponse\022A\n\010SetQuota\022\031.hbase.p",
-      "b.SetQuotaRequest\032\032.hbase.pb.SetQuotaRes" +
-      "ponse\022x\n\037getLastMajorCompactionTimestamp" +
-      "\022).hbase.pb.MajorCompactionTimestampRequ" +
-      "est\032*.hbase.pb.MajorCompactionTimestampR" +
-      "esponse\022\212\001\n(getLastMajorCompactionTimest" +
-      "ampForRegion\0222.hbase.pb.MajorCompactionT" +
-      "imestampForRegionRequest\032*.hbase.pb.Majo" +
-      "rCompactionTimestampResponse\022_\n\022getProce" +
-      "dureResult\022#.hbase.pb.GetProcedureResult" +
-      "Request\032$.hbase.pb.GetProcedureResultRes",
-      "ponse\022h\n\027getSecurityCapabilities\022%.hbase" +
-      ".pb.SecurityCapabilitiesRequest\032&.hbase." +
-      "pb.SecurityCapabilitiesResponse\022S\n\016Abort" +
-      "Procedure\022\037.hbase.pb.AbortProcedureReque" +
-      "st\032 .hbase.pb.AbortProcedureResponse\022S\n\016" +
-      "ListProcedures\022\037.hbase.pb.ListProcedures" +
-      "Request\032 .hbase.pb.ListProceduresRespons" +
-      "e\022Y\n\020ClearDeadServers\022!.hbase.pb.ClearDe" +
-      "adServersRequest\032\".hbase.pb.ClearDeadSer" +
-      "versResponse\022S\n\016ListNamespaces\022\037.hbase.p",
-      "b.ListNamespacesRequest\032 .hbase.pb.ListN" +
-      "amespacesResponse\022b\n\025SwitchSnapshotClean" +
-      "up\022#.hbase.pb.SetSnapshotCleanupRequest\032" +
-      "$.hbase.pb.SetSnapshotCleanupResponse\022q\n" +
-      "\030IsSnapshotCleanupEnabled\022).hbase.pb.IsS" +
-      "napshotCleanupEnabledRequest\032*.hbase.pb." +
-      "IsSnapshotCleanupEnabledResponse\022P\n\rGetT" +
-      "ableState\022\036.hbase.pb.GetTableStateReques" +
-      "t\032\037.hbase.pb.GetTableStateResponse2\366\002\n\021C" +
-      "lientMetaService\022M\n\014GetClusterId\022\035.hbase",
-      ".pb.GetClusterIdRequest\032\036.hbase.pb.GetCl" +
-      "usterIdResponse\022V\n\017GetActiveMaster\022 .hba" +
-      "se.pb.GetActiveMasterRequest\032!.hbase.pb." +
-      "GetActiveMasterResponse\022k\n\026GetMetaRegion" +
+      "\n\ncluster_id\030\001 \001(\t\"\023\n\021GetMastersRequest\"" +
+      "W\n\027GetMastersResponseEntry\022)\n\013server_nam" +
+      "e\030\001 \002(\0132\024.hbase.pb.ServerName\022\021\n\tis_acti" +
+      "ve\030\002 \002(\010\"O\n\022GetMastersResponse\0229\n\016master" +
+      "_servers\030\001 \003(\0132!.hbase.pb.GetMastersResp" +
+      "onseEntry\"\037\n\035GetMetaRegionLocationsReque" +
+      "st\"R\n\036GetMetaRegionLocationsResponse\0220\n\016" +
+      "meta_locations\030\001 \003(\0132\030.hbase.pb.RegionLo" +
+      "cation\"\025\n\023GetNumLiveRSRequest\"2\n\024GetNumL" +
+      "iveRSResponse\022\032\n\022num_region_servers\030\001 \002(",
+      "\005*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERG" +
+      "E\020\0012\363.\n\rMasterService\022e\n\024GetSchemaAlterS" +
+      "tatus\022%.hbase.pb.GetSchemaAlterStatusReq" +
+      "uest\032&.hbase.pb.GetSchemaAlterStatusResp" +
+      "onse\022b\n\023GetTableDescriptors\022$.hbase.pb.G" +
+      "etTableDescriptorsRequest\032%.hbase.pb.Get" +
+      "TableDescriptorsResponse\022P\n\rGetTableName" +
+      "s\022\036.hbase.pb.GetTableNamesRequest\032\037.hbas" +
+      "e.pb.GetTableNamesResponse\022Y\n\020GetCluster" +
+      "Status\022!.hbase.pb.GetClusterStatusReques",
+      "t\032\".hbase.pb.GetClusterStatusResponse\022V\n" +
+      "\017IsMasterRunning\022 .hbase.pb.IsMasterRunn" +
+      "ingRequest\032!.hbase.pb.IsMasterRunningRes" +
+      "ponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumnR" +
+      "equest\032\033.hbase.pb.AddColumnResponse\022M\n\014D" +
+      "eleteColumn\022\035.hbase.pb.DeleteColumnReque" +
+      "st\032\036.hbase.pb.DeleteColumnResponse\022M\n\014Mo" +
+      "difyColumn\022\035.hbase.pb.ModifyColumnReques" +
+      "t\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMov" +
+      "eRegion\022\033.hbase.pb.MoveRegionRequest\032\034.h",
+      "base.pb.MoveRegionResponse\022k\n\026DispatchMe" +
+      "rgingRegions\022\'.hbase.pb.DispatchMergingR" +
+      "egionsRequest\032(.hbase.pb.DispatchMerging" +
+      "RegionsResponse\022M\n\014AssignRegion\022\035.hbase." +
+      "pb.AssignRegionRequest\032\036.hbase.pb.Assign" +
+      "RegionResponse\022S\n\016UnassignRegion\022\037.hbase" +
+      ".pb.UnassignRegionRequest\032 .hbase.pb.Una" +
+      "ssignRegionResponse\022P\n\rOfflineRegion\022\036.h" +
+      "base.pb.OfflineRegionRequest\032\037.hbase.pb." +
+      "OfflineRegionResponse\022J\n\013DeleteTable\022\034.h",
+      "base.pb.DeleteTableRequest\032\035.hbase.pb.De" +
+      "leteTableResponse\022P\n\rtruncateTable\022\036.hba" +
+      "se.pb.TruncateTableRequest\032\037.hbase.pb.Tr" +
+      "uncateTableResponse\022J\n\013EnableTable\022\034.hba" +
+      "se.pb.EnableTableRequest\032\035.hbase.pb.Enab" +
+      "leTableResponse\022M\n\014DisableTable\022\035.hbase." +
+      "pb.DisableTableRequest\032\036.hbase.pb.Disabl" +
+      "eTableResponse\022J\n\013ModifyTable\022\034.hbase.pb" +
+      ".ModifyTableRequest\032\035.hbase.pb.ModifyTab" +
+      "leResponse\022J\n\013CreateTable\022\034.hbase.pb.Cre",
+      "ateTableRequest\032\035.hbase.pb.CreateTableRe" +
+      "sponse\022A\n\010Shutdown\022\031.hbase.pb.ShutdownRe" +
+      "quest\032\032.hbase.pb.ShutdownResponse\022G\n\nSto" +
+      "pMaster\022\033.hbase.pb.StopMasterRequest\032\034.h" +
+      "base.pb.StopMasterResponse\022h\n\031IsMasterIn" +
+      "MaintenanceMode\022$.hbase.pb.IsInMaintenan" +
+      "ceModeRequest\032%.hbase.pb.IsInMaintenance" +
+      "ModeResponse\022>\n\007Balance\022\030.hbase.pb.Balan" +
+      "ceRequest\032\031.hbase.pb.BalanceResponse\022_\n\022" +
+      "SetBalancerRunning\022#.hbase.pb.SetBalance",
+      "rRunningRequest\032$.hbase.pb.SetBalancerRu" +
+      "nningResponse\022\\\n\021IsBalancerEnabled\022\".hba" +
+      "se.pb.IsBalancerEnabledRequest\032#.hbase.p" +
+      "b.IsBalancerEnabledResponse\022k\n\026SetSplitO" +
+      "rMergeEnabled\022\'.hbase.pb.SetSplitOrMerge" +
+      "EnabledRequest\032(.hbase.pb.SetSplitOrMerg" +
+      "eEnabledResponse\022h\n\025IsSplitOrMergeEnable" +
+      "d\022&.hbase.pb.IsSplitOrMergeEnabledReques" +
+      "t\032\'.hbase.pb.IsSplitOrMergeEnabledRespon" +
+      "se\022D\n\tNormalize\022\032.hbase.pb.NormalizeRequ",
+      "est\032\033.hbase.pb.NormalizeResponse\022e\n\024SetN" +
+      "ormalizerRunning\022%.hbase.pb.SetNormalize" +
+      "rRunningRequest\032&.hbase.pb.SetNormalizer" +
+      "RunningResponse\022b\n\023IsNormalizerEnabled\022$" +
+      ".hbase.pb.IsNormalizerEnabledRequest\032%.h" +
+      "base.pb.IsNormalizerEnabledResponse\022S\n\016R" +
+      "unCatalogScan\022\037.hbase.pb.RunCatalogScanR" +
+      "equest\032 .hbase.pb.RunCatalogScanResponse" +
+      "\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Enab" +
+      "leCatalogJanitorRequest\032&.hbase.pb.Enabl",
+      "eCatalogJanitorResponse\022n\n\027IsCatalogJani" +
+      "torEnabled\022(.hbase.pb.IsCatalogJanitorEn" +
+      "abledRequest\032).hbase.pb.IsCatalogJanitor" +
+      "EnabledResponse\022V\n\017RunCleanerChore\022 .hba" +
+      "se.pb.RunCleanerChoreRequest\032!.hbase.pb." +
+      "RunCleanerChoreResponse\022k\n\026SetCleanerCho" +
+      "reRunning\022\'.hbase.pb.SetCleanerChoreRunn" +
+      "ingRequest\032(.hbase.pb.SetCleanerChoreRun" +
+      "ningResponse\022h\n\025IsCleanerChoreEnabled\022&." +
+      "hbase.pb.IsCleanerChoreEnabledRequest\032\'.",
+      "hbase.pb.IsCleanerChoreEnabledResponse\022^" +
+      "\n\021ExecMasterService\022#.hbase.pb.Coprocess" +
+      "orServiceRequest\032$.hbase.pb.CoprocessorS" +
+      "erviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sn" +
+      "apshotRequest\032\032.hbase.pb.SnapshotRespons" +
+      "e\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Ge" +
+      "tCompletedSnapshotsRequest\032\'.hbase.pb.Ge" +
+      "tCompletedSnapshotsResponse\022S\n\016DeleteSna" +
+      "pshot\022\037.hbase.pb.DeleteSnapshotRequest\032 " +
+      ".hbase.pb.DeleteSnapshotResponse\022S\n\016IsSn",
+      "apshotDone\022\037.hbase.pb.IsSnapshotDoneRequ" +
+      "est\032 .hbase.pb.IsSnapshotDoneResponse\022V\n" +
+      "\017RestoreSnapshot\022 .hbase.pb.RestoreSnaps" +
+      "hotRequest\032!.hbase.pb.RestoreSnapshotRes" +
+      "ponse\022h\n\025IsRestoreSnapshotDone\022&.hbase.p" +
+      "b.IsRestoreSnapshotDoneRequest\032\'.hbase.p" +
+      "b.IsRestoreSnapshotDoneResponse\022P\n\rExecP" +
+      "rocedure\022\036.hbase.pb.ExecProcedureRequest" +
+      "\032\037.hbase.pb.ExecProcedureResponse\022W\n\024Exe" +
+      "cProcedureWithRet\022\036.hbase.pb.ExecProcedu",
+      "reRequest\032\037.hbase.pb.ExecProcedureRespon" +
+      "se\022V\n\017IsProcedureDone\022 .hbase.pb.IsProce" +
+      "dureDoneRequest\032!.hbase.pb.IsProcedureDo" +
+      "neResponse\022V\n\017ModifyNamespace\022 .hbase.pb" +
+      ".ModifyNamespaceRequest\032!.hbase.pb.Modif" +
+      "yNamespaceResponse\022V\n\017CreateNamespace\022 ." +
+      "hbase.pb.CreateNamespaceRequest\032!.hbase." +
+      "pb.CreateNamespaceResponse\022V\n\017DeleteName" +
+      "space\022 .hbase.pb.DeleteNamespaceRequest\032" +
+      "!.hbase.pb.DeleteNamespaceResponse\022k\n\026Ge",
+      "tNamespaceDescriptor\022\'.hbase.pb.GetNames" +
+      "paceDescriptorRequest\032(.hbase.pb.GetName" +
+      "spaceDescriptorResponse\022q\n\030ListNamespace" +
+      "Descriptors\022).hbase.pb.ListNamespaceDesc" +
+      "riptorsRequest\032*.hbase.pb.ListNamespaceD" +
+      "escriptorsResponse\022\206\001\n\037ListTableDescript" +
+      "orsByNamespace\0220.hbase.pb.ListTableDescr" +
+      "iptorsByNamespaceRequest\0321.hbase.pb.List" +
+      "TableDescriptorsByNamespaceResponse\022t\n\031L" +
+      "istTableNamesByNamespace\022*.hbase.pb.List",
+      "TableNamesByNamespaceRequest\032+.hbase.pb." +
+      "ListTableNamesByNamespaceResponse\022A\n\010Set" +
+      "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase" +
+      ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" +
+      "actionTimestamp\022).hbase.pb.MajorCompacti" +
+      "onTimestampRequest\032*.hbase.pb.MajorCompa" +
+      "ctionTimestampResponse\022\212\001\n(getLastMajorC" +
+      "ompactionTimestampForRegion\0222.hbase.pb.M" +
+      "ajorCompactionTimestampForRegionRequest\032" +
+      "*.hbase.pb.MajorCompactionTimestampRespo",
+      "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get" +
+      "ProcedureResultRequest\032$.hbase.pb.GetPro" +
+      "cedureResultResponse\022h\n\027getSecurityCapab" +
+      "ilities\022%.hbase.pb.SecurityCapabilitiesR" +
+      "equest\032&.hbase.pb.SecurityCapabilitiesRe" +
+      "sponse\022S\n\016AbortProcedure\022\037.hbase.pb.Abor" +
+      "tProcedureRequest\032 .hbase.pb.AbortProced" +
+      "ureResponse\022S\n\016ListProcedures\022\037.hbase.pb" +
+      ".ListProceduresRequest\032 .hbase.pb.ListPr" +
+      "oceduresResponse\022Y\n\020ClearDeadServers\022!.h",
+      "base.pb.ClearDeadServersRequest\032\".hbase." +
+      "pb.ClearDeadServersResponse\022S\n\016ListNames" +
+      "paces\022\037.hbase.pb.ListNamespacesRequest\032 " +
+      ".hbase.pb.ListNamespacesResponse\022b\n\025Swit" +
+      "chSnapshotCleanup\022#.hbase.pb.SetSnapshot" +
+      "CleanupRequest\032$.hbase.pb.SetSnapshotCle" +
+      "anupResponse\022q\n\030IsSnapshotCleanupEnabled" +
+      "\022).hbase.pb.IsSnapshotCleanupEnabledRequ" +
+      "est\032*.hbase.pb.IsSnapshotCleanupEnabledR" +
+      "esponse\022P\n\rGetTableState\022\036.hbase.pb.GetT",
+      "ableStateRequest\032\037.hbase.pb.GetTableStat" +
+      "eResponse2\347\002\n\021ClientMetaService\022M\n\014GetCl" +
+      "usterId\022\035.hbase.pb.GetClusterIdRequest\032\036" +
+      ".hbase.pb.GetClusterIdResponse\022G\n\nGetMas" +
+      "ters\022\033.hbase.pb.GetMastersRequest\032\034.hbas" +
+      "e.pb.GetMastersResponse\022k\n\026GetMetaRegion" +
       "Locations\022\'.hbase.pb.GetMetaRegionLocati" +
       "onsRequest\032(.hbase.pb.GetMetaRegionLocat" +
       "ionsResponse\022M\n\014GetNumLiveRS\022\035.hbase.pb." +
-      "GetNumLiveRSRequest\032\036.hbase.pb.GetNumLiv" +
+      "GetNumLiveRSRequest\032\036.hbase.pb.GetNumLiv",
       "eRSResponseBB\n*org.apache.hadoop.hbase.p" +
       "rotobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
     };
@@ -77328,38 +78100,44 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterIdResponse_descriptor,
               new java.lang.String[] { "ClusterId", });
-          internal_static_hbase_pb_GetActiveMasterRequest_descriptor =
+          internal_static_hbase_pb_GetMastersRequest_descriptor =
             getDescriptor().getMessageTypes().get(127);
-          internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable = new
+          internal_static_hbase_pb_GetMastersRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_GetActiveMasterRequest_descriptor,
+              internal_static_hbase_pb_GetMastersRequest_descriptor,
               new java.lang.String[] { });
-          internal_static_hbase_pb_GetActiveMasterResponse_descriptor =
+          internal_static_hbase_pb_GetMastersResponseEntry_descriptor =
             getDescriptor().getMessageTypes().get(128);
-          internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable = new
+          internal_static_hbase_pb_GetMastersResponseEntry_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_GetActiveMasterResponse_descriptor,
-              new java.lang.String[] { "ServerName", });
-          internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor =
+              internal_static_hbase_pb_GetMastersResponseEntry_descriptor,
+              new java.lang.String[] { "ServerName", "IsActive", });
+          internal_static_hbase_pb_GetMastersResponse_descriptor =
             getDescriptor().getMessageTypes().get(129);
+          internal_static_hbase_pb_GetMastersResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetMastersResponse_descriptor,
+              new java.lang.String[] { "MasterServers", });
+          internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor =
+            getDescriptor().getMessageTypes().get(130);
           internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(130);
+            getDescriptor().getMessageTypes().get(131);
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor,
               new java.lang.String[] { "MetaLocations", });
           internal_static_hbase_pb_GetNumLiveRSRequest_descriptor =
-            getDescriptor().getMessageTypes().get(131);
+            getDescriptor().getMessageTypes().get(132);
           internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetNumLiveRSRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetNumLiveRSResponse_descriptor =
-            getDescriptor().getMessageTypes().get(132);
+            getDescriptor().getMessageTypes().get(133);
           internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetNumLiveRSResponse_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index bf16dd5..be4c66e 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -922,12 +922,15 @@ message GetClusterIdResponse {
   optional string cluster_id = 1;
 }
 
-/** Request and response to get the currently active master name for this cluster */
-message GetActiveMasterRequest {
+/** Request and response to get the current list of all registered master servers */
+message GetMastersRequest {
 }
-message GetActiveMasterResponse {
-  /** Not set if an active master could not be determined. */
-  optional ServerName server_name = 1;
+message GetMastersResponseEntry {
+  required ServerName server_name = 1;
+  required bool is_active = 2;
+}
+message GetMastersResponse {
+  repeated GetMastersResponseEntry master_servers = 1;
+}
 
 /** Request and response to get the current list of meta region locations */
@@ -955,9 +958,10 @@ service ClientMetaService {
   rpc GetClusterId(GetClusterIdRequest) returns(GetClusterIdResponse);
 
   /**
-   * Get active master server name for this cluster.
+   * Get the list of master servers registered with this cluster. The list includes both the
+   * active master and backup masters.
    */
-  rpc GetActiveMaster(GetActiveMasterRequest) returns(GetActiveMasterResponse);
+  rpc GetMasters(GetMastersRequest) returns(GetMastersResponse);
 
   /**
    * Get current meta replicas' region locations.
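
For reference, a minimal client-side sketch of the new RPC through the
generated blocking stub; the BlockingRpcChannel and RpcController wiring is
assumed to come from the caller's existing RPC setup and is not shown here:

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.RpcController;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;

    void printMasters(BlockingRpcChannel channel, RpcController controller)
        throws ServiceException {
      ClientMetaService.BlockingInterface stub =
          ClientMetaService.newBlockingStub(channel);
      // A single round trip now returns the active master and the backups,
      // replacing the old GetActiveMaster call.
      GetMastersResponse response =
          stub.getMasters(controller, GetMastersRequest.getDefaultInstance());
      for (GetMastersResponseEntry entry : response.getMasterServersList()) {
        System.out.println((entry.getIsActive() ? "active: " : "backup: ")
            + entry.getServerName().getHostName());
      }
    }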
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index d92a48e..61ace78 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -17,7 +17,10 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.master;
+import com.google.common.collect.ImmutableList;
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
@@ -35,9 +38,10 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 /**
- * Handles everything on master-side related to master election.
+ * Handles everything on the master side related to master election. Keeps track of the
+ * currently active master and the registered backup masters.
  *
- * <p>Listens and responds to ZooKeeper notifications on the master znode,
+ * <p>Listens and responds to ZooKeeper notifications on the master znodes,
  * both <code>nodeCreated</code> and <code>nodeDeleted</code>.
  *
  * <p>Contains blocking methods which will hold up backup masters, waiting
@@ -64,18 +68,23 @@ public class ActiveMasterManager extends ZooKeeperListener {
   // Active master's server name. Invalidated anytime active master changes (based on ZK
   // notifications) and lazily fetched on-demand.
   // ServerName is immutable, so we don't need heavy synchronization around it.
-  private volatile ServerName activeMasterServerName;
+  volatile ServerName activeMasterServerName;
+  // Registered backup masters. The list is kept up to date based on ZK change notifications
+  // on the backup master znode.
+  private volatile ImmutableList<ServerName> backupMasters;
 
   /**
    * @param watcher ZK watcher
    * @param sn ServerName
    * @param master In an instance of a Master.
    */
-  ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) {
+  ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master)
+      throws InterruptedIOException {
     super(watcher);
     watcher.registerListener(this);
     this.sn = sn;
     this.master = master;
+    updateBackupMasters();
   }
 
   // will be set after jetty server is started
@@ -89,8 +98,18 @@ public class ActiveMasterManager extends ZooKeeperListener {
   }
 
   @Override
-  public void nodeDeleted(String path) {
+  public void nodeChildrenChanged(String path) {
+    if (path.equals(watcher.backupMasterAddressesZNode)) {
+      try {
+        updateBackupMasters();
+      } catch (InterruptedIOException ioe) {
+        LOG.error("Error updating backup masters", ioe);
+      }
+    }
+  }
 
+  @Override
+  public void nodeDeleted(String path) {
     // We need to keep track of the cluster's shutdown status while
     // we wait on the current master. We consider that, if the cluster
     // was already in a "shutdown" state when we started, that this master
@@ -101,7 +120,6 @@ public class ActiveMasterManager extends ZooKeeperListener {
     if(path.equals(watcher.clusterStateZNode) && !master.isStopped()) {
       clusterShutDown.set(true);
     }
-
     handle(path);
   }
 
@@ -111,6 +129,11 @@ public class ActiveMasterManager extends ZooKeeperListener {
     }
   }
 
+  private void updateBackupMasters() throws InterruptedIOException {
+    backupMasters =
+        ImmutableList.copyOf(MasterAddressTracker.getBackupMastersAndRenewWatch(watcher));
+  }
+
   /**
    * Fetches the active master's ServerName from zookeeper.
    */
@@ -320,4 +343,11 @@ public class ActiveMasterManager extends ZooKeeperListener {
       LOG.error(this.watcher.prefix("Error deleting our own master address node"), e);
     }
   }
+
+  /**
+   * @return list of registered backup masters.
+   */
+  public List<ServerName> getBackupMasters() {
+    return backupMasters;
+  }
 }
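
The manager now keeps the backup-master list as a volatile immutable snapshot that ZK
child-change notifications refresh. A minimal usage sketch, assuming a handle on a running
manager (the handle itself is hypothetical here):

    ActiveMasterManager manager = ...; // e.g. held by the hosting HMaster
    // Cheap read: no ZooKeeper round trip and no extra synchronization needed.
    for (ServerName backup : manager.getBackupMasters()) {
      LOG.info("Registered backup master: " + backup);
    }
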
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 2bb4362..1a2f52b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -27,7 +29,6 @@ import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -86,7 +87,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@@ -189,12 +189,8 @@ import org.mortbay.jetty.servlet.ServletHolder;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
 
 /**
- * HMaster is the "master server" for HBase. An HBase cluster has one active
- * master.  If many masters are started, all compete.  Whichever wins goes on to
  * run the cluster.  All others park themselves in their constructor until
  * master or cluster shutdown or until the active master loses its lease in
  * zookeeper.  Thereafter, all running master jostle to take over master role.
@@ -2548,56 +2544,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    */
   public ClusterStatus getClusterStatusWithoutCoprocessor() throws InterruptedIOException {
     // Build Set of backup masters from ZK nodes
-    List<String> backupMasterStrings;
-    try {
-      backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
-        this.zooKeeper.backupMasterAddressesZNode);
-    } catch (KeeperException e) {
-      LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
-      backupMasterStrings = null;
-    }
-
-    List<ServerName> backupMasters = null;
-    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
-      backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
-      for (String s: backupMasterStrings) {
-        try {
-          byte [] bytes;
-          try {
-            bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
-                this.zooKeeper.backupMasterAddressesZNode, s));
-          } catch (InterruptedException e) {
-            throw new InterruptedIOException();
-          }
-          if (bytes != null) {
-            ServerName sn;
-            try {
-              sn = ServerName.parseFrom(bytes);
-            } catch (DeserializationException e) {
-              LOG.warn("Failed parse, skipping registering backup server", e);
-              continue;
-            }
-            backupMasters.add(sn);
-          }
-        } catch (KeeperException e) {
-          LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
-                   "backup servers"), e);
-        }
-      }
-      Collections.sort(backupMasters, new Comparator<ServerName>() {
-        @Override
-        public int compare(ServerName s1, ServerName s2) {
-          return s1.getServerName().compareTo(s2.getServerName());
-        }});
-    }
-
+    List<ServerName> backupMasters = getBackupMasters();
     String clusterId = fileSystemManager != null ?
-      fileSystemManager.getClusterId().toString() : null;
+        fileSystemManager.getClusterId().toString() : null;
     Set<RegionState> regionsInTransition = assignmentManager != null ?
-      assignmentManager.getRegionStates().getRegionsInTransition() : null;
+        assignmentManager.getRegionStates().getRegionsInTransition() : null;
     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
     boolean balancerOn = loadBalancerTracker != null ?
-      loadBalancerTracker.isBalancerOn() : false;
+        loadBalancerTracker.isBalancerOn() : false;
     Map<ServerName, ServerLoad> onlineServers = null;
     Set<ServerName> deadServers = null;
     if (serverManager != null) {
@@ -2605,8 +2559,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       onlineServers = serverManager.getOnlineServers();
     }
     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
-      onlineServers, deadServers, serverName, backupMasters,
-      regionsInTransition, coprocessors, balancerOn);
+        onlineServers, deadServers, serverName, backupMasters,
+        regionsInTransition, coprocessors, balancerOn);
+  }
+
+  List<ServerName> getBackupMasters() {
+    return activeMasterManager.getBackupMasters();
   }
 
   /**
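
The refactor replaces roughly fifty lines of znode listing, fetching, and parsing with a
single read of the watcher-maintained cache. Consumers of ClusterStatus see no difference;
a hedged sketch of the unchanged caller-side view:

    ClusterStatus status = master.getClusterStatusWithoutCoprocessor();
    // Backup masters now come from ActiveMasterManager's cache rather than ad-hoc ZK reads.
    List<ServerName> backups = status.getBackupMasters();
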
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e4eb654..963b94e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -97,14 +97,15 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableReques
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMastersResponseEntry;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
@@ -1804,12 +1805,19 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public GetActiveMasterResponse getActiveMaster(RpcController rpcController,
-      GetActiveMasterRequest request) throws ServiceException {
-    GetActiveMasterResponse.Builder resp = GetActiveMasterResponse.newBuilder();
+  public GetMastersResponse getMasters(RpcController rpcController, GetMastersRequest request)
+      throws ServiceException {
+    GetMastersResponse.Builder resp = GetMastersResponse.newBuilder();
+    // Active master
     ServerName serverName = master.getActiveMaster();
     if (serverName != null) {
-      resp.setServerName(ProtobufUtil.toServerName(serverName));
+      resp.addMasterServers(GetMastersResponseEntry.newBuilder()
+          .setServerName(ProtobufUtil.toServerName(serverName)).setIsActive(true).build());
+    }
+    // Backup masters
+    for (ServerName backupMaster: master.getBackupMasters()) {
+      resp.addMasterServers(GetMastersResponseEntry.newBuilder().setServerName(
+          ProtobufUtil.toServerName(backupMaster)).setIsActive(false).build());
     }
     return resp.build();
   }
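
Server-side, the active master (when known) is appended first with is_active=true, followed
by the backups. A sketch of invoking the reshaped RPC from a blocking stub, modeled on
TestClientMetaServiceRPCs further below (getMasterStub is a hypothetical helper that opens a
ClientMetaService stub against one master):

    ClientMetaService.BlockingInterface stub = getMasterStub(masterServerName);
    GetMastersResponse resp =
        stub.getMasters(controller, GetMastersRequest.getDefaultInstance());
    // resp.getMasterServersList() carries the active entry first, then the backups.
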
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
new file mode 100644
index 0000000..7e2f2f7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestMasterAddressRefresher {
+
+  static class DummyConnection implements Connection {
+    private final Configuration conf;
+
+    DummyConnection(Configuration conf) {
+      this.conf = conf;
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+      return conf;
+    }
+
+    @Override
+    public Table getTable(TableName tableName) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Table getTable(TableName tableName, ExecutorService pool) throws IOException {
+      return null;
+    }
+
+    @Override
+    public BufferedMutator getBufferedMutator(TableName tableName) throws IOException {
+      return null;
+    }
+
+    @Override
+    public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
+      return null;
+    }
+
+    @Override
+    public RegionLocator getRegionLocator(TableName tableName) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Admin getAdmin() throws IOException {
+      return null;
+    }
+
+    @Override
+    public void close() throws IOException {
+
+    }
+
+    @Override
+    public boolean isClosed() {
+      return false;
+    }
+
+    @Override
+    public void abort(String why, Throwable e) {
+
+    }
+
+    @Override
+    public boolean isAborted() {
+      return false;
+    }
+  }
+
+  private static class DummyMasterRegistry extends MasterRegistry {
+
+    private final AtomicInteger getMastersCallCounter = new AtomicInteger(0);
+    private final List<Long> callTimeStamps = new ArrayList<>();
+
+    @Override
+    public void init(Connection connection) throws IOException {
+      super.init(connection);
+    }
+
+    @Override
+    List<ServerName> getMasters() {
+      getMastersCallCounter.incrementAndGet();
+      callTimeStamps.add(EnvironmentEdgeManager.currentTime());
+      return new ArrayList<>();
+    }
+
+    public int getMastersCount() {
+      return getMastersCallCounter.get();
+    }
+
+    public List<Long> getCallTimeStamps() {
+      return callTimeStamps;
+    }
+  }
+
+  @Test
+  public void testPeriodicMasterEndPointRefresh() throws IOException {
+    Configuration conf = HBaseConfiguration.create();
+    // Refresh every 1 second.
+    conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, 1);
+    conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 0);
+    final DummyMasterRegistry registry = new DummyMasterRegistry();
+    registry.init(new DummyConnection(conf));
+    // Wait (up to 5s) until more than 3 getMasters() RPCs show the 1s periodic refresh is running.
+    Waiter.waitFor(
+        conf, 5000, new Waiter.Predicate<Exception>() {
+          @Override
+          public boolean evaluate() throws Exception {
+            return registry.getMastersCount() > 3;
+          }
+        });
+  }
+
+  @Test
+  public void testDurationBetweenRefreshes() throws IOException {
+    Configuration conf = HBaseConfiguration.create();
+    // Disable periodic refresh
+    conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, Integer.MAX_VALUE);
+    // A minimum duration of 1s between refreshes
+    conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 1);
+    DummyMasterRegistry registry = new DummyMasterRegistry();
+    registry.init(new DummyConnection(conf));
+    // Issue a ton of manual refreshes.
+    for (int i = 0; i < 10000; i++) {
+      registry.masterAddressRefresher.refreshNow();
+      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
+    }
+    // The loop sleeps ~10000 ms in total, so a 1s throttle allows roughly <= 10 RPCs.
+    List<Long> callTimeStamps = registry.getCallTimeStamps();
+    // Actual calls to getMasters() should be much lower than the refresh count.
+    Assert.assertTrue(
+        String.valueOf(registry.getMastersCount()), registry.getMastersCount() <= 20);
+    Assert.assertTrue(callTimeStamps.size() > 0);
+    // Verify that the delta between subsequent RPCs is at least 1sec as configured.
+    for (int i = 1; i < callTimeStamps.size() - 1; i++) {
+      long delta = callTimeStamps.get(i) - callTimeStamps.get(i - 1);
+      // Few ms cushion to account for any env jitter.
+      Assert.assertTrue(callTimeStamps.toString(), delta > 990);
+    }
+  }
+}
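
The two configuration knobs this test exercises are the same ones a deployment would tune;
the values below are purely illustrative, not defaults:

    Configuration conf = HBaseConfiguration.create();
    // Cadence of the unconditional periodic master-list refresh.
    conf.setLong(MasterAddressRefresher.PERIODIC_REFRESH_INTERVAL_SECS, 300);
    // Throttle on error-triggered refreshNow() calls, so RPC storms cannot amplify.
    conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 60);
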
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
index 07f0100..0695e4b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -19,9 +19,15 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.protobuf.RpcController;
 import java.io.IOException;
+import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -33,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -41,6 +48,10 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
+
 @Category({ MediumTests.class, ClientTests.class })
 public class TestMasterRegistry {
 
@@ -59,6 +70,20 @@ public class TestMasterRegistry {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  private static class ExceptionInjectorRegistry extends MasterRegistry {
+    @Override
+    public String getClusterId() throws IOException {
+      GetClusterIdResponse resp = doCall(new Callable<GetClusterIdResponse>() {
+        @Override
+        public GetClusterIdResponse call(ClientMetaService.Interface stub, RpcController controller)
+            throws IOException {
+          throw new SocketTimeoutException("Injected exception.");
+        }
+      });
+      return resp.getClusterId();
+    }
+  }
+
   /**
    * Generates a string of dummy master addresses in host:port format. Every other hostname won't
    * have a port number.
@@ -130,4 +155,82 @@ public class TestMasterRegistry {
       registry.close();
     }
   }
+
+  /**
+   * Tests that the list of masters configured in the MasterRegistry is dynamically refreshed in the
+   * event of errors.
+   */
+  @Test
+  public void testDynamicMasterConfigurationRefresh() throws Exception {
+    Configuration conf = TEST_UTIL.getConnection().getConfiguration();
+    String currentMasterAddrs = Preconditions.checkNotNull(conf.get(HConstants.MASTER_ADDRS_KEY));
+    HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster();
+    // Add a non-working master
+    ServerName badServer = ServerName.valueOf("localhost", 1234, -1);
+    conf.set(HConstants.MASTER_ADDRS_KEY, badServer.toShortString() + "," + currentMasterAddrs);
+    // Do not limit the number of refreshes during the test run.
+    conf.setLong(MasterAddressRefresher.MIN_SECS_BETWEEN_REFRESHES, 0);
+    final ExceptionInjectorRegistry registry = new ExceptionInjectorRegistry();
+    try {
+      registry.init(TEST_UTIL.getConnection());
+      final ImmutableSet<String> masters = registry.getParsedMasterServers();
+      assertTrue(masters.contains(badServer.toString()));
+      // Make a registry RPC; this should trigger a refresh since one of the RPCs fails.
+      try {
+        registry.getClusterId();
+      } catch (MasterRegistryFetchException e) {
+        // Expected.
+      }
+
+      // Wait for new set of masters to be populated.
+      TEST_UTIL.waitFor(5000,
+          new Waiter.Predicate<Exception>() {
+            @Override
+            public boolean evaluate() throws Exception {
+              return !registry.getParsedMasterServers().equals(masters);
+            }
+          });
+      // The refreshed set of masters should not include the bad server.
+      final ImmutableSet<String> newMasters = registry.getParsedMasterServers();
+      // Only the three live masters remain; the bad one is gone.
+      assertEquals(3, newMasters.size());
+      assertFalse(newMasters.contains(badServer.toString()));
+      // Kill the active master
+      activeMaster.stopMaster();
+      TEST_UTIL.waitFor(10000,
+          new Waiter.Predicate<Exception>() {
+            @Override
+            public boolean evaluate() {
+              return TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads().size() == 2;
+            }
+          });
+      TEST_UTIL.getMiniHBaseCluster().waitForActiveAndReadyMaster(10000);
+      // Make a registry RPC; this should trigger a refresh since one of the RPCs fails.
+      try {
+        registry.getClusterId();
+      } catch (MasterRegistryFetchException e) {
+        // Expected.
+      }
+      // Wait until the killed master has de-registered.
+      TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return registry.getMasters().size() == 2;
+        }
+      });
+      TEST_UTIL.waitFor(20000, new Waiter.Predicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return registry.getParsedMasterServers().size() == 2;
+        }
+      });
+      final ImmutableSet<String> newMasters2 = registry.getParsedMasterServers();
+      assertEquals(2, newMasters2.size());
+      assertFalse(newMasters2.contains(activeMaster.getServerName().toString()));
+    } finally {
+      registry.close();
+      // Reset the state, add a killed master.
+      TEST_UTIL.getMiniHBaseCluster().startMaster();
+    }
+  }
 }
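
A client seeds the registry with an initial master list and the refresher keeps the set
current afterwards, as the test above demonstrates. A sketch of the bootstrap configuration
(host names are placeholders):

    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.MASTER_ADDRS_KEY,
        "master1.example.com:16000,master2.example.com:16000");
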
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 3144eeb..5e3106f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -21,10 +21,12 @@ package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.Semaphore;
 
 import org.apache.commons.logging.Log;
@@ -33,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
@@ -70,43 +73,41 @@ public class TestActiveMasterManager {
   }
 
   @Test public void testRestartMaster() throws IOException, KeeperException {
-    ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "testActiveMasterManagerFromZK", null, true);
-    try {
-      ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
-      ZKUtil.deleteNode(zk, zk.clusterStateZNode);
-    } catch(KeeperException.NoNodeException nne) {}
-
-    // Create the master node with a dummy address
-    ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
-    // Should not have a master yet
-    DummyMaster dummyMaster = new DummyMaster(zk,master);
-    ClusterStatusTracker clusterStatusTracker =
-      dummyMaster.getClusterStatusTracker();
-    ActiveMasterManager activeMasterManager =
-      dummyMaster.getActiveMasterManager();
-    assertFalse(activeMasterManager.clusterHasActiveMaster.get());
-    assertNull(activeMasterManager.getActiveMasterServerName());
-
-    // First test becoming the active master uninterrupted
-    MonitoredTask status = Mockito.mock(MonitoredTask.class);
-    clusterStatusTracker.setClusterUp();
-
-    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
-    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
-    assertMaster(zk, master);
-    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
-
-    // Now pretend master restart
-    DummyMaster secondDummyMaster = new DummyMaster(zk,master);
-    ActiveMasterManager secondActiveMasterManager =
-      secondDummyMaster.getActiveMasterManager();
-    assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
-    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
-    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
-    assertMaster(zk, master);
-    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
-    assertMaster(zk, secondActiveMasterManager.getActiveMasterServerName());
+    try (ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+        "testActiveMasterManagerFromZK", null, true)) {
+      try {
+        ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
+        ZKUtil.deleteNode(zk, zk.clusterStateZNode);
+      } catch (KeeperException.NoNodeException nne) {
+      }
+
+      // Create the master node with a dummy address
+      ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
+      // Should not have a master yet
+      DummyMaster dummyMaster = new DummyMaster(zk, master);
+      ClusterStatusTracker clusterStatusTracker =
+          dummyMaster.getClusterStatusTracker();
+      ActiveMasterManager activeMasterManager =
+          dummyMaster.getActiveMasterManager();
+      assertFalse(activeMasterManager.clusterHasActiveMaster.get());
+
+      // First test becoming the active master uninterrupted
+      MonitoredTask status = Mockito.mock(MonitoredTask.class);
+      clusterStatusTracker.setClusterUp();
+
+      activeMasterManager.blockUntilBecomingActiveMaster(100, status);
+      assertTrue(activeMasterManager.clusterHasActiveMaster.get());
+      assertMaster(zk, master);
+
+      // Now pretend master restart
+      DummyMaster secondDummyMaster = new DummyMaster(zk, master);
+      ActiveMasterManager secondActiveMasterManager =
+          secondDummyMaster.getActiveMasterManager();
+      assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
+      activeMasterManager.blockUntilBecomingActiveMaster(100, status);
+      assertTrue(activeMasterManager.clusterHasActiveMaster.get());
+      assertMaster(zk, master);
+    }
   }
 
   /**
@@ -116,86 +117,126 @@ public class TestActiveMasterManager {
    */
   @Test
   public void testActiveMasterManagerFromZK() throws Exception {
-    ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "testActiveMasterManagerFromZK", null, true);
-    try {
+    try (ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+        "testActiveMasterManagerFromZK", null, true)) {
+      try {
+        ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
+        ZKUtil.deleteNode(zk, zk.clusterStateZNode);
+      } catch (KeeperException.NoNodeException nne) {
+      }
+
+      // Create the master node with a dummy address
+      ServerName firstMasterAddress =
+          ServerName.valueOf("localhost", 1, System.currentTimeMillis());
+      ServerName secondMasterAddress =
+          ServerName.valueOf("localhost", 2, System.currentTimeMillis());
+
+      // Should not have a master yet
+      DummyMaster ms1 = new DummyMaster(zk, firstMasterAddress);
+      ActiveMasterManager activeMasterManager =
+          ms1.getActiveMasterManager();
+      assertFalse(activeMasterManager.clusterHasActiveMaster.get());
+
+      // First test becoming the active master uninterrupted
+      ClusterStatusTracker clusterStatusTracker =
+          ms1.getClusterStatusTracker();
+      clusterStatusTracker.setClusterUp();
+      activeMasterManager.blockUntilBecomingActiveMaster(100,
+          Mockito.mock(MonitoredTask.class));
+      assertTrue(activeMasterManager.clusterHasActiveMaster.get());
+      assertMaster(zk, firstMasterAddress);
+
+      // New manager will now try to become the active master in another thread
+      WaitToBeMasterThread t = new WaitToBeMasterThread(zk, secondMasterAddress);
+      t.start();
+      // Wait for this guy to figure out there is another active master
+      // Wait for 1 second at most
+      int sleeps = 0;
+      while (!t.manager.clusterHasActiveMaster.get() && sleeps < 100) {
+        Thread.sleep(10);
+        sleeps++;
+      }
+
+      // Both should see that there is an active master
+      assertTrue(activeMasterManager.clusterHasActiveMaster.get());
+      assertTrue(t.manager.clusterHasActiveMaster.get());
+      // But secondary one should not be the active master
+      assertFalse(t.isActiveMaster);
+
+      // Close the first server and delete its master node
+      ms1.stop("stopping first server");
+
+      // Use a listener to capture when the node is actually deleted
+      NodeDeletionListener listener = new NodeDeletionListener(zk, zk.getMasterAddressZNode());
+      zk.registerListener(listener);
+
+      LOG.info("Deleting master node");
       ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
-      ZKUtil.deleteNode(zk, zk.clusterStateZNode);
-    } catch(KeeperException.NoNodeException nne) {}
-
-    // Create the master node with a dummy address
-    ServerName firstMasterAddress =
-        ServerName.valueOf("localhost", 1, System.currentTimeMillis());
-    ServerName secondMasterAddress =
-        ServerName.valueOf("localhost", 2, System.currentTimeMillis());
-
-    // Should not have a master yet
-    DummyMaster ms1 = new DummyMaster(zk,firstMasterAddress);
-    ActiveMasterManager activeMasterManager =
-      ms1.getActiveMasterManager();
-    assertFalse(activeMasterManager.clusterHasActiveMaster.get());
-    assertNull(activeMasterManager.getActiveMasterServerName());
-
-    // First test becoming the active master uninterrupted
-    ClusterStatusTracker clusterStatusTracker =
-      ms1.getClusterStatusTracker();
-    clusterStatusTracker.setClusterUp();
-    activeMasterManager.blockUntilBecomingActiveMaster(100,
-        Mockito.mock(MonitoredTask.class));
-    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
-    assertMaster(zk, firstMasterAddress);
-    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
-
-    // New manager will now try to become the active master in another thread
-    WaitToBeMasterThread t = new WaitToBeMasterThread(zk, secondMasterAddress);
-    t.start();
-    // Wait for this guy to figure out there is another active master
-    // Wait for 1 second at most
-    int sleeps = 0;
-    while(!t.manager.clusterHasActiveMaster.get() && sleeps < 100) {
-      Thread.sleep(10);
-      sleeps++;
-    }
 
-    // Both should see that there is an active master
-    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
-    assertTrue(t.manager.clusterHasActiveMaster.get());
-    // But secondary one should not be the active master
-    assertFalse(t.isActiveMaster);
-    // Verify the active master ServerName is populated in standby master.
-    assertEquals(firstMasterAddress, t.manager.getActiveMasterServerName());
-
-    // Close the first server and delete it's master node
-    ms1.stop("stopping first server");
-
-    // Use a listener to capture when the node is actually deleted
-    NodeDeletionListener listener = new NodeDeletionListener(zk, zk.getMasterAddressZNode());
-    zk.registerListener(listener);
-
-    LOG.info("Deleting master node");
-    ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
-
-    // Wait for the node to be deleted
-    LOG.info("Waiting for active master manager to be notified");
-    listener.waitForDeletion();
-    LOG.info("Master node deleted");
-
-    // Now we expect the secondary manager to have and be the active master
-    // Wait for 1 second at most
-    sleeps = 0;
-    while(!t.isActiveMaster && sleeps < 100) {
-      Thread.sleep(10);
-      sleeps++;
-    }
-    LOG.debug("Slept " + sleeps + " times");
+      // Wait for the node to be deleted
+      LOG.info("Waiting for active master manager to be notified");
+      listener.waitForDeletion();
+      LOG.info("Master node deleted");
+
+      // Now we expect the secondary manager to have and be the active master
+      // Wait for 1 second at most
+      sleeps = 0;
+      while (!t.isActiveMaster && sleeps < 100) {
+        Thread.sleep(10);
+        sleeps++;
+      }
+      LOG.debug("Slept " + sleeps + " times");
 
-    assertTrue(t.manager.clusterHasActiveMaster.get());
-    assertTrue(t.isActiveMaster);
-    assertEquals(secondMasterAddress, t.manager.getActiveMasterServerName());
+      assertTrue(t.manager.clusterHasActiveMaster.get());
+      assertTrue(t.isActiveMaster);
 
-    LOG.info("Deleting master node");
+      LOG.info("Deleting master node");
+
+      ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
+    }
+  }
 
-    ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
+  @Test
+  public void testBackupMasterUpdates() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    try (ZooKeeperWatcher zk = new ZooKeeperWatcher(
+        conf, "testBackupMasterUpdates", null, true)) {
+      ServerName sn1 = ServerName.valueOf("localhost", 1, -1);
+      DummyMaster master1 = new DummyMaster(zk, sn1);
+      final ActiveMasterManager activeMasterManager = master1.getActiveMasterManager();
+      activeMasterManager.blockUntilBecomingActiveMaster(100,
+          Mockito.mock(MonitoredTask.class));
+      assertEquals(sn1, activeMasterManager.getActiveMasterServerName());
+      assertEquals(0, activeMasterManager.getBackupMasters().size());
+      // Add backup masters
+      final List<String> backupZNodes = new ArrayList<>();
+      for (int i = 1; i <= 10; i++) {
+        ServerName backupSn = ServerName.valueOf("localhost", 1000 + i, -1);
+        String backupZn = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupSn.toString());
+        backupZNodes.add(backupZn);
+        MasterAddressTracker.setMasterAddress(zk, backupZn, backupSn, 1234);
+        TEST_UTIL.waitFor(10000,
+            new Waiter.Predicate<Exception>() {
+              @Override
+              public boolean evaluate() throws Exception {
+                return activeMasterManager.getBackupMasters().size() == backupZNodes.size();
+              }
+            });
+      }
+      // Remove backup masters
+      int numBackups = backupZNodes.size();
+      for (String backupZNode: backupZNodes) {
+        ZKUtil.deleteNode(zk, backupZNode);
+        final int currentBackups = --numBackups;
+        TEST_UTIL.waitFor(10000,
+            new Waiter.Predicate<Exception>() {
+              @Override
+              public boolean evaluate() throws Exception {
+                return activeMasterManager.getBackupMasters().size() == currentBackups;
+              }
+            });
+      }
+    }
   }
 
   /**
@@ -206,8 +247,8 @@ public class TestActiveMasterManager {
    * @throws IOException if an IO problem is encountered
    */
   private void assertMaster(ZooKeeperWatcher zk,
-      ServerName expectedAddress)
-  throws KeeperException, IOException {
+                            ServerName expectedAddress)
+      throws KeeperException, IOException {
     ServerName readAddress = MasterAddressTracker.getMasterAddress(zk);
     assertNotNull(readAddress);
     assertTrue(expectedAddress.equals(readAddress));
@@ -219,7 +260,8 @@ public class TestActiveMasterManager {
     DummyMaster dummyMaster;
     boolean isActiveMaster;
 
-    public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) {
+    public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address)
+        throws InterruptedIOException {
       this.dummyMaster = new DummyMaster(zk,address);
       this.manager = this.dummyMaster.getActiveMasterManager();
       isActiveMaster = false;
@@ -267,13 +309,13 @@ public class TestActiveMasterManager {
     private ClusterStatusTracker clusterStatusTracker;
     private ActiveMasterManager activeMasterManager;
 
-    public DummyMaster(ZooKeeperWatcher zk, ServerName master) {
+    public DummyMaster(ZooKeeperWatcher zk, ServerName master) throws InterruptedIOException {
       this.clusterStatusTracker =
-        new ClusterStatusTracker(zk, this);
+          new ClusterStatusTracker(zk, this);
       clusterStatusTracker.start();
 
       this.activeMasterManager =
-        new ActiveMasterManager(zk, master, this);
+          new ActiveMasterManager(zk, master, this);
       zk.registerListener(activeMasterManager);
     }
 
@@ -338,4 +380,4 @@ public class TestActiveMasterManager {
       return null;
     }
   }
-}
+}
\ No newline at end of file
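
The mechanical theme of this test rewrite is resource handling: ZooKeeperWatcher is
AutoCloseable, so try-with-resources replaces manual cleanup and the watcher is closed even
when an assertion throws. The pattern in isolation:

    try (ZooKeeperWatcher zk = new ZooKeeperWatcher(conf, "example", null, true)) {
      // ... exercise ZK-backed trackers and managers ...
    } // zk.close() happens here automatically
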
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
index 458f891..013d2a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
@@ -45,8 +45,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
@@ -112,25 +110,6 @@ public class TestClientMetaServiceRPCs {
   }
 
   /**
-   * Verifies the active master ServerName as seen by all masters.
-   */
-  @Test public void TestActiveMaster() throws Exception {
-    HBaseRpcController rpcController = getRpcController();
-    ServerName activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName();
-    int rpcCount = 0;
-    for (JVMClusterUtil.MasterThread masterThread:
-        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
-      ClientMetaService.BlockingInterface stub =
-          getMasterStub(masterThread.getMaster().getServerName());
-      GetActiveMasterResponse resp =
-          stub.getActiveMaster(rpcController, GetActiveMasterRequest.getDefaultInstance());
-      assertEquals(activeMaster, ProtobufUtil.toServerName(resp.getServerName()));
-      rpcCount++;
-    }
-    assertEquals(MASTER_COUNT, rpcCount);
-  }
-
-  /**
    * Verifies that the meta region locations RPC returns consistent results across all masters.
    */
   @Test public void TestMetaLocations() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index fcbe0a6..a5b3d49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -1138,10 +1139,16 @@ public class TestMasterFailover {
 
     // Check that ClusterStatus reports the correct active and backup masters
     assertNotNull(active);
+    final HMaster finalActive = active;
+    TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        ClusterStatus status = finalActive.getClusterStatus();
+        return status.getBackupMastersSize() == 1 && status.getBackupMasters().size() == 1;
+      }
+    });
     status = active.getClusterStatus();
     assertTrue(status.getMaster().equals(activeName));
-    assertEquals(1, status.getBackupMastersSize());
-    assertEquals(1, status.getBackupMasters().size());
 
     // kill the active master
     LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
index fd8c4dc..cdb6af5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.util.List;
 import java.util.concurrent.Semaphore;
 
 import org.apache.commons.logging.Log;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -45,10 +47,19 @@ public class TestMasterAddressTracker {
   private static final Log LOG = LogFactory.getLog(TestMasterAddressTracker.class);
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  // Cleaned up after each unit test.
+  private static ZooKeeperWatcher zk;
 
   @Rule
   public TestName name = new TestName();
 
+  @After
+  public void cleanUp() {
+    if (zk != null) {
+      zk.close();
+    }
+  }
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniZKCluster();
@@ -79,9 +90,10 @@ public class TestMasterAddressTracker {
    */
   private MasterAddressTracker setupMasterTracker(final ServerName sn, final int infoPort)
       throws Exception {
-    ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+    zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
         name.getMethodName(), null);
     ZKUtil.createAndFailSilent(zk, zk.baseZNode);
+    ZKUtil.createAndFailSilent(zk, zk.backupMasterAddressesZNode);
 
     // Should not have a master yet
     MasterAddressTracker addressTracker = new MasterAddressTracker(zk, null);
@@ -155,6 +167,29 @@ public class TestMasterAddressTracker {
     assertEquals("Should receive 0 for backup not found.", 0, addressTracker.getMasterInfoPort());
   }
 
+  @Test
+  public void testBackupMasters() throws Exception {
+    final ServerName sn = ServerName.valueOf("localhost", 5678, System.currentTimeMillis());
+    final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1111);
+    assertTrue(addressTracker.hasMaster());
+    ServerName activeMaster = addressTracker.getMasterAddress();
+    assertEquals(activeMaster, sn);
+    // No current backup masters
+    List<ServerName> backupMasters = MasterAddressTracker.getBackupMastersAndRenewWatch(zk);
+    assertEquals(0, backupMasters.size());
+    ServerName backupMaster1 = ServerName.valueOf("localhost", 2222, -1);
+    ServerName backupMaster2 = ServerName.valueOf("localhost", 3333, -1);
+    String backupZNode1 = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupMaster1.toString());
+    String backupZNode2 = ZKUtil.joinZNode(zk.backupMasterAddressesZNode, backupMaster2.toString());
+    // Add a backup master
+    MasterAddressTracker.setMasterAddress(zk, backupZNode1, backupMaster1, 2222);
+    MasterAddressTracker.setMasterAddress(zk, backupZNode2, backupMaster2, 3333);
+    backupMasters = MasterAddressTracker.getBackupMastersAndRenewWatch(zk);
+    assertEquals(2, backupMasters.size());
+    assertTrue(backupMasters.contains(backupMaster1));
+    assertTrue(backupMasters.contains(backupMaster2));
+  }
+
   public static class NodeCreationListener extends ZooKeeperListener {
     private static final Log LOG = LogFactory.getLog(NodeCreationListener.class);
 


[hbase] 06/09: HBASE-23604: Clarify Registry usage in the code

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d86699463505f48426beacfb93fe174fbcc30b55
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Wed Sep 2 14:59:18 2020 -0700

    HBASE-23604: Clarify Registry usage in the code
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
---
 .../hadoop/hbase/client/ConnectionManager.java     | 71 +++++++++-------------
 .../{Registry.java => ConnectionRegistry.java}     | 12 +++-
 ...Factory.java => ConnectionRegistryFactory.java} | 12 ++--
 ...istry.java => ZooKeeperConnectionRegistry.java} | 38 ++++++------
 .../hadoop/hbase/client/TestAsyncProcess.java      | 11 +++-
 .../hadoop/hbase/client/TestClientNoCluster.java   | 13 ++--
 .../hbase/client/TestMetaRegionLocationCache.java  |  8 +--
 7 files changed, 83 insertions(+), 82 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 961ee3a..9798e72 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -83,7 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.*;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
@@ -635,7 +635,7 @@ class ConnectionManager {
     /**
      * Cluster registry of basic info such as clusterid and meta region location.
      */
-    Registry registry;
+    ConnectionRegistry registry;
 
     private final ClientBackoffPolicy backoffPolicy;
 
@@ -920,8 +920,8 @@ class ConnectionManager {
      * @return The cluster registry implementation to use.
      * @throws IOException
      */
-    private Registry setupRegistry() throws IOException {
-      return RegistryFactory.getRegistry(this);
+    private ConnectionRegistry setupRegistry() throws IOException {
+      return ConnectionRegistryFactory.getRegistry(this);
     }
 
     /**
@@ -1259,7 +1259,7 @@ class ConnectionManager {
           }
         }
         // Look up from zookeeper
-        metaLocations = this.registry.getMetaRegionLocation();
+        metaLocations = this.registry.getMetaRegionLocations();
         lastMetaLookupTime = EnvironmentEdgeManager.currentTime();
         if (metaLocations != null &&
             metaLocations.getRegionLocation(replicaId) != null) {
@@ -1589,43 +1589,31 @@ class ConnectionManager {
        * @throws KeeperException
        * @throws ServiceException
        */
-      private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
-        ZooKeeperKeepAliveConnection zkw;
-        try {
-          zkw = getKeepAliveZooKeeperWatcher();
-        } catch (IOException e) {
-          ExceptionUtil.rethrowIfInterrupt(e);
-          throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
-        }
-        try {
-          checkIfBaseNodeAvailable(zkw);
-          ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
-          if (sn == null) {
-            String msg = "ZooKeeper available but no active master location found";
-            LOG.info(msg);
-            throw new MasterNotRunningException(msg);
-          }
-          if (isDeadServer(sn)) {
-            throw new MasterNotRunningException(sn + " is dead.");
+      private Object makeStubNoRetries() throws IOException, ServiceException {
+        ServerName sn = registry.getActiveMaster();
+        if (sn == null) {
+          String msg = "ZooKeeper available but no active master location found";
+          LOG.info(msg);
+          throw new MasterNotRunningException(msg);
+        }
+        if (isDeadServer(sn)) {
+          throw new MasterNotRunningException(sn + " is dead.");
+        }
+        // Use the security info interface name as our stub key
+        String key = getStubKey(getServiceName(),
+            sn.getHostname(), sn.getPort(), hostnamesCanChange);
+        connectionLock.putIfAbsent(key, key);
+        Object stub = null;
+        synchronized (connectionLock.get(key)) {
+          stub = stubs.get(key);
+          if (stub == null) {
+            BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
+            stub = makeStub(channel);
+            isMasterRunning();
+            stubs.put(key, stub);
           }
-          // Use the security info interface name as our stub key
-          String key = getStubKey(getServiceName(),
-              sn.getHostname(), sn.getPort(), hostnamesCanChange);
-          connectionLock.putIfAbsent(key, key);
-          Object stub = null;
-          synchronized (connectionLock.get(key)) {
-            stub = stubs.get(key);
-            if (stub == null) {
-              BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
-              stub = makeStub(channel);
-              isMasterRunning();
-              stubs.put(key, stub);
-            }
-          }
-          return stub;
-        } finally {
-          zkw.close();
         }
+        return stub;
       }
 
       /**
@@ -1643,12 +1631,9 @@ class ConnectionManager {
               return makeStubNoRetries();
             } catch (IOException e) {
               exceptionCaught = e;
-            } catch (KeeperException e) {
-              exceptionCaught = e;
             } catch (ServiceException e) {
               exceptionCaught = e;
             }
-
             throw new MasterNotRunningException(exceptionCaught);
           } else {
             throw new DoNotRetryIOException("Connection was closed while trying to get master");
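
The net effect of the rewrite: master lookup goes through the pluggable ConnectionRegistry,
so this code path no longer assumes ZooKeeper at all. A hedged sketch of the lookup contract
the new makeStubNoRetries() relies on:

    ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(connection);
    ServerName active = registry.getActiveMaster(); // null when no active master is known
    if (active == null) {
      throw new MasterNotRunningException("No active master location found");
    }
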
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
similarity index 83%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
index 9debd63..9c4f22a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
@@ -19,26 +19,32 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.RegionLocations;
 
 /**
- * Cluster registry.
  * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * needed by cluster connections.
  * Internal use only.
  */
 @InterfaceAudience.Private
-interface Registry {
+interface ConnectionRegistry {
   /**
    * @param connection
    */
   void init(Connection connection);
 
   /**
+   * @return the currently active master, null if none exists.
+   */
+  ServerName getActiveMaster() throws IOException;
+
+  /**
    * @return Meta region location
    * @throws IOException
    */
-  RegionLocations getMetaRegionLocation() throws IOException;
+  RegionLocations getMetaRegionLocations() throws IOException;
 
   /**
    * @return Cluster id.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
similarity index 79%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
index 789e2e1..c166e21 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
@@ -22,23 +22,23 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
- * Get instance of configured Registry.
+ * Get instance of configured Connection Registry.
  */
 @InterfaceAudience.Private
-class RegistryFactory {
+class ConnectionRegistryFactory {
   static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl";
 
   /**
    * @return The cluster registry implementation to use.
    * @throws IOException
    */
-  static Registry getRegistry(final Connection connection)
+  static ConnectionRegistry getRegistry(final Connection connection)
   throws IOException {
     String registryClass = connection.getConfiguration().get(REGISTRY_IMPL_CONF_KEY,
-      ZooKeeperRegistry.class.getName());
-    Registry registry = null;
+      ZooKeeperConnectionRegistry.class.getName());
+    ConnectionRegistry registry = null;
     try {
-      registry = (Registry)Class.forName(registryClass).getDeclaredConstructor().newInstance();
+      registry = (ConnectionRegistry)Class.forName(registryClass).getDeclaredConstructor().newInstance();
     } catch (Throwable t) {
       throw new IOException(t);
     }
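
Swapping in a different registry is a configuration change only. A sketch (the constant is
package-private, so code outside org.apache.hadoop.hbase.client would set the literal key
"hbase.client.registry.impl" instead):

    conf.set(ConnectionRegistryFactory.REGISTRY_IMPL_CONF_KEY,
        "org.apache.hadoop.hbase.client.ZooKeeperConnectionRegistry");
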
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java
similarity index 81%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java
index 8f7257e..0401aee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java
@@ -22,10 +22,12 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -34,8 +36,8 @@ import org.apache.zookeeper.KeeperException;
 /**
  * A cluster registry that stores to zookeeper.
  */
-class ZooKeeperRegistry implements Registry {
-  private static final Log LOG = LogFactory.getLog(ZooKeeperRegistry.class);
+class ZooKeeperConnectionRegistry implements ConnectionRegistry {
+  private static final Log LOG = LogFactory.getLog(ZooKeeperConnectionRegistry.class);
   // Needs an instance of hci to function.  Set after construct this instance.
   ConnectionManager.HConnectionImplementation hci;
 
@@ -48,10 +50,19 @@ class ZooKeeperRegistry implements Registry {
   }
 
   @Override
-  public RegionLocations getMetaRegionLocation() throws IOException {
-    ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
+  public ServerName getActiveMaster() throws IOException {
+    ServerName sn;
+    try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) {
+      sn = MasterAddressTracker.getMasterAddress(zkw);
+    } catch (KeeperException e) {
+      throw new HBaseIOException(e);
+    }
+    return sn;
+  }
 
-    try {
+  @Override
+  public RegionLocations getMetaRegionLocations() throws IOException {
+    try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("Looking up meta region location in ZK," + " connection=" + this);
       }
@@ -84,8 +95,6 @@ class ZooKeeperRegistry implements Registry {
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       return null;
-    } finally {
-      zkw.close();
     }
   }
 
@@ -96,34 +105,25 @@ class ZooKeeperRegistry implements Registry {
     if (this.clusterId != null) return this.clusterId;
     // No synchronized here, worse case we will retrieve it twice, that's
     //  not an issue.
-    ZooKeeperKeepAliveConnection zkw = null;
-    try {
-      zkw = hci.getKeepAliveZooKeeperWatcher();
+    try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) {
       this.clusterId = ZKClusterId.readClusterIdZNode(zkw);
       if (this.clusterId == null) {
         LOG.info("ClusterId read in ZooKeeper is null");
       }
-    } catch (KeeperException e) {
-      LOG.warn("Can't retrieve clusterId from Zookeeper", e);
-    } catch (IOException e) {
+    } catch (KeeperException | IOException e) {
       LOG.warn("Can't retrieve clusterId from Zookeeper", e);
-    } finally {
-      if (zkw != null) zkw.close();
     }
     return this.clusterId;
   }
 
   @Override
   public int getCurrentNrHRS() throws IOException {
-    ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
-    try {
+    try (ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher()) {
       // We go to zk rather than to master to get count of regions to avoid
       // HTable having a Master dependency.  See HBase-2828
       return ZKUtil.getNumberOfChildren(zkw, zkw.rsZNode);
     } catch (KeeperException ke) {
       throw new IOException("Unexpected ZooKeeper exception", ke);
-    } finally {
-        zkw.close();
     }
   }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 21e3d85..d10cc2f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -460,12 +460,17 @@ public class TestAsyncProcess {
    * Returns our async process.
    */
   static class MyConnectionImpl extends ConnectionManager.HConnectionImplementation {
-    public static class TestRegistry implements Registry {
+    public static class TestConnectionRegistry implements ConnectionRegistry {
       @Override
       public void init(Connection connection) {}
 
       @Override
-      public RegionLocations getMetaRegionLocation() throws IOException {
+      public ServerName getActiveMaster() {
+        return null;
+      }
+
+      @Override
+      public RegionLocations getMetaRegionLocations() throws IOException {
         return null;
       }
 
@@ -487,7 +492,7 @@ public class TestAsyncProcess {
     }
 
     private static Configuration setupConf(Configuration conf) {
-      conf.setClass(RegistryFactory.REGISTRY_IMPL_CONF_KEY, TestRegistry.class, Registry.class);
+      conf.setClass(ConnectionRegistryFactory.REGISTRY_IMPL_CONF_KEY, TestConnectionRegistry.class, ConnectionRegistry.class);
       return conf;
     }
 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 06647ca..d2e4f0f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -105,13 +105,13 @@ public class TestClientNoCluster extends Configured implements Tool {
     // Run my HConnection overrides.  Use my little HConnectionImplementation below which
     // allows me insert mocks and also use my Registry below rather than the default zk based
     // one so tests run faster and don't have zk dependency.
-    this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName());
+    this.conf.set("hbase.client.registry.impl", SimpleConnectionRegistry.class.getName());
   }
 
   /**
    * Simple cluster registry inserted in place of our usual zookeeper based one.
    */
-  static class SimpleRegistry implements Registry {
+  static class SimpleConnectionRegistry implements ConnectionRegistry {
     final ServerName META_HOST = META_SERVERNAME;
 
     @Override
@@ -119,7 +119,12 @@ public class TestClientNoCluster extends Configured implements Tool {
     }
 
     @Override
-    public RegionLocations getMetaRegionLocation() throws IOException {
+    public ServerName getActiveMaster() {
+      return null;
+    }
+
+    @Override
+    public RegionLocations getMetaRegionLocations() throws IOException {
       return new RegionLocations(
         new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST));
     }
@@ -796,7 +801,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     getConf().set("hbase.client.connection.impl",
       ManyServersManyRegionsConnection.class.getName());
     // Use simple kv registry rather than zk
-    getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName());
+    getConf().set("hbase.client.registry.impl", SimpleConnectionRegistry.class.getName());
     // When to report fails.  Default is we report the 10th.  This means we'll see log everytime
     // an exception is thrown -- usually RegionTooBusyException when we have more than
     // hbase.test.multi.too.many requests outstanding at any time.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
index e9fa26d..f5ba56d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
@@ -55,11 +55,11 @@ public class TestMetaRegionLocationCache {
   private static final Log LOG = LogFactory.getLog(TestMetaRegionLocationCache.class.getName());
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Registry REGISTRY;
+  private static ConnectionRegistry REGISTRY;
 
   // waits for all replicas to have region location
   static void waitUntilAllMetaReplicasHavingRegionLocation(Configuration conf,
-       final Registry registry, final int regionReplication) throws IOException {
+      final ConnectionRegistry registry, final int regionReplication) throws IOException {
     Waiter.waitFor(conf, conf.getLong(
         "hbase.client.sync.wait.timeout.msec", 60000), 200, true,
         new Waiter.ExplainingPredicate<IOException>() {
@@ -71,7 +71,7 @@ public class TestMetaRegionLocationCache {
           @Override
           public boolean evaluate() throws IOException {
             try {
-              RegionLocations locs = registry.getMetaRegionLocation();
+              RegionLocations locs = registry.getMetaRegionLocations();
               if (locs == null || locs.size() < regionReplication) {
                 return false;
               }
@@ -93,7 +93,7 @@ public class TestMetaRegionLocationCache {
   public static void setUp() throws Exception {
     TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3);
     TEST_UTIL.startMiniCluster(3);
-    REGISTRY = RegistryFactory.getRegistry(TEST_UTIL.getConnection());
+    REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConnection());
     waitUntilAllMetaReplicasHavingRegionLocation(
         TEST_UTIL.getConfiguration(), REGISTRY, 3);
     TEST_UTIL.getConnection().getAdmin().setBalancerRunning(false, true);


[hbase] 07/09: HBASE-23305: Implement master based registry for client connections

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit ebe9e68274cddf483957c6e5f1b4b01e8491fb7e
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Thu Sep 3 19:30:13 2020 -0700

    HBASE-23305: Implement master based registry for client connections
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
---
 .../hadoop/hbase/client/ConnectionManager.java     |    7 +-
 .../hadoop/hbase/client/ConnectionRegistry.java    |    9 +-
 .../hbase/client/ConnectionRegistryFactory.java    |    6 +-
 .../apache/hadoop/hbase/client/MasterRegistry.java |  233 ++++
 ...tionRegistry.java => ZKConnectionRegistry.java} |    8 +-
 .../apache/hadoop/hbase/security/SecurityInfo.java |    3 +
 .../hadoop/hbase/client/TestAsyncProcess.java      |    6 +-
 .../hadoop/hbase/client/TestClientNoCluster.java   |    4 +
 .../java/org/apache/hadoop/hbase/HConstants.java   |    3 +-
 .../exceptions/MasterRegistryFetchException.java   |   37 +-
 .../java/org/apache/hadoop/hbase/util/DNS.java     |   11 +
 .../apache/hadoop/hbase/util/PrettyPrinter.java    |   19 +
 .../hbase/protobuf/generated/MasterProtos.java     | 1218 +++++++++++++++++---
 hbase-protocol/src/main/protobuf/Master.proto      |   12 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |   15 +
 .../hadoop/hbase/master/MasterRpcServices.java     |   13 +
 .../apache/hadoop/hbase/util/JVMClusterUtil.java   |    7 +
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |    3 +
 .../hadoop/hbase/client/TestFromClientSide.java    |   85 +-
 .../client/TestFromClientSideWithCoprocessor.java  |   29 +-
 .../hadoop/hbase/client/TestMasterRegistry.java    |  133 +++
 21 files changed, 1640 insertions(+), 221 deletions(-)
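
Before the per-file diffs, a minimal client-side sketch of what this commit
enables (hostnames are placeholders; the configuration keys are the ones this
commit adds to HConstants below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Point the client's registry at the masters instead of ZooKeeper.
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.REGISTRY_IMPL_CONF_KEY,      // "hbase.client.registry.impl"
        "org.apache.hadoop.hbase.client.MasterRegistry");
    conf.set(HConstants.MASTER_ADDRS_KEY,            // "hbase.masters"
        "master1.example.com:16000,master2.example.com:16000");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Cluster id, active master, and meta locations are now fetched via
      // ClientMetaService RPCs to the configured masters.
    }
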

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 9798e72..5addc7a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -942,7 +942,7 @@ class ConnectionManager {
 
     protected String clusterId = null;
 
-    void retrieveClusterId() {
+    void retrieveClusterId() throws IOException {
       if (clusterId != null) return;
       this.clusterId = this.registry.getClusterId();
       if (clusterId == null) {
@@ -1592,7 +1592,7 @@ class ConnectionManager {
       private Object makeStubNoRetries() throws IOException, ServiceException {
         ServerName sn = registry.getActiveMaster();
         if (sn == null) {
-          String msg = "ZooKeeper available but no active master location found";
+          String msg = "No active master location found";
           LOG.info(msg);
           throw new MasterNotRunningException(msg);
         }
@@ -2587,6 +2587,9 @@ class ConnectionManager {
       if (this.closed) {
         return;
       }
+      if (this.registry != null) {
+        this.registry.close();
+      }
       closeMaster();
       shutdownPools();
       if (this.metrics != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
index 9c4f22a..353ff61 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
@@ -33,7 +33,7 @@ interface ConnectionRegistry {
   /**
    * @param connection
    */
-  void init(Connection connection);
+  void init(Connection connection) throws IOException;
 
   /**
    * @return the currently active master, null if none exists.
@@ -49,11 +49,16 @@ interface ConnectionRegistry {
   /**
    * @return Cluster id.
    */
-  String getClusterId();
+  String getClusterId() throws IOException;
 
   /**
    * @return Count of 'running' regionservers
    * @throws IOException
    */
   int getCurrentNrHRS() throws IOException;
+
+  /**
+   * Cleanup state, if any.
+   */
+  void close();
 }
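
Taken together, the interface changes imply this lifecycle (a sketch, assuming
a registry obtained from the factory; every accessor can now surface an
IOException):

    ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(connection);
    try {
      String clusterId = registry.getClusterId();     // may throw IOException now
      ServerName activeMaster = registry.getActiveMaster();
    } finally {
      registry.close();                               // new cleanup hook
    }
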
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
index c166e21..eaef389 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -26,7 +27,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 class ConnectionRegistryFactory {
-  static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl";
 
   /**
    * @return The cluster registry implementation to use.
@@ -34,8 +34,8 @@ class ConnectionRegistryFactory {
    */
   static ConnectionRegistry getRegistry(final Connection connection)
   throws IOException {
-    String registryClass = connection.getConfiguration().get(REGISTRY_IMPL_CONF_KEY,
-      ZooKeeperConnectionRegistry.class.getName());
+    String registryClass = connection.getConfiguration().get(HConstants.REGISTRY_IMPL_CONF_KEY,
+      ZKConnectionRegistry.class.getName());
     ConnectionRegistry registry = null;
     try {
       registry = (ConnectionRegistry)Class.forName(registryClass).getDeclaredConstructor().newInstance();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
new file mode 100644
index 0000000..882173f
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
@@ -0,0 +1,233 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.DNS.getMasterHostname;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcClientFactory;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.security.User;
+
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterIdResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse;
+
+/**
+ * Master based registry implementation. Makes RPCs to the master addresses configured via
+ * {@value org.apache.hadoop.hbase.HConstants#MASTER_ADDRS_KEY}. All the registry methods are
+ * blocking, unlike the implementations in other branches.
+ */
+@InterfaceAudience.Private
+public class MasterRegistry implements ConnectionRegistry {
+  private static final String MASTER_ADDRS_CONF_SEPARATOR = ",";
+
+  private ImmutableMap<String, ClientMetaService.Interface> masterAddr2Stub;
+
+  // RPC client used to talk to the masters.
+  private RpcClient rpcClient;
+  private RpcControllerFactory rpcControllerFactory;
+  private int rpcTimeoutMs;
+
+  @Override
+  public void init(Connection connection) throws IOException {
+    Configuration conf = connection.getConfiguration();
+    rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE,
+        conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+    // HBASE-25051: we pass the cluster id as null here since we do not have one yet; it has
+    // to be fetched through the master registry itself.
+    // This is a problem because the cluster id is used to determine the authentication method.
+    rpcClient = RpcClientFactory.createClient(conf, null);
+    rpcControllerFactory = RpcControllerFactory.instantiate(conf);
+    populateMasterStubs(parseMasterAddrs(conf));
+  }
+
+  private interface Callable<T extends Message> {
+    T call(ClientMetaService.Interface stub, RpcController controller) throws IOException;
+  }
+
+  private <T extends Message> T doCall(Callable<T> callable) throws MasterRegistryFetchException {
+    Exception lastException = null;
+    Set<String> masters = masterAddr2Stub.keySet();
+    List<ClientMetaService.Interface> stubs = new ArrayList<>(masterAddr2Stub.values());
+    Collections.shuffle(stubs, ThreadLocalRandom.current());
+    for (ClientMetaService.Interface stub: stubs) {
+      HBaseRpcController controller = rpcControllerFactory.newController();
+      try {
+        T resp = callable.call(stub, controller);
+        if (controller.failed()) {
+          lastException = controller.getFailed();
+          continue;
+        }
+        return resp;
+      } catch (Exception e) {
+        lastException = e;
+      }
+    }
+    // rpcs to all masters failed.
+    throw new MasterRegistryFetchException(masters, lastException);
+  }
+
+  @Override
+  public ServerName getActiveMaster() throws IOException {
+    GetActiveMasterResponse resp = doCall(new Callable<GetActiveMasterResponse>() {
+      @Override
+      public GetActiveMasterResponse call(
+          ClientMetaService.Interface stub, RpcController controller) throws IOException {
+        BlockingRpcCallback<GetActiveMasterResponse> cb = new BlockingRpcCallback<>();
+        stub.getActiveMaster(controller, GetActiveMasterRequest.getDefaultInstance(), cb);
+        return cb.get();
+      }
+    });
+    if (!resp.hasServerName() || resp.getServerName() == null) {
+      throw new HBaseIOException("No active master found");
+    }
+    return ProtobufUtil.toServerName(resp.getServerName());
+  }
+
+  @Override
+  public RegionLocations getMetaRegionLocations() throws IOException {
+    GetMetaRegionLocationsResponse resp = doCall(new Callable<GetMetaRegionLocationsResponse>() {
+      @Override
+      public GetMetaRegionLocationsResponse call(
+          ClientMetaService.Interface stub, RpcController controller) throws IOException {
+        BlockingRpcCallback<GetMetaRegionLocationsResponse> cb = new BlockingRpcCallback<>();
+        stub.getMetaRegionLocations(controller, GetMetaRegionLocationsRequest.getDefaultInstance(),
+            cb);
+        return cb.get();
+      }
+    });
+    List<HRegionLocation> result = new ArrayList<>();
+    for (HBaseProtos.RegionLocation loc: resp.getMetaLocationsList()) {
+      result.add(ProtobufUtil.toRegionLocation(loc));
+    }
+    return new RegionLocations(result);
+  }
+
+  @Override
+  public String getClusterId() throws IOException {
+    GetClusterIdResponse resp = doCall(new Callable<GetClusterIdResponse>() {
+      @Override
+      public GetClusterIdResponse call(ClientMetaService.Interface stub, RpcController controller)
+          throws IOException {
+        BlockingRpcCallback<GetClusterIdResponse> cb = new BlockingRpcCallback<>();
+        stub.getClusterId(controller, GetClusterIdRequest.getDefaultInstance(), cb);
+        return cb.get();
+      }
+    });
+    return resp.getClusterId();
+  }
+
+  @Override
+  public int getCurrentNrHRS() throws IOException {
+    GetNumLiveRSResponse resp = doCall(new Callable<GetNumLiveRSResponse>() {
+      @Override
+      public GetNumLiveRSResponse call(ClientMetaService.Interface stub, RpcController controller)
+          throws IOException {
+        BlockingRpcCallback<GetNumLiveRSResponse> cb = new BlockingRpcCallback<>();
+        stub.getNumLiveRS(controller, GetNumLiveRSRequest.getDefaultInstance(), cb);
+        return cb.get();
+      }
+    });
+    return resp.getNumRegionServers();
+  }
+
+  @Override
+  public void close() {
+    if (rpcClient != null) {
+      rpcClient.close();
+    }
+  }
+
+  /**
+   * Parses the list of master addresses from the provided configuration. Supported format is a
+   * comma separated list of host[:port] values. If no port number is specified, the default
+   * master port is assumed.
+   * @param conf Configuration to parse from.
+   */
+  @InterfaceAudience.Private
+  public static Set<ServerName> parseMasterAddrs(Configuration conf) throws UnknownHostException {
+    Set<ServerName> masterAddrs = new HashSet<>();
+    String configuredMasters = getMasterAddr(conf);
+    for (String masterAddr : configuredMasters.split(MASTER_ADDRS_CONF_SEPARATOR)) {
+      HostAndPort masterHostPort =
+          HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT);
+      masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE));
+    }
+    Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed");
+    return masterAddrs;
+  }
+
+  /**
+   * Builds the default master address endpoint if it is not specified in the configuration.
+   * <p/>
+   * Will be called in {@code HBaseTestingUtility}.
+   */
+  @InterfaceAudience.Private
+  public static String getMasterAddr(Configuration conf) throws UnknownHostException {
+    String masterAddrFromConf = conf.get(HConstants.MASTER_ADDRS_KEY);
+    if (!Strings.isNullOrEmpty(masterAddrFromConf)) {
+      return masterAddrFromConf;
+    }
+    String hostname = getMasterHostname(conf);
+    int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
+    return String.format("%s:%d", hostname, port);
+  }
+
+  void populateMasterStubs(Set<ServerName> masters) throws IOException {
+    Preconditions.checkNotNull(masters);
+    ImmutableMap.Builder<String, ClientMetaService.Interface> builder = ImmutableMap.builder();
+    User user = User.getCurrent();
+    for (ServerName masterAddr : masters) {
+      builder.put(masterAddr.toString(), ClientMetaService.newStub(
+          rpcClient.createRpcChannel(masterAddr, user, rpcTimeoutMs)));
+    }
+    masterAddr2Stub = builder.build();
+  }
+}
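
A quick illustration of the parsing rules above (hostnames are placeholders;
the default port comes from HConstants.DEFAULT_MASTER_PORT and the call may
throw UnknownHostException):

    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.MASTER_ADDRS_KEY, "m1.example.com, m2.example.com:16010");
    Set<ServerName> masters = MasterRegistry.parseMasterAddrs(conf);
    // -> m1.example.com:16000 (default port filled in, whitespace trimmed) and
    //    m2.example.com:16010, both carrying ServerName.NON_STARTCODE since no
    //    start code is known for a master at this point.
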
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
similarity index 96%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java
rename to hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index 0401aee..c656da8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -36,8 +36,8 @@ import org.apache.zookeeper.KeeperException;
 /**
  * A cluster registry that stores to zookeeper.
  */
-class ZooKeeperConnectionRegistry implements ConnectionRegistry {
-  private static final Log LOG = LogFactory.getLog(ZooKeeperConnectionRegistry.class);
+class ZKConnectionRegistry implements ConnectionRegistry {
+  private static final Log LOG = LogFactory.getLog(ZKConnectionRegistry.class);
   // Needs an instance of hci to function.  Set after construct this instance.
   ConnectionManager.HConnectionImplementation hci;
 
@@ -126,4 +126,8 @@ class ZooKeeperConnectionRegistry implements ConnectionRegistry {
       throw new IOException("Unexpected ZooKeeper exception", ke);
     }
   }
+
+  @Override
+  public void close() {
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java
index eca54a4..1091ee6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java
@@ -23,6 +23,7 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClientMetaService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
@@ -42,6 +43,8 @@ public class SecurityInfo {
         new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
     infos.put(MasterService.getDescriptor().getName(),
         new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(ClientMetaService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
     infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(),
         new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
   }
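
The effect of the new entry, sketched with the existing SecurityInfo accessors
(the lookup is keyed by protobuf service name):

    SecurityInfo info =
        SecurityInfo.getInfo(ClientMetaService.getDescriptor().getName());
    // info.getServerPrincipal() -> "hbase.master.kerberos.principal"
    // info.getTokenKind()       -> Kind.HBASE_AUTH_TOKEN
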
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index d10cc2f..a73674a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -483,6 +483,10 @@ public class TestAsyncProcess {
       public int getCurrentNrHRS() throws IOException {
         return 1;
       }
+
+      @Override
+      public void close() {
+      }
     }
 
     final AtomicInteger nbThreads = new AtomicInteger(0);
@@ -492,7 +496,7 @@ public class TestAsyncProcess {
     }
 
     private static Configuration setupConf(Configuration conf) {
-      conf.setClass(ConnectionRegistryFactory.REGISTRY_IMPL_CONF_KEY, TestConnectionRegistry.class, ConnectionRegistry.class);
+      conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY,
+          TestConnectionRegistry.class, ConnectionRegistry.class);
       return conf;
     }
 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index d2e4f0f..5c85980 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -138,6 +138,10 @@ public class TestClientNoCluster extends Configured implements Tool {
     public int getCurrentNrHRS() throws IOException {
       return 1;
     }
+
+    @Override
+    public void close() {
+    }
   }
 
   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 28b2d1c..41e7525 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -79,6 +79,7 @@ public final class HConstants {
     Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT;
   /** Just an array of bytes of the right size. */
   public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE];
+  public static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl";
 
   //End HFileBlockConstants.
 
@@ -176,7 +177,7 @@ public final class HConstants {
   public static final String MASTER_INFO_PORT = "hbase.master.info.port";
 
   /** Configuration key for the list of master host:ports **/
-  public static final String MASTER_ADDRS_KEY = "hbase.master.addrs";
+  public static final String MASTER_ADDRS_KEY = "hbase.masters";
 
   public static final String MASTER_ADDRS_DEFAULT =  "localhost:" + DEFAULT_MASTER_PORT;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java
similarity index 52%
copy from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
copy to hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java
index c166e21..3a66f61 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/MasterRegistryFetchException.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,34 +15,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
+package org.apache.hadoop.hbase.exceptions;
 
+import java.util.Set;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.PrettyPrinter;
 
 /**
- * Get instance of configured Connection Registry.
+ * Exception thrown when a master registry RPC fails in the client. The exception includes the
+ * list of masters to which the RPC was attempted and the last exception encountered. Prior
+ * exceptions are included in the logs.
  */
 @InterfaceAudience.Private
-class ConnectionRegistryFactory {
-  static final String REGISTRY_IMPL_CONF_KEY = "hbase.client.registry.impl";
+public class MasterRegistryFetchException extends HBaseIOException {
+
+  private static final long serialVersionUID = 6992134872168185171L;
 
-  /**
-   * @return The cluster registry implementation to use.
-   * @throws IOException
-   */
-  static ConnectionRegistry getRegistry(final Connection connection)
-  throws IOException {
-    String registryClass = connection.getConfiguration().get(REGISTRY_IMPL_CONF_KEY,
-      ZooKeeperConnectionRegistry.class.getName());
-    ConnectionRegistry registry = null;
-    try {
-      registry = (ConnectionRegistry)Class.forName(registryClass).getDeclaredConstructor().newInstance();
-    } catch (Throwable t) {
-      throw new IOException(t);
-    }
-    registry.init(connection);
-    return registry;
+  public MasterRegistryFetchException(Set<String> masters, Throwable failure) {
+    super(String.format("Exception making rpc to masters %s", PrettyPrinter.toString(masters)),
+        failure);
   }
 }
\ No newline at end of file
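
A sketch of the resulting failure mode (addresses are placeholders, and the
cause stands in for whatever MasterRegistry.doCall() accumulated):

    Set<String> masters = new LinkedHashSet<>();
    masters.add("m1.example.com:16000");
    masters.add("m2.example.com:16000");
    IOException cause = new IOException("connection refused");  // placeholder
    throw new MasterRegistryFetchException(masters, cause);
    // message: "Exception making rpc to masters [m1.example.com:16000,m2.example.com:16000]"
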
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java
index 4b9e87f..1241dc8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DNS.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
 import java.lang.reflect.Method;
 import java.net.UnknownHostException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -66,4 +67,14 @@ public final class DNS {
       return org.apache.hadoop.net.DNS.getDefaultHost(strInterface, nameserver);
     }
   }
+
+  public static String getMasterHostname(Configuration conf) throws UnknownHostException {
+    String hostname = conf.get("hbase.master.hostname", "");
+    if (hostname.isEmpty()) {
+      return Strings.domainNamePointerToHostName(getDefaultHost(
+          conf.get("hbase.master.dns.interface", "default"),
+          conf.get("hbase.master.dns.nameserver", "default")));
+    }
+    return hostname;
+  }
 }
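
Resolution order implemented by the new helper, illustrated (hostname is a
placeholder; the fallback path may throw UnknownHostException):

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.master.hostname", "master1.example.com");
    String host = DNS.getMasterHostname(conf);  // -> "master1.example.com"
    // Without hbase.master.hostname set, hbase.master.dns.interface and
    // hbase.master.dns.nameserver drive a lookup of the local host name instead.
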
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index efdd144..6530447 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -19,6 +19,11 @@
 
 package org.apache.hadoop.hbase.util;
 
+import com.google.common.base.Joiner;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 
@@ -97,4 +102,18 @@ public class PrettyPrinter {
     return sb.toString();
   }
 
+  /**
+   * Pretty prints a collection of any type to a string. Relies on toString() implementation of the
+   * object type.
+   * @param collection collection to pretty print.
+   * @return Pretty printed string for the collection.
+   */
+  public static String toString(Collection<?> collection) {
+    List<String> stringList = new ArrayList<>();
+    for (Object o: collection) {
+      stringList.add(Objects.toString(o));
+    }
+    return "[" + Joiner.on(',').join(stringList) + "]";
+  }
+
 }
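
For example, the new helper renders a collection like so (no spaces after the
commas, per Joiner.on(',')):

    String s = PrettyPrinter.toString(Arrays.asList("m1:16000", "m2:16000"));
    // -> "[m1:16000,m2:16000]"
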
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 87b780b..909c144 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -68864,6 +68864,794 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.GetMetaRegionLocationsResponse)
   }
 
+  public interface GetNumLiveRSRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetNumLiveRSRequest}
+   *
+   * <pre>
+   ** Request and response to get the number of live region servers 
+   * </pre>
+   */
+  public static final class GetNumLiveRSRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetNumLiveRSRequestOrBuilder {
+    // Use GetNumLiveRSRequest.newBuilder() to construct.
+    private GetNumLiveRSRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetNumLiveRSRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetNumLiveRSRequest defaultInstance;
+    public static GetNumLiveRSRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetNumLiveRSRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetNumLiveRSRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetNumLiveRSRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetNumLiveRSRequest>() {
+      public GetNumLiveRSRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetNumLiveRSRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetNumLiveRSRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetNumLiveRSRequest}
+     *
+     * <pre>
+     ** Request and response to get the number of live region servers 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSRequest)
+    }
+
+    static {
+      defaultInstance = new GetNumLiveRSRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSRequest)
+  }
+
+  public interface GetNumLiveRSResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required int32 num_region_servers = 1;
+    /**
+     * <code>required int32 num_region_servers = 1;</code>
+     */
+    boolean hasNumRegionServers();
+    /**
+     * <code>required int32 num_region_servers = 1;</code>
+     */
+    int getNumRegionServers();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetNumLiveRSResponse}
+   */
+  public static final class GetNumLiveRSResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetNumLiveRSResponseOrBuilder {
+    // Use GetNumLiveRSResponse.newBuilder() to construct.
+    private GetNumLiveRSResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetNumLiveRSResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetNumLiveRSResponse defaultInstance;
+    public static GetNumLiveRSResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetNumLiveRSResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetNumLiveRSResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              numRegionServers_ = input.readInt32();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetNumLiveRSResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetNumLiveRSResponse>() {
+      public GetNumLiveRSResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetNumLiveRSResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetNumLiveRSResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required int32 num_region_servers = 1;
+    public static final int NUM_REGION_SERVERS_FIELD_NUMBER = 1;
+    private int numRegionServers_;
+    /**
+     * <code>required int32 num_region_servers = 1;</code>
+     */
+    public boolean hasNumRegionServers() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required int32 num_region_servers = 1;</code>
+     */
+    public int getNumRegionServers() {
+      return numRegionServers_;
+    }
+
+    private void initFields() {
+      numRegionServers_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasNumRegionServers()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeInt32(1, numRegionServers_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(1, numRegionServers_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) obj;
+
+      boolean result = true;
+      result = result && (hasNumRegionServers() == other.hasNumRegionServers());
+      if (hasNumRegionServers()) {
+        result = result && (getNumRegionServers()
+            == other.getNumRegionServers());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasNumRegionServers()) {
+        hash = (37 * hash) + NUM_REGION_SERVERS_FIELD_NUMBER;
+        hash = (53 * hash) + getNumRegionServers();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetNumLiveRSResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        numRegionServers_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetNumLiveRSResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.numRegionServers_ = numRegionServers_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()) return this;
+        if (other.hasNumRegionServers()) {
+          setNumRegionServers(other.getNumRegionServers());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasNumRegionServers()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required int32 num_region_servers = 1;
+      private int numRegionServers_ ;
+      /**
+       * <code>required int32 num_region_servers = 1;</code>
+       */
+      public boolean hasNumRegionServers() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required int32 num_region_servers = 1;</code>
+       */
+      public int getNumRegionServers() {
+        return numRegionServers_;
+      }
+      /**
+       * <code>required int32 num_region_servers = 1;</code>
+       */
+      public Builder setNumRegionServers(int value) {
+        bitField0_ |= 0x00000001;
+        numRegionServers_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int32 num_region_servers = 1;</code>
+       */
+      public Builder clearNumRegionServers() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        numRegionServers_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetNumLiveRSResponse)
+    }
+
+    static {
+      defaultInstance = new GetNumLiveRSResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetNumLiveRSResponse)
+  }
+
   /**
    * Protobuf service {@code hbase.pb.MasterService}
    */
@@ -74280,6 +75068,19 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done);
 
+      /**
+       * <code>rpc GetNumLiveRS(.hbase.pb.GetNumLiveRSRequest) returns (.hbase.pb.GetNumLiveRSResponse);</code>
+       *
+       * <pre>
+       **
+       * Get number of live region servers.
+       * </pre>
+       */
+      public abstract void getNumLiveRS(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse> done);
+
     }
 
     public static com.google.protobuf.Service newReflectiveService(
@@ -74309,6 +75110,14 @@ public final class MasterProtos {
           impl.getMetaRegionLocations(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void getNumLiveRS(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse> done) {
+          impl.getNumLiveRS(controller, request, done);
+        }
+
       };
     }
 
@@ -74337,6 +75146,8 @@ public final class MasterProtos {
               return impl.getActiveMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest)request);
             case 2:
               return impl.getMetaRegionLocations(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest)request);
+            case 3:
+              return impl.getNumLiveRS(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -74357,6 +75168,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -74377,6 +75190,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -74424,6 +75239,19 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse> done);
 
+    /**
+     * <code>rpc GetNumLiveRS(.hbase.pb.GetNumLiveRSRequest) returns (.hbase.pb.GetNumLiveRSResponse);</code>
+     *
+     * <pre>
+     **
+     * Get number of live region servers.
+     * </pre>
+     */
+    public abstract void getNumLiveRS(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse> done);
+
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -74461,6 +75289,11 @@ public final class MasterProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse>specializeCallback(
               done));
           return;
+        case 3:
+          this.getNumLiveRS(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -74481,6 +75314,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterRequest.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -74501,6 +75336,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetActiveMasterResponse.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -74566,6 +75403,21 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance()));
       }
+
+      public  void getNumLiveRS(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -74588,6 +75440,11 @@ public final class MasterProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest request)
           throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getNumLiveRS(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request)
+          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -74632,6 +75489,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse getNumLiveRS(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.ClientMetaService)
@@ -75292,6 +76161,16 @@ public final class MasterProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetNumLiveRSRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetNumLiveRSResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -75516,168 +76395,171 @@ public final class MasterProtos {
       "r_name\030\001 \001(\0132\024.hbase.pb.ServerName\"\037\n\035Ge" +
       "tMetaRegionLocationsRequest\"R\n\036GetMetaRe" +
       "gionLocationsResponse\0220\n\016meta_locations\030" +
-      "\001 \003(\0132\030.hbase.pb.RegionLocation*(\n\020Maste" +
-      "rSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\363.\n\rMa" +
-      "sterService\022e\n\024GetSchemaAlterStatus\022%.hb" +
-      "ase.pb.GetSchemaAlterStatusRequest\032&.hba" +
-      "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge",
-      "tTableDescriptors\022$.hbase.pb.GetTableDes" +
-      "criptorsRequest\032%.hbase.pb.GetTableDescr" +
-      "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." +
-      "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" +
-      "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" +
-      "base.pb.GetClusterStatusRequest\032\".hbase." +
-      "pb.GetClusterStatusResponse\022V\n\017IsMasterR" +
-      "unning\022 .hbase.pb.IsMasterRunningRequest" +
-      "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" +
-      "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h",
-      "base.pb.AddColumnResponse\022M\n\014DeleteColum" +
-      "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" +
-      ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" +
-      "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." +
-      "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." +
-      "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" +
-      "veRegionResponse\022k\n\026DispatchMergingRegio" +
-      "ns\022\'.hbase.pb.DispatchMergingRegionsRequ" +
-      "est\032(.hbase.pb.DispatchMergingRegionsRes" +
-      "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR",
-      "egionRequest\032\036.hbase.pb.AssignRegionResp" +
-      "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" +
-      "gnRegionRequest\032 .hbase.pb.UnassignRegio" +
-      "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" +
-      "flineRegionRequest\032\037.hbase.pb.OfflineReg" +
-      "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" +
-      "leteTableRequest\032\035.hbase.pb.DeleteTableR" +
-      "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun" +
-      "cateTableRequest\032\037.hbase.pb.TruncateTabl" +
-      "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab",
-      "leTableRequest\032\035.hbase.pb.EnableTableRes" +
-      "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" +
-      "TableRequest\032\036.hbase.pb.DisableTableResp" +
-      "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" +
-      "leRequest\032\035.hbase.pb.ModifyTableResponse" +
-      "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" +
-      "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" +
-      "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb" +
-      "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." +
-      "hbase.pb.StopMasterRequest\032\034.hbase.pb.St",
-      "opMasterResponse\022h\n\031IsMasterInMaintenanc" +
-      "eMode\022$.hbase.pb.IsInMaintenanceModeRequ" +
-      "est\032%.hbase.pb.IsInMaintenanceModeRespon" +
-      "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
-      "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" +
-      "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
-      "quest\032$.hbase.pb.SetBalancerRunningRespo" +
-      "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa" +
-      "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
-      "erEnabledResponse\022k\n\026SetSplitOrMergeEnab",
-      "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq" +
-      "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" +
-      "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." +
-      "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." +
-      "pb.IsSplitOrMergeEnabledResponse\022D\n\tNorm" +
-      "alize\022\032.hbase.pb.NormalizeRequest\032\033.hbas" +
-      "e.pb.NormalizeResponse\022e\n\024SetNormalizerR" +
-      "unning\022%.hbase.pb.SetNormalizerRunningRe" +
-      "quest\032&.hbase.pb.SetNormalizerRunningRes" +
-      "ponse\022b\n\023IsNormalizerEnabled\022$.hbase.pb.",
-      "IsNormalizerEnabledRequest\032%.hbase.pb.Is" +
-      "NormalizerEnabledResponse\022S\n\016RunCatalogS" +
-      "can\022\037.hbase.pb.RunCatalogScanRequest\032 .h" +
-      "base.pb.RunCatalogScanResponse\022e\n\024Enable" +
-      "CatalogJanitor\022%.hbase.pb.EnableCatalogJ" +
-      "anitorRequest\032&.hbase.pb.EnableCatalogJa" +
-      "nitorResponse\022n\n\027IsCatalogJanitorEnabled" +
-      "\022(.hbase.pb.IsCatalogJanitorEnabledReque" +
-      "st\032).hbase.pb.IsCatalogJanitorEnabledRes" +
-      "ponse\022V\n\017RunCleanerChore\022 .hbase.pb.RunC",
-      "leanerChoreRequest\032!.hbase.pb.RunCleaner" +
-      "ChoreResponse\022k\n\026SetCleanerChoreRunning\022" +
-      "\'.hbase.pb.SetCleanerChoreRunningRequest" +
-      "\032(.hbase.pb.SetCleanerChoreRunningRespon" +
-      "se\022h\n\025IsCleanerChoreEnabled\022&.hbase.pb.I" +
-      "sCleanerChoreEnabledRequest\032\'.hbase.pb.I" +
-      "sCleanerChoreEnabledResponse\022^\n\021ExecMast" +
-      "erService\022#.hbase.pb.CoprocessorServiceR" +
-      "equest\032$.hbase.pb.CoprocessorServiceResp" +
-      "onse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotRequ",
-      "est\032\032.hbase.pb.SnapshotResponse\022h\n\025GetCo" +
-      "mpletedSnapshots\022&.hbase.pb.GetCompleted" +
-      "SnapshotsRequest\032\'.hbase.pb.GetCompleted" +
-      "SnapshotsResponse\022S\n\016DeleteSnapshot\022\037.hb" +
-      "ase.pb.DeleteSnapshotRequest\032 .hbase.pb." +
-      "DeleteSnapshotResponse\022S\n\016IsSnapshotDone" +
-      "\022\037.hbase.pb.IsSnapshotDoneRequest\032 .hbas" +
-      "e.pb.IsSnapshotDoneResponse\022V\n\017RestoreSn" +
-      "apshot\022 .hbase.pb.RestoreSnapshotRequest" +
-      "\032!.hbase.pb.RestoreSnapshotResponse\022h\n\025I",
-      "sRestoreSnapshotDone\022&.hbase.pb.IsRestor" +
-      "eSnapshotDoneRequest\032\'.hbase.pb.IsRestor" +
-      "eSnapshotDoneResponse\022P\n\rExecProcedure\022\036" +
-      ".hbase.pb.ExecProcedureRequest\032\037.hbase.p" +
-      "b.ExecProcedureResponse\022W\n\024ExecProcedure" +
-      "WithRet\022\036.hbase.pb.ExecProcedureRequest\032" +
-      "\037.hbase.pb.ExecProcedureResponse\022V\n\017IsPr" +
-      "ocedureDone\022 .hbase.pb.IsProcedureDoneRe" +
-      "quest\032!.hbase.pb.IsProcedureDoneResponse" +
-      "\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyNam",
-      "espaceRequest\032!.hbase.pb.ModifyNamespace" +
-      "Response\022V\n\017CreateNamespace\022 .hbase.pb.C" +
-      "reateNamespaceRequest\032!.hbase.pb.CreateN" +
-      "amespaceResponse\022V\n\017DeleteNamespace\022 .hb" +
-      "ase.pb.DeleteNamespaceRequest\032!.hbase.pb" +
-      ".DeleteNamespaceResponse\022k\n\026GetNamespace" +
-      "Descriptor\022\'.hbase.pb.GetNamespaceDescri" +
-      "ptorRequest\032(.hbase.pb.GetNamespaceDescr" +
-      "iptorResponse\022q\n\030ListNamespaceDescriptor" +
-      "s\022).hbase.pb.ListNamespaceDescriptorsReq",
-      "uest\032*.hbase.pb.ListNamespaceDescriptors" +
-      "Response\022\206\001\n\037ListTableDescriptorsByNames" +
-      "pace\0220.hbase.pb.ListTableDescriptorsByNa" +
-      "mespaceRequest\0321.hbase.pb.ListTableDescr" +
-      "iptorsByNamespaceResponse\022t\n\031ListTableNa" +
-      "mesByNamespace\022*.hbase.pb.ListTableNames" +
-      "ByNamespaceRequest\032+.hbase.pb.ListTableN" +
-      "amesByNamespaceResponse\022A\n\010SetQuota\022\031.hb" +
-      "ase.pb.SetQuotaRequest\032\032.hbase.pb.SetQuo" +
-      "taResponse\022x\n\037getLastMajorCompactionTime",
-      "stamp\022).hbase.pb.MajorCompactionTimestam" +
-      "pRequest\032*.hbase.pb.MajorCompactionTimes" +
-      "tampResponse\022\212\001\n(getLastMajorCompactionT" +
-      "imestampForRegion\0222.hbase.pb.MajorCompac" +
-      "tionTimestampForRegionRequest\032*.hbase.pb" +
-      ".MajorCompactionTimestampResponse\022_\n\022get" +
-      "ProcedureResult\022#.hbase.pb.GetProcedureR" +
-      "esultRequest\032$.hbase.pb.GetProcedureResu" +
-      "ltResponse\022h\n\027getSecurityCapabilities\022%." +
-      "hbase.pb.SecurityCapabilitiesRequest\032&.h",
-      "base.pb.SecurityCapabilitiesResponse\022S\n\016" +
-      "AbortProcedure\022\037.hbase.pb.AbortProcedure" +
-      "Request\032 .hbase.pb.AbortProcedureRespons" +
-      "e\022S\n\016ListProcedures\022\037.hbase.pb.ListProce" +
-      "duresRequest\032 .hbase.pb.ListProceduresRe" +
-      "sponse\022Y\n\020ClearDeadServers\022!.hbase.pb.Cl" +
-      "earDeadServersRequest\032\".hbase.pb.ClearDe" +
-      "adServersResponse\022S\n\016ListNamespaces\022\037.hb" +
-      "ase.pb.ListNamespacesRequest\032 .hbase.pb." +
-      "ListNamespacesResponse\022b\n\025SwitchSnapshot",
-      "Cleanup\022#.hbase.pb.SetSnapshotCleanupReq" +
-      "uest\032$.hbase.pb.SetSnapshotCleanupRespon" +
-      "se\022q\n\030IsSnapshotCleanupEnabled\022).hbase.p" +
-      "b.IsSnapshotCleanupEnabledRequest\032*.hbas" +
-      "e.pb.IsSnapshotCleanupEnabledResponse\022P\n" +
-      "\rGetTableState\022\036.hbase.pb.GetTableStateR" +
-      "equest\032\037.hbase.pb.GetTableStateResponse2" +
-      "\247\002\n\021ClientMetaService\022M\n\014GetClusterId\022\035." +
-      "hbase.pb.GetClusterIdRequest\032\036.hbase.pb." +
-      "GetClusterIdResponse\022V\n\017GetActiveMaster\022",
-      " .hbase.pb.GetActiveMasterRequest\032!.hbas" +
-      "e.pb.GetActiveMasterResponse\022k\n\026GetMetaR" +
-      "egionLocations\022\'.hbase.pb.GetMetaRegionL" +
-      "ocationsRequest\032(.hbase.pb.GetMetaRegion" +
-      "LocationsResponseBB\n*org.apache.hadoop.h" +
-      "base.protobuf.generatedB\014MasterProtosH\001\210" +
-      "\001\001\240\001\001"
+      "\001 \003(\0132\030.hbase.pb.RegionLocation\"\025\n\023GetNu" +
+      "mLiveRSRequest\"2\n\024GetNumLiveRSResponse\022\032" +
+      "\n\022num_region_servers\030\001 \002(\005*(\n\020MasterSwit" +
+      "chType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\363.\n\rMasterS" +
+      "ervice\022e\n\024GetSchemaAlterStatus\022%.hbase.p",
+      "b.GetSchemaAlterStatusRequest\032&.hbase.pb" +
+      ".GetSchemaAlterStatusResponse\022b\n\023GetTabl" +
+      "eDescriptors\022$.hbase.pb.GetTableDescript" +
+      "orsRequest\032%.hbase.pb.GetTableDescriptor" +
+      "sResponse\022P\n\rGetTableNames\022\036.hbase.pb.Ge" +
+      "tTableNamesRequest\032\037.hbase.pb.GetTableNa" +
+      "mesResponse\022Y\n\020GetClusterStatus\022!.hbase." +
+      "pb.GetClusterStatusRequest\032\".hbase.pb.Ge" +
+      "tClusterStatusResponse\022V\n\017IsMasterRunnin" +
+      "g\022 .hbase.pb.IsMasterRunningRequest\032!.hb",
+      "ase.pb.IsMasterRunningResponse\022D\n\tAddCol" +
+      "umn\022\032.hbase.pb.AddColumnRequest\032\033.hbase." +
+      "pb.AddColumnResponse\022M\n\014DeleteColumn\022\035.h" +
+      "base.pb.DeleteColumnRequest\032\036.hbase.pb.D" +
+      "eleteColumnResponse\022M\n\014ModifyColumn\022\035.hb" +
+      "ase.pb.ModifyColumnRequest\032\036.hbase.pb.Mo" +
+      "difyColumnResponse\022G\n\nMoveRegion\022\033.hbase" +
+      ".pb.MoveRegionRequest\032\034.hbase.pb.MoveReg" +
+      "ionResponse\022k\n\026DispatchMergingRegions\022\'." +
+      "hbase.pb.DispatchMergingRegionsRequest\032(",
+      ".hbase.pb.DispatchMergingRegionsResponse" +
+      "\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegion" +
+      "Request\032\036.hbase.pb.AssignRegionResponse\022" +
+      "S\n\016UnassignRegion\022\037.hbase.pb.UnassignReg" +
+      "ionRequest\032 .hbase.pb.UnassignRegionResp" +
+      "onse\022P\n\rOfflineRegion\022\036.hbase.pb.Offline" +
+      "RegionRequest\032\037.hbase.pb.OfflineRegionRe" +
+      "sponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteT" +
+      "ableRequest\032\035.hbase.pb.DeleteTableRespon" +
+      "se\022P\n\rtruncateTable\022\036.hbase.pb.TruncateT",
+      "ableRequest\032\037.hbase.pb.TruncateTableResp" +
+      "onse\022J\n\013EnableTable\022\034.hbase.pb.EnableTab" +
+      "leRequest\032\035.hbase.pb.EnableTableResponse" +
+      "\022M\n\014DisableTable\022\035.hbase.pb.DisableTable" +
+      "Request\032\036.hbase.pb.DisableTableResponse\022" +
+      "J\n\013ModifyTable\022\034.hbase.pb.ModifyTableReq" +
+      "uest\032\035.hbase.pb.ModifyTableResponse\022J\n\013C" +
+      "reateTable\022\034.hbase.pb.CreateTableRequest" +
+      "\032\035.hbase.pb.CreateTableResponse\022A\n\010Shutd" +
+      "own\022\031.hbase.pb.ShutdownRequest\032\032.hbase.p",
+      "b.ShutdownResponse\022G\n\nStopMaster\022\033.hbase" +
+      ".pb.StopMasterRequest\032\034.hbase.pb.StopMas" +
+      "terResponse\022h\n\031IsMasterInMaintenanceMode" +
+      "\022$.hbase.pb.IsInMaintenanceModeRequest\032%" +
+      ".hbase.pb.IsInMaintenanceModeResponse\022>\n" +
+      "\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hba" +
+      "se.pb.BalanceResponse\022_\n\022SetBalancerRunn" +
+      "ing\022#.hbase.pb.SetBalancerRunningRequest" +
+      "\032$.hbase.pb.SetBalancerRunningResponse\022\\" +
+      "\n\021IsBalancerEnabled\022\".hbase.pb.IsBalance",
+      "rEnabledRequest\032#.hbase.pb.IsBalancerEna" +
+      "bledResponse\022k\n\026SetSplitOrMergeEnabled\022\'" +
+      ".hbase.pb.SetSplitOrMergeEnabledRequest\032" +
+      "(.hbase.pb.SetSplitOrMergeEnabledRespons" +
+      "e\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.Is" +
+      "SplitOrMergeEnabledRequest\032\'.hbase.pb.Is" +
+      "SplitOrMergeEnabledResponse\022D\n\tNormalize" +
+      "\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb." +
+      "NormalizeResponse\022e\n\024SetNormalizerRunnin" +
+      "g\022%.hbase.pb.SetNormalizerRunningRequest",
+      "\032&.hbase.pb.SetNormalizerRunningResponse" +
+      "\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNor" +
+      "malizerEnabledRequest\032%.hbase.pb.IsNorma" +
+      "lizerEnabledResponse\022S\n\016RunCatalogScan\022\037" +
+      ".hbase.pb.RunCatalogScanRequest\032 .hbase." +
+      "pb.RunCatalogScanResponse\022e\n\024EnableCatal" +
+      "ogJanitor\022%.hbase.pb.EnableCatalogJanito" +
+      "rRequest\032&.hbase.pb.EnableCatalogJanitor" +
+      "Response\022n\n\027IsCatalogJanitorEnabled\022(.hb" +
+      "ase.pb.IsCatalogJanitorEnabledRequest\032).",
+      "hbase.pb.IsCatalogJanitorEnabledResponse" +
+      "\022V\n\017RunCleanerChore\022 .hbase.pb.RunCleane" +
+      "rChoreRequest\032!.hbase.pb.RunCleanerChore" +
+      "Response\022k\n\026SetCleanerChoreRunning\022\'.hba" +
+      "se.pb.SetCleanerChoreRunningRequest\032(.hb" +
+      "ase.pb.SetCleanerChoreRunningResponse\022h\n" +
+      "\025IsCleanerChoreEnabled\022&.hbase.pb.IsClea" +
+      "nerChoreEnabledRequest\032\'.hbase.pb.IsClea" +
+      "nerChoreEnabledResponse\022^\n\021ExecMasterSer" +
+      "vice\022#.hbase.pb.CoprocessorServiceReques",
+      "t\032$.hbase.pb.CoprocessorServiceResponse\022" +
+      "A\n\010Snapshot\022\031.hbase.pb.SnapshotRequest\032\032" +
+      ".hbase.pb.SnapshotResponse\022h\n\025GetComplet" +
+      "edSnapshots\022&.hbase.pb.GetCompletedSnaps" +
+      "hotsRequest\032\'.hbase.pb.GetCompletedSnaps" +
+      "hotsResponse\022S\n\016DeleteSnapshot\022\037.hbase.p" +
+      "b.DeleteSnapshotRequest\032 .hbase.pb.Delet" +
+      "eSnapshotResponse\022S\n\016IsSnapshotDone\022\037.hb" +
+      "ase.pb.IsSnapshotDoneRequest\032 .hbase.pb." +
+      "IsSnapshotDoneResponse\022V\n\017RestoreSnapsho",
+      "t\022 .hbase.pb.RestoreSnapshotRequest\032!.hb" +
+      "ase.pb.RestoreSnapshotResponse\022h\n\025IsRest" +
+      "oreSnapshotDone\022&.hbase.pb.IsRestoreSnap" +
+      "shotDoneRequest\032\'.hbase.pb.IsRestoreSnap" +
+      "shotDoneResponse\022P\n\rExecProcedure\022\036.hbas" +
+      "e.pb.ExecProcedureRequest\032\037.hbase.pb.Exe" +
+      "cProcedureResponse\022W\n\024ExecProcedureWithR" +
+      "et\022\036.hbase.pb.ExecProcedureRequest\032\037.hba" +
+      "se.pb.ExecProcedureResponse\022V\n\017IsProcedu" +
+      "reDone\022 .hbase.pb.IsProcedureDoneRequest",
+      "\032!.hbase.pb.IsProcedureDoneResponse\022V\n\017M" +
+      "odifyNamespace\022 .hbase.pb.ModifyNamespac" +
+      "eRequest\032!.hbase.pb.ModifyNamespaceRespo" +
+      "nse\022V\n\017CreateNamespace\022 .hbase.pb.Create" +
+      "NamespaceRequest\032!.hbase.pb.CreateNamesp" +
+      "aceResponse\022V\n\017DeleteNamespace\022 .hbase.p" +
+      "b.DeleteNamespaceRequest\032!.hbase.pb.Dele" +
+      "teNamespaceResponse\022k\n\026GetNamespaceDescr" +
+      "iptor\022\'.hbase.pb.GetNamespaceDescriptorR" +
+      "equest\032(.hbase.pb.GetNamespaceDescriptor",
+      "Response\022q\n\030ListNamespaceDescriptors\022).h" +
+      "base.pb.ListNamespaceDescriptorsRequest\032" +
+      "*.hbase.pb.ListNamespaceDescriptorsRespo" +
+      "nse\022\206\001\n\037ListTableDescriptorsByNamespace\022" +
+      "0.hbase.pb.ListTableDescriptorsByNamespa" +
+      "ceRequest\0321.hbase.pb.ListTableDescriptor" +
+      "sByNamespaceResponse\022t\n\031ListTableNamesBy" +
+      "Namespace\022*.hbase.pb.ListTableNamesByNam" +
+      "espaceRequest\032+.hbase.pb.ListTableNamesB" +
+      "yNamespaceResponse\022A\n\010SetQuota\022\031.hbase.p",
+      "b.SetQuotaRequest\032\032.hbase.pb.SetQuotaRes" +
+      "ponse\022x\n\037getLastMajorCompactionTimestamp" +
+      "\022).hbase.pb.MajorCompactionTimestampRequ" +
+      "est\032*.hbase.pb.MajorCompactionTimestampR" +
+      "esponse\022\212\001\n(getLastMajorCompactionTimest" +
+      "ampForRegion\0222.hbase.pb.MajorCompactionT" +
+      "imestampForRegionRequest\032*.hbase.pb.Majo" +
+      "rCompactionTimestampResponse\022_\n\022getProce" +
+      "dureResult\022#.hbase.pb.GetProcedureResult" +
+      "Request\032$.hbase.pb.GetProcedureResultRes",
+      "ponse\022h\n\027getSecurityCapabilities\022%.hbase" +
+      ".pb.SecurityCapabilitiesRequest\032&.hbase." +
+      "pb.SecurityCapabilitiesResponse\022S\n\016Abort" +
+      "Procedure\022\037.hbase.pb.AbortProcedureReque" +
+      "st\032 .hbase.pb.AbortProcedureResponse\022S\n\016" +
+      "ListProcedures\022\037.hbase.pb.ListProcedures" +
+      "Request\032 .hbase.pb.ListProceduresRespons" +
+      "e\022Y\n\020ClearDeadServers\022!.hbase.pb.ClearDe" +
+      "adServersRequest\032\".hbase.pb.ClearDeadSer" +
+      "versResponse\022S\n\016ListNamespaces\022\037.hbase.p",
+      "b.ListNamespacesRequest\032 .hbase.pb.ListN" +
+      "amespacesResponse\022b\n\025SwitchSnapshotClean" +
+      "up\022#.hbase.pb.SetSnapshotCleanupRequest\032" +
+      "$.hbase.pb.SetSnapshotCleanupResponse\022q\n" +
+      "\030IsSnapshotCleanupEnabled\022).hbase.pb.IsS" +
+      "napshotCleanupEnabledRequest\032*.hbase.pb." +
+      "IsSnapshotCleanupEnabledResponse\022P\n\rGetT" +
+      "ableState\022\036.hbase.pb.GetTableStateReques" +
+      "t\032\037.hbase.pb.GetTableStateResponse2\366\002\n\021C" +
+      "lientMetaService\022M\n\014GetClusterId\022\035.hbase",
+      ".pb.GetClusterIdRequest\032\036.hbase.pb.GetCl" +
+      "usterIdResponse\022V\n\017GetActiveMaster\022 .hba" +
+      "se.pb.GetActiveMasterRequest\032!.hbase.pb." +
+      "GetActiveMasterResponse\022k\n\026GetMetaRegion" +
+      "Locations\022\'.hbase.pb.GetMetaRegionLocati" +
+      "onsRequest\032(.hbase.pb.GetMetaRegionLocat" +
+      "ionsResponse\022M\n\014GetNumLiveRS\022\035.hbase.pb." +
+      "GetNumLiveRSRequest\032\036.hbase.pb.GetNumLiv" +
+      "eRSResponseBB\n*org.apache.hadoop.hbase.p" +
+      "rotobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -76470,6 +77352,18 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor,
               new java.lang.String[] { "MetaLocations", });
+          internal_static_hbase_pb_GetNumLiveRSRequest_descriptor =
+            getDescriptor().getMessageTypes().get(131);
+          internal_static_hbase_pb_GetNumLiveRSRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetNumLiveRSRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_GetNumLiveRSResponse_descriptor =
+            getDescriptor().getMessageTypes().get(132);
+          internal_static_hbase_pb_GetNumLiveRSResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetNumLiveRSResponse_descriptor,
+              new java.lang.String[] { "NumRegionServers", });
           return null;
         }
       };
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index b2fd3f8..bf16dd5 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -938,6 +938,13 @@ message GetMetaRegionLocationsResponse {
   repeated RegionLocation meta_locations = 1;
 }
 
+/** Request and response to get the number of live region servers */
+message GetNumLiveRSRequest {
+}
+message GetNumLiveRSResponse {
+  required int32 num_region_servers = 1;
+}
+
 /**
  * Implements all the RPCs needed by clients to look up cluster meta information needed for connection establishment.
  */
@@ -956,4 +963,9 @@ service ClientMetaService {
    * Get current meta replicas' region locations.
    */
   rpc GetMetaRegionLocations(GetMetaRegionLocationsRequest) returns(GetMetaRegionLocationsResponse);
+
+  /**
+   * Get number of live region servers.
+   */
+  rpc GetNumLiveRS(GetNumLiveRSRequest) returns(GetNumLiveRSResponse);
 }
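
For reference, a minimal sketch of how a caller could exercise the new RPC through the
generated blocking stub. The channel setup is assumed to already exist, and the wrapper
class and method names below are hypothetical; only the generated ClientMetaService
types shown above are real.

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public final class GetNumLiveRSExample {
      // Hypothetical helper: returns the live region server count reported by the
      // master reachable through the given channel.
      static int fetchNumLiveRegionServers(BlockingRpcChannel channel) throws ServiceException {
        MasterProtos.ClientMetaService.BlockingInterface stub =
            MasterProtos.ClientMetaService.newBlockingStub(channel);
        MasterProtos.GetNumLiveRSResponse response =
            stub.getNumLiveRS(null, MasterProtos.GetNumLiveRSRequest.getDefaultInstance());
        // num_region_servers is a required field, so a successfully parsed response
        // always carries it.
        return response.getNumRegionServers();
      }
    }
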
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1e03d44..2bb4362 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2641,6 +2641,21 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return masterFinishedInitializationTime;
   }
 
+  /**
+   * @return number of live region servers tracked by this master.
+   * @throws KeeperException if there is an issue with the ZooKeeper connection.
+   */
+  public int getNumLiveRegionServers() throws KeeperException {
+    if (isActiveMaster()) {
+      return regionServerTracker.getOnlineServers().size();
+    }
+    // If the master is not active, we fall back to ZK to fetch the number of live region servers.
+    // This is an extra hop but that is okay since the ConnectionRegistry call that is serviced by
+    // this method is already deprecated and is not used in any active code paths. This method is
+    // here only for the test code.
+    return ZKUtil.getNumberOfChildren(zooKeeper, zooKeeper.rsZNode);
+  }
+
   public int getNumWALFiles() {
     return procedureStore != null ? procedureStore.getActiveLogs().size() : 0;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 4af4560..e4eb654 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -109,6 +109,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLoca
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNumLiveRSResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
@@ -1824,4 +1826,15 @@ public class MasterRpcServices extends RSRpcServices
     return response.build();
   }
 
+  @Override
+  public GetNumLiveRSResponse getNumLiveRS(RpcController rpcController, GetNumLiveRSRequest request)
+      throws ServiceException {
+    GetNumLiveRSResponse.Builder response = GetNumLiveRSResponse.newBuilder();
+    try {
+      response.setNumRegionServers(master.getNumLiveRegionServers());
+    } catch (KeeperException ke) {
+      throw new ServiceException(ke);
+    }
+    return response.build();
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 23c4f3c..4128c8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import com.google.common.base.Preconditions;
 import java.io.InterruptedIOException;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -26,6 +27,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -144,6 +146,11 @@ public class JVMClusterUtil {
     } catch (Exception e) {
       throw new IOException(e);
     }
+    // Needed if a master based registry is configured for internal cluster connections. Here, we
+    // just add the current master's host and port since we do not know the other master
+    // addresses up front in mini cluster tests.
+    c.set(HConstants.MASTER_ADDRS_KEY,
+        Preconditions.checkNotNull(server.getServerName().getAddress()).toString());
     return new JVMClusterUtil.MasterThread(server, index);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index a37c55d..a9b5fb1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1110,6 +1110,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     Configuration c = new Configuration(this.conf);
     this.hbaseCluster =
         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
+    // Populate the master address configuration from the mini cluster's configuration.
+    conf.set(HConstants.MASTER_ADDRS_KEY,
+        c.get(HConstants.MASTER_ADDRS_KEY, HConstants.MASTER_ADDRS_DEFAULT));
     // Don't leave here till we've done a successful scan of the hbase:meta
     Table t = new HTable(c, TableName.META_TABLE_NAME);
     ResultScanner s = t.getScanner(new Scan());
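
Outside the mini cluster, a client opts into the master based registry purely through
configuration. A minimal sketch, assuming the HConstants keys used in this patch,
placeholder master host names, and the default master RPC port (16000):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.ConnectionRegistry;
    import org.apache.hadoop.hbase.client.MasterRegistry;

    public final class MasterRegistryClientExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Swap the default ZK based connection registry for the master based one.
        conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, MasterRegistry.class,
            ConnectionRegistry.class);
        // Point the registry at the cluster's masters (placeholder host names).
        conf.set(HConstants.MASTER_ADDRS_KEY,
            "master1.example.com:16000,master2.example.com:16000");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Cluster id, active master address and meta region locations are now
          // fetched over master RPCs instead of from ZooKeeper.
        }
      }
    }
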
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 2e436c6..0e715a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -117,40 +118,88 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.Assume;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
 /**
  * Run tests that use the HBase clients; {@link HTable}.
  * Sets up the HBase mini cluster once at start and runs through all client tests.
  * Each creates a table named for the method and does its stuff against that.
+ *
+ * Parameterized to run with different registry implementations.
  */
 @Category(LargeTests.class)
 @SuppressWarnings ("deprecation")
+@RunWith(Parameterized.class)
 public class TestFromClientSide {
   private static final Log LOG = LogFactory.getLog(TestFromClientSide.class);
-  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected static HBaseTestingUtility TEST_UTIL;
   private static byte [] ROW = Bytes.toBytes("testRow");
   private static byte [] FAMILY = Bytes.toBytes("testFamily");
   private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
   private static byte [] VALUE = Bytes.toBytes("testValue");
   protected static int SLAVES = 3;
 
+  @Parameterized.Parameters
+  public static Collection parameters() {
+    return Arrays.asList(new Object[][] {
+        { MasterRegistry.class },
+        { ZKConnectionRegistry.class }
+    });
+  }
+
+  // To keep the child classes happy.
+  TestFromClientSide() {}
+
+  public TestFromClientSide(Class<? extends ConnectionRegistry> registry) throws Exception {
+    initialize(registry);
+  }
+
   /**
-   * @throws java.lang.Exception
+   * JUnit does not provide an easy way to run a hook after each parameterized run, so there is
+   * no easy way to restart the test cluster between parameterized runs. The @BeforeParam
+   * annotation does not work either because it runs before parameterization and hence does not
+   * have access to the test parameters (which is weird).
+   *
+   * This *hack* checks whether the current test cluster's configuration already matches the
+   * passed parameterized configs. If it does, we can just reuse the cluster for the test and do
+   * not need to initialize it from scratch. While this is a hack, it saves a ton of time for the
+   * full test run and de-flakes it.
    */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
+  protected static boolean isSameParameterizedCluster(
+      Class<? extends ConnectionRegistry> registryImpl) {
+    if (TEST_UTIL == null) {
+      return false;
+    }
+    Configuration conf = TEST_UTIL.getConfiguration();
+    Class<? extends ConnectionRegistry> confClass = conf.getClass(HConstants.REGISTRY_IMPL_CONF_KEY,
+        ZKConnectionRegistry.class, ConnectionRegistry.class);
+    return confClass.getName().equals(registryImpl.getName());
+  }
+
+  public static void initialize(Class<? extends ConnectionRegistry> registry) throws Exception {
+    // initialize() is called for every unit test; however, we only want to reset the cluster
+    // state at the end of each parameterized run.
+    if (isSameParameterizedCluster(registry)) {
+      return;
+    }
+    if (TEST_UTIL != null) {
+      // We reached the end of a parameterized run; clean up.
+      TEST_UTIL.shutdownMiniCluster();
+    }
+    TEST_UTIL = new HBaseTestingUtility();
     // Uncomment the following lines if more verbosity is needed for
     // debugging (see HBASE-12285 for details).
     //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
     Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, registry, ConnectionRegistry.class);
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName());
     conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
@@ -168,22 +217,6 @@ public class TestFromClientSide {
   }
 
   /**
-   * @throws java.lang.Exception
-   */
-  @Before
-  public void setUp() throws Exception {
-    // Nothing to do.
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @After
-  public void tearDown() throws Exception {
-    // Nothing to do.
-  }
-
-  /**
    * Test append result when there are duplicate rpc request.
    */
   @Test
@@ -4461,6 +4494,12 @@ public class TestFromClientSide {
    */
   @Test
   public void testUnmanagedHConnectionReconnect() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    Class registryImpl = conf.getClass(
+        HConstants.REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class);
+    // This test does not make sense for MasterRegistry since it stops the only master in the
+    // cluster and starts a new master without populating the underlying config for the connection.
+    Assume.assumeFalse(registryImpl.equals(MasterRegistry.class));
     final TableName tableName = TableName.valueOf("testUnmanagedHConnectionReconnect");
     HTable t = createUnmangedHConnectionHTable(tableName);
     Connection conn = t.getConnection();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
index cd2409e..9ce0133 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
@@ -17,13 +17,18 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.Arrays;
+import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
+import org.junit.runners.Parameterized;
 
 /**
  * Test all client operations with a coprocessor that
@@ -31,12 +36,32 @@ import org.junit.experimental.categories.Category;
  */
 @Category(LargeTests.class)
 public class TestFromClientSideWithCoprocessor extends TestFromClientSide {
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
+
+  @Parameterized.Parameters
+  public static Collection parameters() {
+    return Arrays.asList(new Object[][] {
+        { ZKConnectionRegistry.class }
+    });
+  }
+
+  public TestFromClientSideWithCoprocessor(Class registry) throws Exception {
+    initialize(registry);
+  }
+
+  public static void initialize(Class<? extends ConnectionRegistry> registry) throws Exception {
+    if (isSameParameterizedCluster(registry)) {
+      return;
+    }
+    if (TEST_UTIL != null) {
+      // We reached the end of a parameterized run; clean up.
+      TEST_UTIL.shutdownMiniCluster();
+    }
+    TEST_UTIL = new HBaseTestingUtility();
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
     conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+    conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, registry, ConnectionRegistry.class);
     // We need more than one region server in this test
     TEST_UTIL.startMiniCluster(SLAVES);
   }
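
Taken together, these two files show the parameterized-registry pattern: JUnit's Parameterized
runner constructs the test once per registry class, and each constructor funnels into
initialize(), which recycles the shared mini cluster only when the registry implementation
actually changes. A minimal sketch of a test opting in (the class name is illustrative;
initialize() is the helper from the diffs above):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.runners.Parameterized;

    // Inherits the Parameterized runner setup from TestFromClientSide.
    public class ExampleRegistryParameterizedTest extends TestFromClientSide {
      @Parameterized.Parameters
      public static Collection parameters() {
        return Arrays.asList(new Object[][] {
            { ZKConnectionRegistry.class },
            { MasterRegistry.class }
        });
      }

      public ExampleRegistryParameterizedTest(Class registry) throws Exception {
        initialize(registry); // recycles the mini cluster only on a registry change
      }
    }
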
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
new file mode 100644
index 0000000..07f0100
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.base.Joiner;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestMasterRegistry {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static final int META_REPLICA_COUNT = 3;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setInt(META_REPLICAS_NUM, META_REPLICA_COUNT);
+    TEST_UTIL.startMiniCluster(3, 3);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Generates a string of dummy master addresses in host:port format. Every other hostname won't
+   * have a port number.
+   */
+  private static String generateDummyMastersList(int size) {
+    List<String> masters = new ArrayList<>();
+    for (int i = 0; i < size; i++) {
+      masters.add(" localhost" + (i % 2 == 0 ? ":" + (1000 + i) : ""));
+    }
+    return Joiner.on(",").join(masters);
+  }
+
+  /**
+   * Makes sure the master registry parses the master end points in the configuration correctly.
+   */
+  @Test
+  public void testMasterAddressParsing() throws IOException {
+    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+    int numMasters = 10;
+    conf.set(HConstants.MASTER_ADDRS_KEY, generateDummyMastersList(numMasters));
+    List<ServerName> parsedMasters = new ArrayList<>(MasterRegistry.parseMasterAddrs(conf));
+    // Half of the entries have no port; after deduplication they collapse to a single
+    // entry with the default port.
+    assertEquals(numMasters / 2 + 1, parsedMasters.size());
+    // Sort in the increasing order of port numbers.
+    Collections.sort(parsedMasters, new Comparator<ServerName>() {
+      @Override
+      public int compare(ServerName sn1, ServerName sn2) {
+        return sn1.getPort() - sn2.getPort();
+      }
+    });
+    for (int i = 0; i < parsedMasters.size(); i++) {
+      ServerName sn = parsedMasters.get(i);
+      assertEquals("localhost", sn.getHostname());
+      if (i == parsedMasters.size() - 1) {
+        // Last entry should be the one with default port.
+        assertEquals(HConstants.DEFAULT_MASTER_PORT, sn.getPort());
+      } else {
+        assertEquals(1000 + (2 * i), sn.getPort());
+      }
+    }
+  }
+
+  @Test
+  public void testRegistryRPCs() throws Exception {
+    HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster();
+    final MasterRegistry registry = new MasterRegistry();
+    try {
+      registry.init(TEST_UTIL.getConnection());
+      // Wait for all replicas to be assigned before proceeding with the test. It failed on
+      // occasion because not all replicas had come up before the test started.
+      TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return registry.getMetaRegionLocations().size() == META_REPLICA_COUNT;
+        }
+      });
+      assertEquals(registry.getClusterId(), activeMaster.getClusterId());
+      assertEquals(registry.getActiveMaster(), activeMaster.getServerName());
+      List<HRegionLocation> metaLocations =
+          Arrays.asList(registry.getMetaRegionLocations().getRegionLocations());
+      List<HRegionLocation> actualMetaLocations =
+          activeMaster.getMetaRegionLocationCache().getMetaRegionLocations();
+      Collections.sort(metaLocations);
+      Collections.sort(actualMetaLocations);
+      assertEquals(actualMetaLocations, metaLocations);
+      int numRs = registry.getCurrentNrHRS();
+      assertEquals(TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(), numRs);
+    } finally {
+      registry.close();
+    }
+  }
+}
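
For orientation, the client-side wiring this test exercises is configuration-only. A minimal
sketch (hostnames are placeholders):

    Configuration conf = HBaseConfiguration.create();
    // Comma-separated master list; entries without a port get the default master port.
    conf.set(HConstants.MASTER_ADDRS_KEY, "master1.example.com:16000,master2.example.com");
    conf.setClass(HConstants.REGISTRY_IMPL_CONF_KEY, MasterRegistry.class,
        ConnectionRegistry.class);
    Connection connection = ConnectionFactory.createConnection(conf);
    // Cluster id, active master and meta locations are now fetched via master RPCs, not ZK.
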


[hbase] 03/09: HBASE-23281: Track meta region locations in masters (#830)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9a1d5a02b0040d99273a1e0051816f8928e36ba5
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Wed Dec 4 15:26:58 2019 -0800

    HBASE-23281: Track meta region locations in masters (#830)
    
    * HBASE-23281: Track meta region changes on masters
    
    This patch adds a simple cache that tracks the meta region replica
    locations. It keeps an eye on the region movements so that the
    cached locations are not stale.
    
    This information is used for servicing client RPCs for connections
    that use master based registry (HBASE-18095). The RPC end points
    will be added in a separate patch.
    
    Signed-off-by: Nick Dimiduk <nd...@apache.org>
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit 8571d389cfe7bb18dafad82ca011e78390a21061)
    (cherry picked from commit 89581d9d218436dcaa840f895b2e98c5d4b65a1e)
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  86 +++++++
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   | 131 ++++++++---
 .../org/apache/hadoop/hbase/master/HMaster.java    |  15 +-
 .../hbase/master/MetaRegionLocationCache.java      | 256 +++++++++++++++++++++
 .../hbase/client/TestMetaRegionLocationCache.java  | 237 +++++++++++++++++++
 .../master/TestRegionsRecoveryConfigManager.java   |   3 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java    |  34 ++-
 7 files changed, 728 insertions(+), 34 deletions(-)
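
In short, the cache added here is consumed on the master side like so (a sketch; "master" is
an HMaster handle, and an empty result means the locations have not been populated yet and
the caller should retry):

    MetaRegionLocationCache cache = master.getMetaRegionLocationCache();
    List<HRegionLocation> metaLocations = cache.getMetaRegionLocations();
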

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 975cf44..74319d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
@@ -95,6 +96,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
@@ -158,6 +160,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.Flus
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
@@ -170,6 +173,7 @@ import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
 import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.DynamicClassLoader;
@@ -3657,4 +3661,86 @@ public final class ProtobufUtil {
     }
     return Collections.emptySet();
   }
+
+  /**
+   * Get the Meta region state from the passed data bytes. Can handle both old and new style
+   * server names.
+   * @param data protobuf serialized data with meta server name.
+   * @param replicaId replica ID for this region
+   * @return RegionState instance corresponding to the serialized data.
+   * @throws DeserializationException if the data is invalid.
+   */
+  public static RegionState parseMetaRegionStateFrom(final byte[] data, int replicaId)
+      throws DeserializationException {
+    RegionState.State state = RegionState.State.OPEN;
+    ServerName serverName;
+    if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) {
+      try {
+        int prefixLen = ProtobufUtil.lengthOfPBMagic();
+        ZooKeeperProtos.MetaRegionServer rl =
+            ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom(data, prefixLen,
+                data.length - prefixLen);
+        if (rl.hasState()) {
+          state = RegionState.State.convert(rl.getState());
+        }
+        HBaseProtos.ServerName sn = rl.getServer();
+        serverName = ServerName.valueOf(
+            sn.getHostName(), sn.getPort(), sn.getStartCode());
+      } catch (InvalidProtocolBufferException e) {
+        throw new DeserializationException("Unable to parse meta region location");
+      }
+    } else {
+      // old style of meta region location?
+      serverName = parseServerNameFrom(data);
+    }
+    if (serverName == null) {
+      state = RegionState.State.OFFLINE;
+    }
+    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
+        HRegionInfo.FIRST_META_REGIONINFO, replicaId), state, serverName);
+  }
+
+  /**
+   * Get a ServerName from the passed in data bytes.
+   * @param data Data with a serialized server name in it; can handle the old-style
+   * servername of just host and port. Also works with data that begins with the pb
+   * 'PBUF' magic prefix followed by a protobuf containing a serialized
+   * {@link ServerName}.
+   * @return null if <code>data</code> is null, else the passed data converted to a
+   * ServerName instance.
+   * @throws DeserializationException if the data cannot be parsed.
+   */
+  public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException {
+    if (data == null || data.length <= 0) return null;
+    if (isPBMagicPrefix(data)) {
+      int prefixLen = lengthOfPBMagic();
+      try {
+        ZooKeeperProtos.Master rss =
+            ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen);
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn =
+            rss.getMaster();
+        return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
+      } catch (/*InvalidProtocolBufferException*/IOException e) {
+        // A failed parse of the znode is pretty catastrophic. Rather than loop
+        // retrying in the hope that the bad bytes will change, and rather than alter
+        // this method's signature to add an IOE (which would send ripples all over
+        // the code base), wrap it in a DeserializationException. This should "never"
+        // happen; fail fast if it does.
+        throw new DeserializationException(e);
+      }
+    }
+    // The string could be old style -- pre hbase-1502 -- which was hostname and
+    // port separated by a colon, rather than hostname, port and startcode
+    // delimited by a ','.
+    String str = Bytes.toString(data);
+    int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
+    if (index != -1) {
+      // Presume it's a ServerName serialized with versioned bytes.
+      return ServerName.parseVersionedServerName(data);
+    }
+    // Presume it is in hostname:port format.
+    String hostname = Addressing.parseHostname(str);
+    int port = Addressing.parsePort(str);
+    return ServerName.valueOf(hostname, port, -1L);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 901fc71..51b14c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -18,6 +18,11 @@
  */
 package org.apache.hadoop.hbase.zookeeper;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM;
+import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
+import static org.apache.hadoop.hbase.HRegionInfo.DEFAULT_REPLICA_ID;
+import static org.apache.hadoop.hbase.zookeeper.ZKUtil.joinZNode;
+import com.google.common.collect.ImmutableMap;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -39,7 +44,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -71,6 +75,9 @@ import org.apache.zookeeper.data.Stat;
 public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class);
 
+  public static final String META_ZNODE_PREFIX_CONF_KEY = "zookeeper.znode.metaserver";
+  public static final String META_ZNODE_PREFIX = "meta-region-server";
+
   // Identifier for this watcher (for logging only).  It is made of the prefix
   // passed on construction and the zookeeper sessionid.
   private String prefix;
@@ -91,6 +98,11 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   private final List<ZooKeeperListener> listeners =
     new CopyOnWriteArrayList<ZooKeeperListener>();
 
+  /**
+   * znodes containing the locations of the servers hosting the meta replicas
+   */
+  private final ImmutableMap<Integer, String> metaReplicaZNodes;
+
   // Single threaded executor pool that processes event notifications from Zookeeper. Events are
   // processed in the order in which they arrive (pool backed by an unbounded fifo queue). We do
   // this to decouple the event processing from Zookeeper's ClientCnxn's EventThread context.
@@ -148,6 +160,13 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   // znode of indicating master maintenance mode
   public static String masterMaintZNode = "masterMaintenance";
 
+  /**
+   * The prefix of the meta znode. Does not include baseZNode.
+   * It's a 'prefix' because the meta replica id integer can be tagged on the end
+   * (if no number is present, it is the 'default' replica).
+   */
+  private final String metaZNodePrefix;
+
   // Certain ZooKeeper nodes need to be world-readable
   public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
     new ArrayList<ACL>() { {
@@ -155,7 +174,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       add(new ACL(ZooDefs.Perms.ALL,ZooDefs.Ids.AUTH_IDS));
     }};
 
-  public final static String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
 
   private final Configuration conf;
@@ -202,6 +220,15 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     PendingWatcher pendingWatcher = new PendingWatcher();
     this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier);
     pendingWatcher.prepare(this);
+    ImmutableMap.Builder<Integer, String> builder = ImmutableMap.builder();
+    metaZNodePrefix = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX);
+    String defaultMetaReplicaZNode = joinZNode(baseZNode, metaZNodePrefix);
+    builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode);
+    int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM);
+    for (int i = 1; i < numMetaReplicas; i++) {
+      builder.put(i, defaultMetaReplicaZNode + "-" + i);
+    }
+    metaReplicaZNodes = builder.build();
     if (canCreateBaseZNode) {
       try {
         createBaseZNodes();
@@ -219,6 +246,13 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
         HConstants.ZK_SYNC_BLOCKING_TIMEOUT_DEFAULT_MS);
   }
 
+  /**
+   * @return true if the znode is a meta region replica
+   */
+  public boolean isAnyMetaReplicaZNode(String node) {
+    return this.metaReplicaZNodes.containsValue(node);
+  }
+
   private void createBaseZNodes() throws ZooKeeperConnectionException {
     try {
       // Create all the necessary "directories" of znodes
@@ -296,7 +330,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     List<String> children = recoverableZooKeeper.getChildren(znode, false);
 
     for (String child : children) {
-      setZnodeAclsRecursive(ZKUtil.joinZNode(znode, child));
+      setZnodeAclsRecursive(joinZNode(znode, child));
     }
     List<ACL> acls = ZKUtil.createACL(this, znode, true);
     LOG.info("Setting ACLs for znode:" + znode + " , acl:" + acls);
@@ -446,47 +480,47 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   private void setNodeNames(Configuration conf) {
     baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
         HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
-    metaReplicaZnodes.put(0, ZKUtil.joinZNode(baseZNode,
+    metaReplicaZnodes.put(0, joinZNode(baseZNode,
            conf.get("zookeeper.znode.metaserver", "meta-region-server")));
-    int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
-            HConstants.DEFAULT_META_REPLICA_NUM);
+    int numMetaReplicas = conf.getInt(META_REPLICAS_NUM,
+            DEFAULT_META_REPLICA_NUM);
     for (int i = 1; i < numMetaReplicas; i++) {
-      String str = ZKUtil.joinZNode(baseZNode,
+      String str = joinZNode(baseZNode,
         conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i);
       metaReplicaZnodes.put(i, str);
     }
-    rsZNode = ZKUtil.joinZNode(baseZNode,
+    rsZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.rs", "rs"));
-    drainingZNode = ZKUtil.joinZNode(baseZNode,
+    drainingZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.draining.rs", "draining"));
-    masterAddressZNode = ZKUtil.joinZNode(baseZNode,
+    masterAddressZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.master", "master"));
-    backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode,
+    backupMasterAddressesZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.backup.masters", "backup-masters"));
-    clusterStateZNode = ZKUtil.joinZNode(baseZNode,
+    clusterStateZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.state", "running"));
-    assignmentZNode = ZKUtil.joinZNode(baseZNode,
+    assignmentZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.unassigned", "region-in-transition"));
-    tableZNode = ZKUtil.joinZNode(baseZNode,
+    tableZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.tableEnableDisable", "table"));
-    clusterIdZNode = ZKUtil.joinZNode(baseZNode,
+    clusterIdZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.clusterId", "hbaseid"));
-    splitLogZNode = ZKUtil.joinZNode(baseZNode,
+    splitLogZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME));
-    balancerZNode = ZKUtil.joinZNode(baseZNode,
+    balancerZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.balancer", "balancer"));
-    regionNormalizerZNode = ZKUtil.joinZNode(baseZNode,
+    regionNormalizerZNode = joinZNode(baseZNode,
       conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
-    switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
-    tableLockZNode = ZKUtil.joinZNode(baseZNode,
+    switchZNode = joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
+    tableLockZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.tableLock", "table-lock"));
-    snapshotCleanupZNode = ZKUtil.joinZNode(baseZNode,
+    snapshotCleanupZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE));
-    recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
+    recoveringRegionsZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
-    namespaceZNode = ZKUtil.joinZNode(baseZNode,
+    namespaceZNode = joinZNode(baseZNode,
         conf.get("zookeeper.znode.namespace", "namespace"));
-    masterMaintZNode = ZKUtil.joinZNode(baseZNode,
+    masterMaintZNode = joinZNode(baseZNode,
       conf.get("zookeeper.znode.masterMaintenance", "master-maintenance"));
   }
 
@@ -508,7 +542,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
    * @return true or false
    */
   public boolean isDefaultMetaReplicaZnode(String node) {
-    if (getZNodeForReplica(HRegionInfo.DEFAULT_REPLICA_ID).equals(node)) {
+    if (getZNodeForReplica(DEFAULT_REPLICA_ID).equals(node)) {
       return true;
     }
     return false;
@@ -542,7 +576,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     // This is mostly needed for tests that attempt to create meta replicas
     // from outside the master
     if (str == null) {
-      str = ZKUtil.joinZNode(baseZNode,
+      str = joinZNode(baseZNode,
           conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + replicaId);
     }
     return str;
@@ -555,7 +589,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
    */
   public int getMetaReplicaIdFromZnode(String znode) {
     String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server");
-    if (znode.equals(pattern)) return HRegionInfo.DEFAULT_REPLICA_ID;
+    if (znode.equals(pattern)) return DEFAULT_REPLICA_ID;
     // the non-default replicas are of the pattern meta-region-server-<replicaId>
     String nonDefaultPattern = pattern + "-";
     return Integer.parseInt(znode.substring(nonDefaultPattern.length()));
@@ -868,4 +902,45 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   public String getSwitchZNode() {
     return switchZNode;
   }
+
+  /**
+   * Parses the meta replicaId from the passed path.
+   * @param path the full znode path, including baseZNode.
+   * @return replicaId
+   */
+  public int getMetaReplicaIdFromPath(String path) {
+    // Extract the znode from the path; the prefix is of the form baseZNode + PATH_SEPARATOR.
+    int prefixLen = baseZNode.length() + 1;
+    return getMetaReplicaIdFromZnode(path.substring(prefixLen));
+  }
+
+  /**
+   * Same as {@link #getMetaReplicaNodes()} except that this also registers a watcher on the
+   * base znode for subsequent CREATE/DELETE operations on child nodes.
+   */
+  public List<String> getMetaReplicaNodesAndWatchChildren() throws KeeperException {
+    List<String> childrenOfBaseNode =
+        ZKUtil.listChildrenAndWatchForNewChildren(this, baseZNode);
+    return filterMetaReplicaNodes(childrenOfBaseNode);
+  }
+
+  /**
+   * @param nodes Input list of znodes
+   * @return Filtered list of znodes from nodes that belong to meta replica(s).
+   */
+  private List<String> filterMetaReplicaNodes(List<String> nodes) {
+    if (nodes == null || nodes.isEmpty()) {
+      return new ArrayList<>();
+    }
+    List<String> metaReplicaNodes = new ArrayList<>(2);
+    String pattern = conf.get(META_ZNODE_PREFIX_CONF_KEY, META_ZNODE_PREFIX);
+    for (String child : nodes) {
+      if (child.startsWith(pattern)) {
+        metaReplicaNodes.add(child);
+      }
+    }
+    return metaReplicaNodes;
+  }
+
 }
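
Concretely, with the default baseZNode of "/hbase" and three meta replicas, the
metaReplicaZNodes map built in the constructor contains:

    /hbase/meta-region-server      (replica 0, the default)
    /hbase/meta-region-server-1
    /hbase/meta-region-server-2

which makes isAnyMetaReplicaZNode() a straightforward containsValue() lookup.
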
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1f54793f..71c79be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -307,6 +306,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   private RegionsRecoveryConfigManager regionsRecoveryConfigManager = null;
 
+  /**
+   * Cache of the meta region replicas' locations. Also tracks their changes to avoid
+   * stale cache entries.
+   */
+  private final MetaRegionLocationCache metaRegionLocationCache;
+
   // buffer for "fatal error" notices from region servers
   // in the cluster. This is only used for assisting
   // operations/debugging.
@@ -517,11 +522,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     // Some unit tests don't need a cluster, so no zookeeper at all
     if (!conf.getBoolean("hbase.testing.nocluster", false)) {
+      this.metaRegionLocationCache = new MetaRegionLocationCache(this.zooKeeper);
       setInitLatch(new CountDownLatch(1));
       activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
       int infoPort = putUpJettyServer();
       startActiveMasterManager(infoPort);
     } else {
+      this.metaRegionLocationCache = null;
       activeMasterManager = null;
     }
     cachedClusterId = new CachedClusterId(conf);
@@ -3444,4 +3451,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     }
     return cachedClusterId.getFromCacheOrFetch();
   }
+
+  public MetaRegionLocationCache getMetaRegionLocationCache() {
+    return this.metaRegionLocationCache;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
new file mode 100644
index 0000000..b0fd8fe
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.zookeeper.ZKUtil.joinZNode;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ThreadFactory;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.types.CopyOnWriteArrayMap;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A cache of meta region location metadata. Registers a listener on ZK to track changes to the
+ * meta table znodes. Clients are expected to retry if the meta information is stale. This class
+ * is thread-safe (a single instance of this class can be shared by multiple threads without race
+ * conditions).
+ */
+@InterfaceAudience.Private
+public class MetaRegionLocationCache extends ZooKeeperListener {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MetaRegionLocationCache.class);
+
+  /**
+   * Maximum number of times we retry when ZK operation times out.
+   */
+  private static final int MAX_ZK_META_FETCH_RETRIES = 10;
+  /**
+   * Sleep interval ms between ZK operation retries.
+   */
+  private static final int SLEEP_INTERVAL_MS_BETWEEN_RETRIES = 1000;
+  private static final int SLEEP_INTERVAL_MS_MAX = 10000;
+  private final RetryCounterFactory retryCounterFactory =
+      new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES);
+
+  /**
+   * Cached meta region locations indexed by replica ID.
+   * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during
+   * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write,
+   * that should be OK: the map is small, mutations are infrequent, and client requests
+   * never block while a mutation is in progress.
+   */
+  private final CopyOnWriteArrayMap<Integer, HRegionLocation> cachedMetaLocations;
+
+  private enum ZNodeOpType {
+    INIT,
+    CREATED,
+    CHANGED,
+    DELETED
+  }
+
+  public MetaRegionLocationCache(ZooKeeperWatcher zkWatcher) {
+    super(zkWatcher);
+    cachedMetaLocations = new CopyOnWriteArrayMap<>();
+    watcher.registerListener(this);
+    // Populate the initial snapshot of data from meta znodes.
+    // This is needed because stand-by masters can potentially start after the initial znode
+    // creation. It retries indefinitely until the initial meta locations are loaded from ZK
+    // and watchers are established. Subsequent updates are handled by the registered listener.
+    // It runs on a separate background thread so as not to block master init.
+    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).build();
+    final RetryCounterFactory retryFactory = new RetryCounterFactory(
+        Integer.MAX_VALUE, SLEEP_INTERVAL_MS_BETWEEN_RETRIES, SLEEP_INTERVAL_MS_MAX);
+    threadFactory.newThread(
+        new Runnable() {
+          @Override
+          public void run() {
+            MetaRegionLocationCache.this.loadMetaLocationsFromZk(
+                retryFactory.create(), ZNodeOpType.INIT);
+          }
+        }).start();
+  }
+
+  /**
+   * Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers
+   * a watcher on the base znode to catch any CREATE/DELETE events on its children.
+   * @param retryCounter controls the number of retries and sleep between retries.
+   */
+  private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opType) {
+    List<String> znodes = null;
+    while (retryCounter.shouldRetry()) {
+      try {
+        znodes = watcher.getMetaReplicaNodesAndWatchChildren();
+        break;
+      } catch (KeeperException ke) {
+        LOG.debug("Error populating initial meta locations", ke);
+        if (!retryCounter.shouldRetry()) {
+          // Retries exhausted and watchers not set. This is not a desirable state since the cache
+          // could remain stale forever. Propagate the exception.
+          watcher.abort("Error populating meta locations", ke);
+          return;
+        }
+        try {
+          retryCounter.sleepUntilNextRetry();
+        } catch (InterruptedException ie) {
+          LOG.error("Interrupted while loading meta locations from ZK", ie);
+          Thread.currentThread().interrupt();
+          return;
+        }
+      }
+    }
+    if (znodes == null || znodes.isEmpty()) {
+      // No meta znodes exist at this point but we registered a watcher on the base znode to listen
+      // for updates. They will be handled via nodeChildrenChanged().
+      return;
+    }
+    if (znodes.size() == cachedMetaLocations.size()) {
+      // No new meta znodes got added.
+      return;
+    }
+    for (String znode: znodes) {
+      String path = joinZNode(watcher.baseZNode, znode);
+      updateMetaLocation(path, opType);
+    }
+  }
+
+  /**
+   * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for
+   * future updates.
+   * @param replicaId ReplicaID of the region.
+   * @return HRegionLocation for the meta replica.
+   * @throws KeeperException if there is any issue fetching/parsing the serialized data.
+   */
+  private HRegionLocation getMetaRegionLocation(int replicaId)
+      throws KeeperException {
+    RegionState metaRegionState;
+    try {
+      byte[] data = ZKUtil.getDataAndWatch(watcher,
+          watcher.getZNodeForReplica(replicaId));
+      metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+    return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName());
+  }
+
+  private void updateMetaLocation(String path, ZNodeOpType opType) {
+    if (!isValidMetaZNode(path)) {
+      return;
+    }
+    LOG.debug("Updating meta znode for path {}: {}", path, opType.name());
+    int replicaId = watcher.getMetaReplicaIdFromPath(path);
+    RetryCounter retryCounter = retryCounterFactory.create();
+    HRegionLocation location = null;
+    while (retryCounter.shouldRetry()) {
+      try {
+        if (opType == ZNodeOpType.DELETED) {
+          if (!ZKUtil.watchAndCheckExists(watcher, path)) {
+            // The path does not exist; we've set the watcher, so we can break for now.
+            break;
+          }
+          // If it is a transient error and the node appears right away, we fetch the
+          // latest meta state.
+        }
+        location = getMetaRegionLocation(replicaId);
+        break;
+      } catch (KeeperException e) {
+        LOG.debug("Error getting meta location for path {}", path, e);
+        if (!retryCounter.shouldRetry()) {
+          LOG.warn("Error getting meta location for path {}. Retries exhausted.", path, e);
+          break;
+        }
+        try {
+          retryCounter.sleepUntilNextRetry();
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
+          return;
+        }
+      }
+    }
+    if (location == null) {
+      cachedMetaLocations.remove(replicaId);
+      return;
+    }
+    cachedMetaLocations.put(replicaId, location);
+  }
+
+  /**
+   * @return list of HRegionLocations for the meta replica(s); empty if the cache has not
+   * been populated yet.
+   */
+  public List<HRegionLocation> getMetaRegionLocations() {
+    ConcurrentNavigableMap<Integer, HRegionLocation> snapshot =
+        cachedMetaLocations.tailMap(cachedMetaLocations.firstKey());
+    List<HRegionLocation> result = new ArrayList<>();
+    if (snapshot.isEmpty()) {
+      // This is possible if the master has not successfully initialized yet or the meta
+      // region is stuck in some weird state.
+      return result;
+    }
+    // Explicitly iterate instead of new ArrayList<>(snapshot.values()) because the underlying
+    // ArrayValueCollection does not implement toArray().
+    for (HRegionLocation location: snapshot.values()) {
+      result.add(location);
+    }
+    return result;
+  }
+
+  /**
+   * Helper to check if the given 'path' corresponds to a meta znode. This listener is only
+   * interested in changes to meta znodes.
+   */
+  private boolean isValidMetaZNode(String path) {
+    return watcher.isAnyMetaReplicaZNode(path);
+  }
+
+  @Override
+  public void nodeCreated(String path) {
+    updateMetaLocation(path, ZNodeOpType.CREATED);
+  }
+
+  @Override
+  public void nodeDeleted(String path) {
+    updateMetaLocation(path, ZNodeOpType.DELETED);
+  }
+
+  @Override
+  public void nodeDataChanged(String path) {
+    updateMetaLocation(path, ZNodeOpType.CHANGED);
+  }
+
+  @Override
+  public void nodeChildrenChanged(String path) {
+    if (!path.equals(watcher.baseZNode)) {
+      return;
+    }
+    loadMetaLocationsFromZk(retryCounterFactory.create(), ZNodeOpType.CHANGED);
+  }
+}
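
The cache is driven entirely by watcher callbacks, so wiring it up is a single constructor
call. A sketch of the lifecycle:

    // Registers itself as a ZooKeeperListener and kicks off a background thread that loads
    // the initial locations and installs the watches; callbacks keep the map current after.
    MetaRegionLocationCache cache = new MetaRegionLocationCache(master.getZooKeeper());
    List<HRegionLocation> locations = cache.getMetaRegionLocations(); // empty until loaded
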
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
new file mode 100644
index 0000000..c254e56
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import com.google.common.base.Preconditions;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MultithreadedTestUtil;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MetaRegionLocationCache;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({SmallTests.class, MasterTests.class })
+public class TestMetaRegionLocationCache {
+
+  private static final Log LOG = LogFactory.getLog(TestMetaRegionLocationCache.class.getName());
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static Registry REGISTRY;
+
+  // Waits for all replicas to have a region location.
+  static void waitUntilAllMetaReplicasHavingRegionLocation(Configuration conf,
+       final Registry registry, final int regionReplication) throws IOException {
+    Waiter.waitFor(conf, conf.getLong(
+        "hbase.client.sync.wait.timeout.msec", 60000), 200, true,
+        new Waiter.ExplainingPredicate<IOException>() {
+          @Override
+          public String explainFailure() throws IOException {
+            return "Not all meta replicas get assigned";
+          }
+
+          @Override
+          public boolean evaluate() throws IOException {
+            try {
+              RegionLocations locs = registry.getMetaRegionLocation();
+              if (locs == null || locs.size() < regionReplication) {
+                return false;
+              }
+              for (int i = 0; i < regionReplication; i++) {
+                if (locs.getRegionLocation(i) == null) {
+                  return false;
+                }
+              }
+              return true;
+            } catch (Exception e) {
+              LOG.warn("Failed to get meta region locations", e);
+              return false;
+            }
+          }
+        });
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3);
+    TEST_UTIL.startMiniCluster(3);
+    REGISTRY = RegistryFactory.getRegistry(TEST_UTIL.getConnection());
+    waitUntilAllMetaReplicasHavingRegionLocation(
+        TEST_UTIL.getConfiguration(), REGISTRY, 3);
+    TEST_UTIL.getConnection().getAdmin().setBalancerRunning(false, true);
+  }
+
+  @AfterClass
+  public static void cleanUp() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private List<HRegionLocation> getCurrentMetaLocations(ZooKeeperWatcher zk) throws Exception {
+    List<HRegionLocation> result = new ArrayList<>();
+    for (String znode: zk.getMetaReplicaNodes()) {
+      String path = ZKUtil.joinZNode(zk.baseZNode, znode);
+      int replicaId = zk.getMetaReplicaIdFromPath(path);
+      RegionState state = MetaTableLocator.getMetaRegionState(zk, replicaId);
+      result.add(new HRegionLocation(state.getRegion(), state.getServerName()));
+    }
+    return result;
+  }
+
+  // Verifies that the cached meta locations in the given master are in sync with what is in ZK.
+  private void verifyCachedMetaLocations(final HMaster master) throws Exception {
+    // Wait until initial meta locations are loaded.
+    ZooKeeperWatcher zk = master.getZooKeeper();
+    final List<String> metaZnodes = zk.getMetaReplicaNodes();
+    assertEquals(3, metaZnodes.size());
+    TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return master.getMetaRegionLocationCache().getMetaRegionLocations().size()
+            == metaZnodes.size();
+      }
+    });
+    List<HRegionLocation> metaHRLs = master.getMetaRegionLocationCache().getMetaRegionLocations();
+    List<HRegionLocation> actualHRLs = getCurrentMetaLocations(zk);
+    Collections.sort(metaHRLs);
+    Collections.sort(actualHRLs);
+    assertEquals(actualHRLs, metaHRLs);
+  }
+
+  @Test public void testInitialMetaLocations() throws Exception {
+    verifyCachedMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster());
+  }
+
+  @Test public void testStandByMetaLocations() throws Exception {
+    HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster();
+    verifyCachedMetaLocations(standBy);
+  }
+
+  private static ServerName getOtherRS(List<ServerName> allServers, ServerName except) {
+    Preconditions.checkArgument(allServers.size() > 0);
+    allServers.remove(except);
+    ServerName ret;
+    try {
+      Collections.shuffle(allServers);
+      ret = allServers.get(0);
+    } finally {
+      allServers.add(except);
+    }
+    return ret;
+  }
+
+  /*
+   * Shuffles the meta region replicas around the cluster and makes sure the cache is not stale.
+   */
+  @Test public void testMetaLocationsChange() throws Exception {
+    List<HRegionLocation> currentMetaLocs =
+        getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper());
+    List<ServerName> allServers = new ArrayList<>();
+    for (JVMClusterUtil.RegionServerThread rs:
+        TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
+      allServers.add(rs.getRegionServer().getServerName());
+    }
+    // Move these replicas to random servers.
+    for (HRegionLocation location: currentMetaLocs) {
+      TEST_UTIL.moveRegionAndWait(
+          location.getRegionInfo(), getOtherRS(allServers, location.getServerName()));
+    }
+    waitUntilAllMetaReplicasHavingRegionLocation(
+        TEST_UTIL.getConfiguration(), REGISTRY, 3);
+    for (JVMClusterUtil.MasterThread masterThread:
+        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+      verifyCachedMetaLocations(masterThread.getMaster());
+    }
+  }
+
+  /**
+   * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base
+   * znode for notifications.
+   */
+  @Test public void testMetaRegionLocationCache() throws Exception {
+    final String parentZnodeName = "/randomznodename";
+    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName);
+    ServerName sn = ServerName.valueOf("localhost", 1234, 5678);
+    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(conf, null, null, true)) {
+      // A thread that repeatedly creates and drops an unrelated child znode. This is to simulate
+      // some ZK activity in the background.
+      MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
+      ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
+        @Override public void doAnAction() throws Exception {
+          final String testZnode = parentZnodeName + "/child";
+          ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes());
+          ZKUtil.deleteNode(zkWatcher, testZnode);
+        }
+      });
+      ctx.startThreads();
+      try {
+        MetaRegionLocationCache metaCache = new MetaRegionLocationCache(zkWatcher);
+        // meta znodes do not exist at this point, cache should be empty.
+        assertTrue(metaCache.getMetaRegionLocations().isEmpty());
+        // Set the meta locations for the meta replicas, simulating an active HMaster's
+        // meta assignment.
+        for (int i = 0; i < 3; i++) {
+          // Updates the meta znodes.
+          MetaTableLocator.setMetaLocation(zkWatcher, sn, i, RegionState.State.OPEN);
+        }
+        // Wait until the meta cache is populated.
+        int iters = 0;
+        while (iters++ < 10) {
+          if (metaCache.getMetaRegionLocations().size() == 3) {
+            break;
+          }
+          Thread.sleep(1000);
+        }
+        List<HRegionLocation> metaLocations = metaCache.getMetaRegionLocations();
+        assertNotNull(metaLocations);
+        assertEquals(3, metaLocations.size());
+        for (HRegionLocation location : metaLocations) {
+          assertEquals(sn, location.getServerName());
+        }
+      } finally {
+        // clean up.
+        ctx.stop();
+        ZKUtil.deleteChildrenRecursively(zkWatcher, parentZnodeName);
+      }
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
index 2949cc2..3735d4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java
@@ -122,5 +122,4 @@ public class TestRegionsRecoveryConfigManager {
     }
 
   }
-
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
index c3e6279..89b74b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
@@ -19,19 +19,22 @@
 package org.apache.hadoop.hbase.protobuf;
 
 import static org.junit.Assert.assertEquals;
-
+import com.google.protobuf.ByteString;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@@ -45,8 +48,8 @@ import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 
-import com.google.protobuf.ByteString;
 
 /**
  * Class to test ProtobufUtil.
@@ -350,4 +353,31 @@ public class TestProtobufUtil {
         ProtobufUtil.toScan(expectedProto));
     assertEquals(expectedProto, actualProto);
   }
+
+  @Test
+  public void testMetaRegionState() throws Exception {
+    ServerName serverName = ServerName.valueOf("localhost", 1234, 5678);
+    // New region state style.
+    for (RegionState.State state: RegionState.State.values()) {
+      RegionState regionState =
+          new RegionState(HRegionInfo.FIRST_META_REGIONINFO, state, serverName);
+      MetaRegionServer metars = MetaRegionServer.newBuilder()
+          .setServer(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toServerName(serverName))
+          .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
+          .setState(state.convert()).build();
+      // Serialize
+      byte[] data = ProtobufUtil.prependPBMagic(metars.toByteArray());
+      // Deserialize
+      RegionState regionStateNew = ProtobufUtil.parseMetaRegionStateFrom(data, 1);
+      assertEquals(regionState.getServerName(), regionStateNew.getServerName());
+      assertEquals(regionState.getState(), regionStateNew.getState());
+    }
+    // old style.
+    RegionState rs =
+        org.apache.hadoop.hbase.protobuf.ProtobufUtil.parseMetaRegionStateFrom(
+            serverName.getVersionedBytes(), 1);
+    assertEquals(serverName, rs.getServerName());
+    assertEquals(rs.getState(), RegionState.State.OPEN);
+  }
 }


[hbase] 09/09: HBASE-23330: Fix delegation token fetch with MasterRegistry

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 55cae10bebf68ff72e7e212a11152edfcf6207c4
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Wed Sep 16 08:07:48 2020 -0700

    HBASE-23330: Fix delegation token fetch with MasterRegistry
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
---
 .../org/apache/hadoop/hbase/client/Connection.java |  5 +++++
 .../hadoop/hbase/client/ConnectionAdapter.java     |  5 +++++
 .../hadoop/hbase/client/ConnectionManager.java     |  5 +++++
 .../hadoop/hbase/security/token/TokenUtil.java     | 24 +++++++---------------
 .../hbase/client/TestMasterAddressRefresher.java   |  5 +++++
 5 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index bce0f91..f72a6ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -166,6 +166,11 @@ public interface Connection extends Abortable, Closeable {
    */
   Admin getAdmin() throws IOException;
 
+  /**
+   * @return the cluster ID unique to this HBase cluster.
+   */
+  String getClusterId() throws IOException;
+
   @Override
   public void close() throws IOException;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 0bed7ef..ac4a342 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -501,4 +501,9 @@ abstract class ConnectionAdapter implements ClusterConnection {
   public RpcControllerFactory getRpcControllerFactory() {
     return wrappedConnection.getRpcControllerFactory();
   }
+
+  @Override
+  public String getClusterId() throws IOException {
+    return wrappedConnection.getClusterId();
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 5addc7a..82b364e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -825,6 +825,11 @@ class ConnectionManager {
     }
 
     @Override
+    public String getClusterId() throws IOException {
+      return registry.getClusterId();
+    }
+
+    @Override
     public MetricsConnection getConnectionMetrics() {
       return this.metrics;
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index 78e438c..5d30bf8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -305,7 +305,7 @@ public class TokenUtil {
   public static void addTokenForJob(final Connection conn, final JobConf job, User user)
       throws IOException, InterruptedException {
 
-    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user);
+    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user);
     if (token == null) {
       token = obtainToken(conn, user);
     }
@@ -324,7 +324,7 @@ public class TokenUtil {
    */
   public static void addTokenForJob(final Connection conn, User user, Job job)
       throws IOException, InterruptedException {
-    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user);
+    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user);
     if (token == null) {
       token = obtainToken(conn, user);
     }
@@ -343,7 +343,7 @@ public class TokenUtil {
    */
   public static boolean addTokenIfMissing(Connection conn, User user)
       throws IOException, InterruptedException {
-    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn.getConfiguration(), user);
+    Token<AuthenticationTokenIdentifier> token = getAuthToken(conn, user);
     if (token == null) {
       token = obtainToken(conn, user);
       user.getUGI().addToken(token.getService(), token);
@@ -356,19 +356,9 @@ public class TokenUtil {
    * Get the authentication token of the user for the cluster specified by the given connection
    * @return null if the user does not have the token, otherwise the auth token for the cluster.
    */
-  private static Token<AuthenticationTokenIdentifier> getAuthToken(Configuration conf, User user)
-      throws IOException, InterruptedException {
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null);
-    try {
-      String clusterId = ZKClusterId.readClusterIdZNode(zkw);
-      if (clusterId == null) {
-        throw new IOException("Failed to get cluster ID");
-      }
-      return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens());
-    } catch (KeeperException e) {
-      throw new IOException(e);
-    } finally {
-      zkw.close();
-    }
+  private static Token<AuthenticationTokenIdentifier> getAuthToken(Connection conn, User user)
+      throws IOException {
+    String clusterId = conn.getClusterId();
+    return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
index 7e2f2f7..22dbfa9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterAddressRefresher.java
@@ -82,6 +82,11 @@ public class TestMasterAddressRefresher {
     }
 
     @Override
+    public String getClusterId() throws IOException {
+      return null;
+    }
+
+    @Override
     public void close() throws IOException {
 
     }
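
End to end, this change lets a client fetch a delegation token without
opening its own ZooKeeper connection. The following is a minimal
sketch, not a definitive recipe: it uses only the getClusterId() and
addTokenIfMissing() entry points visible in the diff above plus the
standard ConnectionFactory/User APIs, and it assumes the Configuration
already selects the desired connection registry (for example
MasterRegistry) on a secure cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.token.TokenUtil;

    public class FetchDelegationToken {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // The cluster ID now comes from the connection's registry
          // (master RPC or ZK), not from a private ZooKeeperWatcher.
          System.out.println("Cluster ID: " + conn.getClusterId());
          User user = User.getCurrent();
          // Reuses an existing token selected by cluster ID, or
          // obtains a fresh one from the cluster.
          TokenUtil.addTokenIfMissing(conn, user);
        }
      }
    }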


[hbase] 02/09: HBASE-23275: Track active master's address in ActiveMasterManager (#812)

Posted by bh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e4161e587540696bdfe1efb07e3169d29742cd52
Author: Bharath Vissapragada <bh...@apache.org>
AuthorDate: Wed Nov 20 11:41:36 2019 -0800

    HBASE-23275: Track active master's address in ActiveMasterManager (#812)
    
    Currently we only track whether an active master exists.
    It helps to also track the active master's address in all
    the masters, so that client RPCs that need to know which
    master is active can be served by any master.
    
    Signed-off-by: Nick Dimiduk <nd...@apache.org>
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    (cherry picked from commit efebb843afe4458599e12cf3390fe534780fac4e)
    (cherry picked from commit 742949165ff63a9bab448e4d1e40a386aa715c82)
---
 .../hadoop/hbase/master/ActiveMasterManager.java   | 52 +++++++++++++++++++---
 .../org/apache/hadoop/hbase/master/HMaster.java    |  4 ++
 .../hbase/master/TestActiveMasterManager.java      | 11 +++++
 3 files changed, 61 insertions(+), 6 deletions(-)
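
The core of this commit is a small concurrency pattern: cache the
active master's address in a volatile field, invalidate the cache on
every ZooKeeper notification, and re-fetch lazily on the next read
rather than issuing a synchronous RPC from inside the watcher callback.
As a rough illustration of that pattern (not the committed code), here
is a generic sketch; fetchFromSource() is a hypothetical stand-in for
MasterAddressTracker.getMasterAddress():

    import java.util.concurrent.atomic.AtomicBoolean;

    public abstract class LazyCachedValue<T> {
      private final AtomicBoolean sourceAvailable = new AtomicBoolean(false);
      // The cached value is immutable, so a volatile reference is all
      // the synchronization a reader needs.
      private volatile T cached;

      // Called from the watcher thread on every change notification.
      public void onChangeNotification(boolean nowAvailable) {
        sourceAvailable.set(nowAvailable);
        // Invalidate only; never fetch while handling the event.
        cached = null;
      }

      public T get() {
        if (!sourceAvailable.get()) {
          return null; // No active master right now.
        }
        T value = cached;
        if (value == null) {
          // Lazy re-fetch; a stale result is corrected by the
          // invalidation that accompanies the next notification.
          value = fetchFromSource();
          cached = value;
        }
        return value;
      }

      // Hypothetical stand-in for MasterAddressTracker.getMasterAddress().
      protected abstract T fetchFromSource();
    }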

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
index 7b93e8f..d92a48e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.master;
-
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -57,12 +56,18 @@ public class ActiveMasterManager extends ZooKeeperListener {
   final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false);
   final AtomicBoolean clusterShutDown = new AtomicBoolean(false);
 
+  // This server's information.
   private final ServerName sn;
   private int infoPort;
   private final Server master;
 
+  // Active master's server name. Invalidated anytime active master changes (based on ZK
+  // notifications) and lazily fetched on-demand.
+  // ServerName is immutable, so we don't need heavy synchronization around it.
+  private volatile ServerName activeMasterServerName;
+
   /**
-   * @param watcher
+   * @param watcher ZK watcher
    * @param sn ServerName
    * @param master An instance of a Master.
    */
@@ -107,6 +112,33 @@ public class ActiveMasterManager extends ZooKeeperListener {
   }
 
   /**
+   * Fetches the active master's ServerName from zookeeper.
+   */
+  private void fetchAndSetActiveMasterServerName() {
+    LOG.debug("Attempting to fetch active master sn from zk");
+    try {
+      activeMasterServerName = MasterAddressTracker.getMasterAddress(watcher);
+    } catch (IOException | KeeperException e) {
+      // Log and ignore for now and re-fetch later if needed.
+      LOG.error("Error fetching active master information", e);
+    }
+  }
+
+  /**
+   * @return the currently active master as seen by us or null if one does not exist.
+   */
+  public ServerName getActiveMasterServerName() {
+    if (!clusterHasActiveMaster.get()) {
+      return null;
+    }
+    if (activeMasterServerName == null) {
+      fetchAndSetActiveMasterServerName();
+    }
+    // It could still be null, but return whatever we have.
+    return activeMasterServerName;
+  }
+
+  /**
    * Handle a change in the master node.  Doesn't matter whether this was called
    * from a nodeCreated or nodeDeleted event because there are no guarantees
    * that the current state of the master node matches the event at the time of
@@ -134,6 +166,9 @@ public class ActiveMasterManager extends ZooKeeperListener {
           // Notify any thread waiting to become the active master
           clusterHasActiveMaster.notifyAll();
         }
+        // Reset the active master sn. Will be re-fetched later if needed.
+        // We don't want to make a synchronous RPC under a monitor.
+        activeMasterServerName = null;
       }
     } catch (KeeperException ke) {
       master.abort("Received an unexpected KeeperException, aborting", ke);
@@ -151,8 +186,8 @@ public class ActiveMasterManager extends ZooKeeperListener {
    * @param checkInterval the interval to check if the master is stopped
    * @param startupStatus the monitor status to track the progress
    * @return True if no issue becoming active master else false if another
-   * master was running or if some other problem (zookeeper, stop flag has been
-   * set on this Master)
+   *   master was running or if some other problem (zookeeper, stop flag has been
+   *   set on this Master)
    */
   boolean blockUntilBecomingActiveMaster(
       int checkInterval, MonitoredTask startupStatus) {
@@ -179,9 +214,13 @@ public class ActiveMasterManager extends ZooKeeperListener {
           startupStatus.setStatus("Successfully registered as active master.");
           this.clusterHasActiveMaster.set(true);
           LOG.info("Registered Active Master=" + this.sn);
+          activeMasterServerName = sn;
           return true;
         }
 
+        // Invalidate the active master name so that subsequent requests do not get any stale
+        // master information. Will be re-fetched if needed.
+        activeMasterServerName = null;
         // There is another active master running elsewhere or this is a restart
         // and the master ephemeral node has not expired yet.
         this.clusterHasActiveMaster.set(true);
@@ -208,7 +247,8 @@ public class ActiveMasterManager extends ZooKeeperListener {
             ZKUtil.deleteNode(this.watcher, this.watcher.getMasterAddressZNode());
 
             // We may have failed to delete the znode at the previous step, but
-            //  we delete the file anyway: a second attempt to delete the znode is likely to fail again.
+            //  we delete the file anyway: a second attempt to delete the znode is likely to fail
+            //  again.
             ZNodeClearer.deleteMyEphemeralNodeOnDisk();
           } else {
             msg = "Another master is the active master, " + currentMaster +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index b6311e1..1f54793f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3434,6 +3434,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return replicationLoadSourceMap;
   }
 
+  public ServerName getActiveMaster() {
+    return activeMasterManager.getActiveMasterServerName();
+  }
+
   public String getClusterId() {
     if (activeMaster) {
       return super.getClusterId();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 428ff1e..3144eeb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -18,8 +18,10 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -84,6 +86,7 @@ public class TestActiveMasterManager {
     ActiveMasterManager activeMasterManager =
       dummyMaster.getActiveMasterManager();
     assertFalse(activeMasterManager.clusterHasActiveMaster.get());
+    assertNull(activeMasterManager.getActiveMasterServerName());
 
     // First test becoming the active master uninterrupted
     MonitoredTask status = Mockito.mock(MonitoredTask.class);
@@ -92,6 +95,7 @@ public class TestActiveMasterManager {
     activeMasterManager.blockUntilBecomingActiveMaster(100, status);
     assertTrue(activeMasterManager.clusterHasActiveMaster.get());
     assertMaster(zk, master);
+    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
 
     // Now pretend master restart
     DummyMaster secondDummyMaster = new DummyMaster(zk, master);
@@ -101,6 +105,8 @@ public class TestActiveMasterManager {
     activeMasterManager.blockUntilBecomingActiveMaster(100, status);
     assertTrue(activeMasterManager.clusterHasActiveMaster.get());
     assertMaster(zk, master);
+    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
+    assertMaster(zk, secondActiveMasterManager.getActiveMasterServerName());
   }
 
   /**
@@ -128,6 +134,7 @@ public class TestActiveMasterManager {
     ActiveMasterManager activeMasterManager =
       ms1.getActiveMasterManager();
     assertFalse(activeMasterManager.clusterHasActiveMaster.get());
+    assertNull(activeMasterManager.getActiveMasterServerName());
 
     // First test becoming the active master uninterrupted
     ClusterStatusTracker clusterStatusTracker =
@@ -137,6 +144,7 @@ public class TestActiveMasterManager {
         Mockito.mock(MonitoredTask.class));
     assertTrue(activeMasterManager.clusterHasActiveMaster.get());
     assertMaster(zk, firstMasterAddress);
+    assertMaster(zk, activeMasterManager.getActiveMasterServerName());
 
     // New manager will now try to become the active master in another thread
     WaitToBeMasterThread t = new WaitToBeMasterThread(zk, secondMasterAddress);
@@ -154,6 +162,8 @@ public class TestActiveMasterManager {
     assertTrue(t.manager.clusterHasActiveMaster.get());
     // But secondary one should not be the active master
     assertFalse(t.isActiveMaster);
+    // Verify the active master ServerName is populated in the standby master.
+    assertEquals(firstMasterAddress, t.manager.getActiveMasterServerName());
 
     // Close the first server and delete its master node
     ms1.stop("stopping first server");
@@ -181,6 +191,7 @@ public class TestActiveMasterManager {
 
     assertTrue(t.manager.clusterHasActiveMaster.get());
     assertTrue(t.isActiveMaster);
+    assertEquals(secondMasterAddress, t.manager.getActiveMasterServerName());
 
     LOG.info("Deleting master node");