Posted to commits@hbase.apache.org by bh...@apache.org on 2020/09/19 22:05:01 UTC

[hbase] 05/09: HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)

This is an automated email from the ASF dual-hosted git repository.

bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit cb756629b0d7f89d530de4d1ad06c65401eb0801
Author: stack <st...@apache.org>
AuthorDate: Mon Sep 15 09:34:10 2014 -0700

    HBASE-7767 Get rid of ZKTable, and table enable/disable state in ZK (Andrey Stepachev)
    
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    
    (cherry picked from commit 3cc5d19039904361f60c413f10f3cbca27a7ba96)
---
 .../hadoop/hbase/client/ConnectionAdapter.java     |    5 +
 .../hadoop/hbase/client/ConnectionManager.java     |   26 +-
 .../apache/hadoop/hbase/client/HConnection.java    |    7 +
 .../org/apache/hadoop/hbase/client/Registry.java   |    7 -
 .../org/apache/hadoop/hbase/client/TableState.java |  205 +++
 .../hadoop/hbase/client/ZooKeeperRegistry.java     |   21 -
 .../hadoop/hbase/protobuf/RequestConverter.java    |   16 +-
 .../zookeeper/ZKTableStateClientSideReader.java    |  205 ---
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |    1 +
 .../hadoop/hbase/client/TestAsyncProcess.java      |    5 -
 .../hadoop/hbase/client/TestClientNoCluster.java   |    6 -
 .../TestZKTableStateClientSideReader.java          |   52 -
 .../hbase/protobuf/generated/HBaseProtos.java      | 1761 +++++++++++++++++++-
 .../hbase/protobuf/generated/MasterProtos.java     | 1733 ++++++++++++++++---
 .../hbase/protobuf/generated/ZooKeeperProtos.java  |  213 +--
 hbase-protocol/src/main/protobuf/HBase.proto       |   21 +
 hbase-protocol/src/main/protobuf/Master.proto      |   12 +-
 hbase-protocol/src/main/protobuf/ZooKeeper.proto   |    3 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |    6 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      |    8 +-
 .../hadoop/hbase/CoordinatedStateManager.java      |    8 -
 .../org/apache/hadoop/hbase/TableDescriptor.java   |  161 ++
 .../org/apache/hadoop/hbase/TableDescriptors.java  |   25 +
 .../org/apache/hadoop/hbase/TableStateManager.java |  121 --
 .../coordination/BaseCoordinatedStateManager.java  |    5 -
 .../coordination/ZkCoordinatedStateManager.java    |   14 -
 .../coordination/ZkOpenRegionCoordination.java     |    4 +-
 .../hadoop/hbase/master/AssignmentManager.java     |  114 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   33 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |    1 -
 .../hadoop/hbase/master/MasterRpcServices.java     |   26 +-
 .../apache/hadoop/hbase/master/MasterServices.java |    5 +
 .../apache/hadoop/hbase/master/RegionStates.java   |   14 +-
 .../hadoop/hbase/master/TableNamespaceManager.java |    9 +-
 .../hadoop/hbase/master/TableStateManager.java     |  219 +++
 .../hbase/master/handler/ClosedRegionHandler.java  |    5 +-
 .../hbase/master/handler/CreateTableHandler.java   |   84 +-
 .../hbase/master/handler/DisableTableHandler.java  |   30 +-
 .../hbase/master/handler/EnableTableHandler.java   |   45 +-
 .../hbase/master/handler/TableEventHandler.java    |   13 +-
 .../master/procedure/AddColumnFamilyProcedure.java |    4 +-
 .../master/procedure/CreateTableProcedure.java     |   15 +-
 .../procedure/DeleteColumnFamilyProcedure.java     |    4 +-
 .../master/procedure/DisableTableProcedure.java    |   14 +-
 .../master/procedure/EnableTableProcedure.java     |   12 +-
 .../master/procedure/MasterDDLOperationHelper.java |    4 +-
 .../procedure/ModifyColumnFamilyProcedure.java     |    4 +-
 .../master/procedure/ModifyTableProcedure.java     |    6 +-
 .../master/procedure/ServerCrashProcedure.java     |    8 +-
 .../hbase/master/snapshot/SnapshotManager.java     |    8 +-
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |    4 +-
 .../hadoop/hbase/regionserver/CompactionTool.java  |   14 +-
 .../hbase/regionserver/wal/WALCellCodec.java       |    1 +
 .../hadoop/hbase/snapshot/SnapshotManifest.java    |    8 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      |  212 ++-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |   60 +-
 .../java/org/apache/hadoop/hbase/util/HMerge.java  |    3 +-
 .../java/org/apache/hadoop/hbase/util/Merge.java   |    5 +-
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   |   90 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |   25 +-
 .../hbase/zookeeper/ZKTableStateManager.java       |  369 ----
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |    1 +
 .../apache/hadoop/hbase/TestDrainingServer.java    |  246 +--
 .../hbase/TestFSTableDescriptorForceCreation.java  |   12 +-
 .../TestHColumnDescriptorDefaultVersions.java      |    4 +-
 .../apache/hadoop/hbase/TestTableDescriptor.java   |   57 +
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |   26 +-
 .../hbase/master/MockNoopMasterServices.java       |    5 +
 .../master/TestAssignmentManagerOnCluster.java     |   16 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java    |   35 +-
 .../org/apache/hadoop/hbase/master/TestMaster.java |    4 +-
 .../hadoop/hbase/master/TestMasterFailover.java    |   19 +-
 .../TestMasterRestartAfterDisablingTable.java      |    8 +-
 .../hbase/master/TestOpenedRegionHandler.java      |   11 +-
 .../hadoop/hbase/master/TestRegionStates.java      |    2 -
 .../hadoop/hbase/master/TestTableLockManager.java  |    6 +-
 .../procedure/MasterProcedureTestingUtility.java   |    8 +-
 .../procedure/TestCreateTableProcedure2.java       |   10 +-
 .../TestTableDescriptorModificationFromClient.java |    6 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |    8 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |   39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java    |   51 -
 .../hbase/zookeeper/TestZKTableStateManager.java   |  114 --
 83 files changed, 4764 insertions(+), 2043 deletions(-)
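
For orientation, after this change a client asks the active master for table
state over RPC instead of reading the enable/disable znode in ZooKeeper. A
minimal sketch of the new client-side call (assumes a reachable cluster with
default configuration; the table name "t1" is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;
    import org.apache.hadoop.hbase.client.TableState;

    public class TableStateCheck {
      public static void main(String[] args) throws Exception {
        HConnection conn = HConnectionManager.createConnection(HBaseConfiguration.create());
        try {
          // Served by a master RPC (GetTableStateRequest), not a ZK read.
          TableState state = conn.getTableState(TableName.valueOf("t1"));
          System.out.println("state of t1: " + state.getState());
        } finally {
          conn.close();
        }
      }
    }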

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 4e3e55e..0bed7ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -185,6 +185,11 @@ abstract class ConnectionAdapter implements ClusterConnection {
   }
 
   @Override
+  public TableState getTableState(TableName tableName) throws IOException {
+    return wrappedConnection.getTableState(tableName);
+  }
+
+  @Override
   public HTableDescriptor[] listTables() throws IOException {
     return wrappedConnection.listTables();
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 61107f7..961ee3a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -124,6 +124,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
@@ -1006,7 +1008,7 @@ class ConnectionManager {
 
     @Override
     public boolean isTableEnabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, true);
+      return getTableState(tableName).inStates(TableState.State.ENABLED);
     }
 
     @Override
@@ -1016,7 +1018,7 @@ class ConnectionManager {
 
     @Override
     public boolean isTableDisabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, false);
+      return getTableState(tableName).inStates(TableState.State.DISABLED);
     }
 
     @Override
@@ -2174,6 +2176,13 @@ class ConnectionManager {
         }
 
         @Override
+        public GetTableStateResponse getTableState(
+                RpcController controller, GetTableStateRequest request)
+                throws ServiceException {
+          return stub.getTableState(controller, request);
+        }
+
+        @Override
         public void close() {
           release(this.mss);
         }
@@ -2800,6 +2809,19 @@ class ConnectionManager {
     public RpcControllerFactory getRpcControllerFactory() {
       return this.rpcControllerFactory;
     }
+
+    public TableState getTableState(TableName tableName) throws IOException {
+      MasterKeepAliveConnection master = getKeepAliveMasterService();
+      try {
+        GetTableStateResponse resp = master.getTableState(null,
+                RequestConverter.buildGetTableStateRequest(tableName));
+        return TableState.convert(resp.getTableState());
+      } catch (ServiceException se) {
+        throw ProtobufUtil.getRemoteException(se);
+      } finally {
+        master.close();
+      }
+    }
   }
 
   /**
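Note that isTableEnabled and isTableDisabled each now match exactly one state,
so a table that is mid-transition reports false for both. A hedged sketch of
checking for the transient states with the varargs inStates (the helper class
and method names here are hypothetical, not part of this commit):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.TableState;

    final class TableStateUtil {  // hypothetical helper, for illustration only
      /** True if the table is mid-transition (ENABLING or DISABLING). */
      static boolean isTransitioning(HConnection conn, TableName tn) throws IOException {
        return conn.getTableState(tn)
            .inStates(TableState.State.ENABLING, TableState.State.DISABLING);
      }
    }
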
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index e476d5f..7de1dfb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -213,6 +213,13 @@ public interface HConnection extends Connection {
   boolean isTableDisabled(byte[] tableName) throws IOException;
 
   /**
+   * Retrieve the TableState, representing the current state of the table.
+   * @param tableName the table to fetch state for
+   * @return state of the table
+   */
+  public TableState getTableState(TableName tableName) throws IOException;
+
+  /**
    * @param tableName table name
    * @return true if all regions of the table are available, false otherwise
    * @throws IOException if a remote or network exception occurs
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index 58ec3c4..9debd63 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.TableName;
 
 /**
  * Cluster registry.
@@ -47,12 +46,6 @@ interface Registry {
   String getClusterId();
 
   /**
-   * @param enabled Return true if table is enabled
-   * @throws IOException
-   */
-  boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException;
-
-  /**
    * @return Count of 'running' regionservers
    * @throws IOException
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
new file mode 100644
index 0000000..384d4e6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+/**
+ * Represents table state.
+ */
+@InterfaceAudience.Private
+public class TableState {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static enum State {
+    ENABLED,
+    DISABLED,
+    DISABLING,
+    ENABLING;
+
+    /**
+     * Convert from PB version of State
+     *
+     * @param state the PB state to convert from
+     * @return POJO
+     */
+    public static State convert(HBaseProtos.TableState.State state) {
+      State ret;
+      switch (state) {
+        case ENABLED:
+          ret = State.ENABLED;
+          break;
+        case DISABLED:
+          ret = State.DISABLED;
+          break;
+        case DISABLING:
+          ret = State.DISABLING;
+          break;
+        case ENABLING:
+          ret = State.ENABLING;
+          break;
+        default:
+          throw new IllegalStateException(state.toString());
+      }
+      return ret;
+    }
+
+    /**
+     * Convert to PB version of State
+     *
+     * @return PB
+     */
+    public HBaseProtos.TableState.State convert() {
+      HBaseProtos.TableState.State state;
+      switch (this) {
+        case ENABLED:
+          state = HBaseProtos.TableState.State.ENABLED;
+          break;
+        case DISABLED:
+          state = HBaseProtos.TableState.State.DISABLED;
+          break;
+        case DISABLING:
+          state = HBaseProtos.TableState.State.DISABLING;
+          break;
+        case ENABLING:
+          state = HBaseProtos.TableState.State.ENABLING;
+          break;
+        default:
+          throw new IllegalStateException(this.toString());
+      }
+      return state;
+    }
+
+  }
+
+  private final long timestamp;
+  private final TableName tableName;
+  private final State state;
+
+  /**
+   * Create instance of TableState.
+   * @param tableName table for which state is created
+   * @param state state of the table
+   * @param timestamp timestamp of the state, in milliseconds
+   */
+  public TableState(TableName tableName, State state, long timestamp) {
+    this.tableName = tableName;
+    this.state = state;
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * Create instance of TableState with current timestamp
+   *
+   * @param tableName table for which state is created
+   * @param state     state of the table
+   */
+  public TableState(TableName tableName, State state) {
+    this(tableName, state, System.currentTimeMillis());
+  }
+
+  /**
+   * @return table state
+   */
+  public State getState() {
+    return state;
+  }
+
+  /**
+   * Timestamp of table state
+   *
+   * @return milliseconds
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /**
+   * Table name the state belongs to
+   *
+   * @return table name
+   */
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  /**
+   * Check whether the table is in the given state
+   * @param state state to check against
+   * @return true if the table is in the given state
+   */
+  public boolean inStates(State state) {
+    return this.state.equals(state);
+  }
+
+  /**
+   * Check whether the table is in any of the given states
+   * @param states states to check against
+   * @return true if the table is in any of the given states
+   */
+  public boolean inStates(State... states) {
+    for (State s : states) {
+      if (s.equals(this.state)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Convert to PB version of TableState
+   * @return PB
+   */
+  public HBaseProtos.TableState convert() {
+    return HBaseProtos.TableState.newBuilder()
+        .setState(this.state.convert())
+        .setTable(ProtobufUtil.toProtoTableName(this.tableName))
+        .setTimestamp(this.timestamp)
+        .build();
+  }
+
+  /**
+   * Convert from PB version of TableState
+   * @param tableState the PB TableState to convert from
+   * @return POJO
+   */
+  public static TableState convert(HBaseProtos.TableState tableState) {
+    TableState.State state = State.convert(tableState.getState());
+    return new TableState(ProtobufUtil.toTableName(tableState.getTable()),
+        state, tableState.getTimestamp());
+  }
+
+  /**
+   * Static version of the state checker
+   * @param state the state to look for
+   * @param target states to compare against
+   * @return true if state equals any of the target states
+   */
+  public static boolean isInStates(State state, State... target) {
+    for (State tableState : target) {
+      if (state.equals(tableState)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
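
The POJO converts to and from the protobuf HBaseProtos.TableState message via
the convert() methods above; a minimal round-trip sketch (table name
illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    final class TableStateRoundTrip {
      public static void main(String[] args) {
        TableState in = new TableState(TableName.valueOf("t1"), TableState.State.DISABLED);
        HBaseProtos.TableState pb = in.convert();  // POJO -> protobuf
        TableState out = TableState.convert(pb);   // protobuf -> POJO
        assert out.getState() == TableState.State.DISABLED;
        assert out.getTimestamp() == in.getTimestamp();
      }
    }
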
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 05572b7..8f7257e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -27,10 +26,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.zookeeper.KeeperException;
 
@@ -117,24 +114,6 @@ class ZooKeeperRegistry implements Registry {
   }
 
   @Override
-  public boolean isTableOnlineState(TableName tableName, boolean enabled)
-  throws IOException {
-    ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
-    try {
-      if (enabled) {
-        return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName);
-      }
-      return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName);
-    } catch (KeeperException e) {
-      throw new IOException("Enable/Disable failed", e);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    } finally {
-       zkw.close();
-    }
-  }
-
-  @Override
   public int getCurrentNrHRS() throws IOException {
     ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
     try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 63b8af2..31e69cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.ByteStringer;
+
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -95,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
@@ -117,7 +120,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEn
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -1411,6 +1413,18 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  /**
+   * Creates a protocol buffer GetTableStateRequest
+   *
+   * @param tableName table to get request for
+   * @return a GetTableStateRequest
+   */
+  public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) {
+    return GetTableStateRequest.newBuilder()
+            .setTableName(ProtobufUtil.toProtoTableName(tableName))
+            .build();
+  }
+
   /**
    * Creates a protocol buffer GetTableDescriptorsRequest for a single table
    *
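
ConnectionManager above uses this converter when issuing the master RPC; a
sketch of building the request directly (the wrapper class is hypothetical,
the table name illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;

    final class BuildRequestExample {  // hypothetical, for illustration only
      public static void main(String[] args) {
        GetTableStateRequest req =
            RequestConverter.buildGetTableStateRequest(TableName.valueOf("t1"));
        // The request carries only the protobuf TableName.
        System.out.println(req.getTableName().getQualifier().toStringUtf8());
      }
    }
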
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
deleted file mode 100644
index 7c21b01..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Non-instantiable class that provides helper functions to learn
- * about HBase table state for code running on client side (hence, not having
- * access to consensus context).
- *
- * Doesn't cache any table state, just goes directly to ZooKeeper.
- * TODO: decouple this class from ZooKeeper.
- */
-@InterfaceAudience.Private
-public class ZKTableStateClientSideReader {
-
-  private ZKTableStateClientSideReader() {}
-  
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isEnabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED;
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING}
-   * of {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) ||
-      isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    Set<TableName> disabledTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    for (String child: children) {
-      TableName tableName =
-          TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-      if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName);
-    }
-    return disabledTables;
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return
-        getTablesInStates(
-          zkw,
-          ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING);
-  }
-
-  /**
-   * Gets a list of all the tables set as enabling in zookeeper.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @return Set of enabling tables, empty Set if none
-   * @throws KeeperException
-   * @throws InterruptedException
-   */
-  public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    return getTablesInStates(zkw, ZooKeeperProtos.Table.State.ENABLING);
-  }
-
-  /**
-   * Gets a list of tables that are set as one of the passing in states in zookeeper.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param states the list of states that a table could be in
-   * @return Set of tables in one of the states, empty Set if none
-   * @throws KeeperException
-   * @throws InterruptedException
-   */
-  private static Set<TableName> getTablesInStates(
-    ZooKeeperWatcher zkw,
-    ZooKeeperProtos.Table.State... states)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    Set<TableName> tableNameSet = new HashSet<TableName>();
-    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    TableName tableName;
-    ZooKeeperProtos.Table.State tableState;
-    for (String child: children) {
-      tableName = TableName.valueOf(child);
-      tableState = getTableState(zkw, tableName);
-      for (ZooKeeperProtos.Table.State state : states) {
-         if (tableState == state) {
-           tableNameSet.add(tableName);
-           break;
-         }
-      }
-    }
-    return tableNameSet;
-  }
-
-  static boolean isTableState(final ZooKeeperProtos.Table.State expectedState,
-      final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && currentState.equals(expectedState);
-  }
-
-  /**
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   * @throws TableNotFoundException if tableName doesn't exist
-   */
-  static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-      throws KeeperException, InterruptedException, TableNotFoundException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) {
-      throw new TableNotFoundException(tableName);
-    }
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-      return builder.getState();
-    } catch (IOException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index b180fb9..be05054 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -138,6 +138,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   // znode used for region transitioning and assignment
   public String assignmentZNode;
   // znode used for table disabling/enabling
+  @Deprecated
   public String tableZNode;
   // znode containing the unique cluster ID
   public String clusterIdZNode;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 5d37ad7..21e3d85 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -475,11 +475,6 @@ public class TestAsyncProcess {
       }
 
       @Override
-      public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException {
-        return false;
-      }
-
-      @Override
       public int getCurrentNrHRS() throws IOException {
         return 1;
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index f6968bc..06647ca 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -130,12 +130,6 @@ public class TestClientNoCluster extends Configured implements Tool {
     }
 
     @Override
-    public boolean isTableOnlineState(TableName tableName, boolean enabled)
-    throws IOException {
-      return enabled;
-    }
-
-    @Override
     public int getCurrentNrHRS() throws IOException {
       return 1;
     }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java
deleted file mode 100644
index e82d3b0..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateClientSideReader.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-@Category({SmallTests.class})
-public class TestZKTableStateClientSideReader {
-
-  @Test
-  public void test() throws Exception {
-    ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class);
-    RecoverableZooKeeper rzk = Mockito.mock(RecoverableZooKeeper.class);
-    Mockito.doReturn(rzk).when(zkw).getRecoverableZooKeeper();
-    Mockito.doReturn(null).when(rzk).getData(Mockito.anyString(),
-        Mockito.any(Watcher.class), Mockito.any(Stat.class));
-    TableName table = TableName.valueOf("table-not-exists");
-    try {
-      ZKTableStateClientSideReader.getTableState(zkw, table);
-      fail("Shouldn't reach here");
-    } catch(TableNotFoundException e) {
-      // Expected Table not found exception
-    }
-  }
-}
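
The remainder of the diff is mostly protoc-generated code for the new
hbase.pb.TableState message, which carries a required State enum (ENABLED,
DISABLED, DISABLING, ENABLING), a required TableName, and an optional
timestamp. A sketch constructing it through the generated builder (values
illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    final class PbTableStateExample {  // hypothetical, for illustration only
      public static void main(String[] args) {
        HBaseProtos.TableState pb = HBaseProtos.TableState.newBuilder()
            .setState(HBaseProtos.TableState.State.ENABLED)                    // required
            .setTable(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))  // required
            .setTimestamp(System.currentTimeMillis())                          // optional
            .build();
        System.out.println(pb);
      }
    }
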
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 82fcb61..f86370d 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -1975,6 +1975,1576 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.TableSchema)
   }
 
+  public interface TableStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableState.State state = 1;
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    boolean hasState();
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState();
+
+    // required .hbase.pb.TableName table = 2;
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    boolean hasTable();
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable();
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder();
+
+    // optional uint64 timestamp = 3;
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    boolean hasTimestamp();
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    long getTimestamp();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableState}
+   *
+   * <pre>
+   ** Denotes state of the table 
+   * </pre>
+   */
+  public static final class TableState extends
+      com.google.protobuf.GeneratedMessage
+      implements TableStateOrBuilder {
+    // Use TableState.newBuilder() to construct.
+    private TableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TableState defaultInstance;
+    public static TableState getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableState getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TableState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                state_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = table_.toBuilder();
+              }
+              table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(table_);
+                table_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              timestamp_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TableState> PARSER =
+        new com.google.protobuf.AbstractParser<TableState>() {
+      public TableState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TableState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TableState> getParserForType() {
+      return PARSER;
+    }
+
+    /**
+     * Protobuf enum {@code hbase.pb.TableState.State}
+     *
+     * <pre>
+     * Table's current state
+     * </pre>
+     */
+    public enum State
+        implements com.google.protobuf.ProtocolMessageEnum {
+      /**
+       * <code>ENABLED = 0;</code>
+       */
+      ENABLED(0, 0),
+      /**
+       * <code>DISABLED = 1;</code>
+       */
+      DISABLED(1, 1),
+      /**
+       * <code>DISABLING = 2;</code>
+       */
+      DISABLING(2, 2),
+      /**
+       * <code>ENABLING = 3;</code>
+       */
+      ENABLING(3, 3),
+      ;
+
+      /**
+       * <code>ENABLED = 0;</code>
+       */
+      public static final int ENABLED_VALUE = 0;
+      /**
+       * <code>DISABLED = 1;</code>
+       */
+      public static final int DISABLED_VALUE = 1;
+      /**
+       * <code>DISABLING = 2;</code>
+       */
+      public static final int DISABLING_VALUE = 2;
+      /**
+       * <code>ENABLING = 3;</code>
+       */
+      public static final int ENABLING_VALUE = 3;
+
+
+      public final int getNumber() { return value; }
+
+      public static State valueOf(int value) {
+        switch (value) {
+          case 0: return ENABLED;
+          case 1: return DISABLED;
+          case 2: return DISABLING;
+          case 3: return ENABLING;
+          default: return null;
+        }
+      }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<State>() {
+              public State findValueByNumber(int number) {
+                return State.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final State[] VALUES = values();
+
+      public static State valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private State(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+
+      // @@protoc_insertion_point(enum_scope:hbase.pb.TableState.State)
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableState.State state = 1;
+    public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_;
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+      return state_;
+    }
+
+    // required .hbase.pb.TableName table = 2;
+    public static final int TABLE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_;
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public boolean hasTable() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+      return table_;
+    }
+    /**
+     * <code>required .hbase.pb.TableName table = 2;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+      return table_;
+    }
+
+    // optional uint64 timestamp = 3;
+    public static final int TIMESTAMP_FIELD_NUMBER = 3;
+    private long timestamp_;
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    public boolean hasTimestamp() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional uint64 timestamp = 3;</code>
+     */
+    public long getTimestamp() {
+      return timestamp_;
+    }
+
+    private void initFields() {
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      timestamp_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTable()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTable().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, table_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt64(3, timestamp_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, table_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(3, timestamp_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj;
+
+      boolean result = true;
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result && (hasTable() == other.hasTable());
+      if (hasTable()) {
+        result = result && getTable()
+            .equals(other.getTable());
+      }
+      result = result && (hasTimestamp() == other.hasTimestamp());
+      if (hasTimestamp()) {
+        result = result && (getTimestamp()
+            == other.getTimestamp());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      if (hasTable()) {
+        hash = (37 * hash) + TABLE_FIELD_NUMBER;
+        hash = (53 * hash) + getTable().hashCode();
+      }
+      if (hasTimestamp()) {
+        hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimestamp());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.TableState}
+     *
+     * <pre>
+     ** Denotes state of the table 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        timestamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (tableBuilder_ == null) {
+          result.table_ = table_;
+        } else {
+          result.table_ = tableBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.timestamp_ = timestamp_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this;
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasTable()) {
+          mergeTable(other.getTable());
+        }
+        if (other.hasTimestamp()) {
+          setTimestamp(other.getTimestamp());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasState()) {
+          
+          return false;
+        }
+        if (!hasTable()) {
+          
+          return false;
+        }
+        if (!getTable().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableState.State state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+        return state_;
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState.State state = 1;</code>
+       *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        onChanged();
+        return this;
+      }
+
+      // required .hbase.pb.TableName table = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableBuilder_;
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public boolean hasTable() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+        if (tableBuilder_ == null) {
+          return table_;
+        } else {
+          return tableBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          table_ = value;
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder setTable(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+        if (tableBuilder_ == null) {
+          table_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              table_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            table_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+          } else {
+            table_ = value;
+          }
+          onChanged();
+        } else {
+          tableBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public Builder clearTable() {
+        if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getTableFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+        if (tableBuilder_ != null) {
+          return tableBuilder_.getMessageOrBuilder();
+        } else {
+          return table_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table = 2;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> 
+          getTableFieldBuilder() {
+        if (tableBuilder_ == null) {
+          tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  table_,
+                  getParentForChildren(),
+                  isClean());
+          table_ = null;
+        }
+        return tableBuilder_;
+      }
+
+      // optional uint64 timestamp = 3;
+      private long timestamp_ ;
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public boolean hasTimestamp() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public long getTimestamp() {
+        return timestamp_;
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public Builder setTimestamp(long value) {
+        bitField0_ |= 0x00000004;
+        timestamp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 timestamp = 3;</code>
+       */
+      public Builder clearTimestamp() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        timestamp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.TableState)
+    }
+
+    static {
+      defaultInstance = new TableState(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.TableState)
+  }
+
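For reference, the accessors above give the usual protobuf round trip for the new message. A minimal sketch of a caller exercising them; it assumes the regenerated HBaseProtos and TableProtos classes from this commit are on the classpath, that TableProtos.TableName carries the usual namespace/qualifier bytes fields (not shown in this hunk), and the class name TableStateRoundTrip is purely illustrative:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName;

    public class TableStateRoundTrip {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))  // assumed field, see Table.proto
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        // state (1) and table (2) are required, timestamp (3) is optional;
        // build() throws an uninitialized-message exception if either
        // required field is missing (see isInitialized() above).
        TableState state = TableState.newBuilder()
            .setState(TableState.State.DISABLING)
            .setTable(table)
            .setTimestamp(System.currentTimeMillis())
            .build();
        TableState parsed = TableState.parseFrom(state.toByteArray());
        System.out.println(parsed.getState());  // DISABLING
      }
    }
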
+  public interface TableDescriptorOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableSchema schema = 1;
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    boolean hasSchema();
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema();
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder();
+
+    // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    boolean hasState();
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState();
+  }
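As with every generated message, the OrBuilder interface lets callers read fields from either an immutable TableDescriptor or a live Builder without forcing an intermediate build(). A small sketch of the pattern (the class name TableDescriptorViews is illustrative only):

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    final class TableDescriptorViews {
      // Works on the message and on its Builder alike; for the optional
      // state field (2), getState() falls back to the declared default,
      // ENABLED, whenever hasState() is false.
      static HBaseProtos.TableState.State stateOf(
          HBaseProtos.TableDescriptorOrBuilder view) {
        return view.getState();
      }
    }
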
+  /**
+   * Protobuf type {@code hbase.pb.TableDescriptor}
+   *
+   * <pre>
+     ** Representation of the table state as kept on HDFS. 
+   * </pre>
+   */
+  public static final class TableDescriptor extends
+      com.google.protobuf.GeneratedMessage
+      implements TableDescriptorOrBuilder {
+    // Use TableDescriptor.newBuilder() to construct.
+    private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TableDescriptor defaultInstance;
+    public static TableDescriptor getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableDescriptor getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TableDescriptor(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = schema_.toBuilder();
+              }
+              schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(schema_);
+                schema_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                state_ = value;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TableDescriptor> PARSER =
+        new com.google.protobuf.AbstractParser<TableDescriptor>() {
+      public TableDescriptor parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TableDescriptor(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TableDescriptor> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableSchema schema = 1;
+    public static final int SCHEMA_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_;
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public boolean hasSchema() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+      return schema_;
+    }
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+      return schema_;
+    }
+
+    // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+    public static final int STATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_;
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+      return state_;
+    }
+
+    private void initFields() {
+      schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasSchema()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getSchema().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, schema_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, state_.getNumber());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, schema_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, state_.getNumber());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj;
+
+      boolean result = true;
+      result = result && (hasSchema() == other.hasSchema());
+      if (hasSchema()) {
+        result = result && getSchema()
+            .equals(other.getSchema());
+      }
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSchema()) {
+        hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
+        hash = (53 * hash) + getSchema().hashCode();
+      }
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.TableDescriptor}
+     *
+     * <pre>
+     ** Representation of the table state as kept on HDFS. 
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getSchemaFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+        } else {
+          schemaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (schemaBuilder_ == null) {
+          result.schema_ = schema_;
+        } else {
+          result.schema_ = schemaBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.state_ = state_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this;
+        if (other.hasSchema()) {
+          mergeSchema(other.getSchema());
+        }
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasSchema()) {
+          
+          return false;
+        }
+        if (!getSchema().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableSchema schema = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_;
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public boolean hasSchema() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+        if (schemaBuilder_ == null) {
+          return schema_;
+        } else {
+          return schemaBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+        if (schemaBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          schema_ = value;
+          onChanged();
+        } else {
+          schemaBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder setSchema(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+        if (schemaBuilder_ == null) {
+          schema_ = builderForValue.build();
+          onChanged();
+        } else {
+          schemaBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+        if (schemaBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+            schema_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial();
+          } else {
+            schema_ = value;
+          }
+          onChanged();
+        } else {
+          schemaBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public Builder clearSchema() {
+        if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+          onChanged();
+        } else {
+          schemaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getSchemaFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+        if (schemaBuilder_ != null) {
+          return schemaBuilder_.getMessageOrBuilder();
+        } else {
+          return schema_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableSchema schema = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> 
+          getSchemaFieldBuilder() {
+        if (schemaBuilder_ == null) {
+          schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+                  schema_,
+                  getParentForChildren(),
+                  isClean());
+          schema_ = null;
+        }
+        return schemaBuilder_;
+      }
+
+      // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() {
+        return state_;
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor)
+    }
+
+    static {
+      defaultInstance = new TableDescriptor(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor)
+  }
+
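Because state (field 2) is optional with a declared default, a TableDescriptor built with only the required schema still reports ENABLED. A sketch, assuming a TableSchema whose fields are all optional or repeated in this revision of the .proto (so its default instance passes isInitialized()); the class name TableDescriptorDefaults is illustrative only:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState;

    public class TableDescriptorDefaults {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptor.newBuilder()
            .setSchema(TableSchema.getDefaultInstance())  // schema (1) is required
            .build();
        // state (2) was never set: hasState() is false and getState()
        // falls back to the declared default, ENABLED.
        System.out.println(td.hasState() + " " + td.getState());  // false ENABLED
      }
    }
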
   public interface ColumnFamilySchemaOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -19558,6 +21128,16 @@ public final class HBaseProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_TableSchema_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableState_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableState_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableDescriptor_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableDescriptor_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ColumnFamilySchema_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -19687,64 +21267,71 @@ public final class HBaseProtos {
       "leName\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.By" +
       "tesBytesPair\0225\n\017column_families\030\003 \003(\0132\034." +
       "hbase.pb.ColumnFamilySchema\022/\n\rconfigura" +
-      "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\201\001\n" +
-      "\022ColumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\natt" +
-      "ributes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022" +
-      "/\n\rconfiguration\030\003 \003(\0132\030.hbase.pb.NameSt",
-      "ringPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002" +
-      "(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" +
-      "ame\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022" +
-      "\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nrepli" +
-      "ca_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favore" +
-      "d_node\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017R" +
-      "egionSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb." +
-      "RegionSpecifier.RegionSpecifierType\022\r\n\005v" +
-      "alue\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REG" +
-      "ION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tT",
-      "imeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Co" +
-      "lumnFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002" +
-      "(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRa" +
-      "nge\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004" +
-      "port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coproc" +
-      "essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" +
-      "\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP" +
-      "air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" +
-      "sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" +
-      "\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu",
-      "e\030\002 \001(\003\"\206\001\n\024ProcedureDescription\022\021\n\tsign" +
-      "ature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreatio" +
-      "n_time\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030" +
-      ".hbase.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" +
-      "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" +
-      "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" +
-      "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" +
-      "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Na" +
-      "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfi" +
-      "guration\030\002 \003(\0132\030.hbase.pb.NameStringPair",
-      "\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url" +
-      "\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014" +
-      "\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rve" +
-      "rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" +
-      "\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" +
-      "\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" +
-      "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
-      "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" +
-      "\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDesc" +
-      "ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005",
-      "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" +
-      "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" +
-      "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" +
-      "\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocation\022)\n\013r" +
-      "egion_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022)" +
-      "\n\013server_name\030\002 \001(\0132\024.hbase.pb.ServerNam" +
-      "e\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareType\022\010\n\004LES" +
-      "S\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT" +
-      "_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATE" +
-      "R\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECOND",
-      "S\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022" +
-      "\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004" +
-      "DAYS\020\007B>\n*org.apache.hadoop.hbase.protob" +
-      "uf.generatedB\013HBaseProtosH\001\240\001\001"
+      "tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\257\001\n" +
+      "\nTableState\022)\n\005state\030\001 \002(\0162\032.hbase.pb.Ta" +
+      "bleState.State\022\"\n\005table\030\002 \002(\0132\023.hbase.pb" +
+      ".TableName\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013",
+      "\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002" +
+      "\022\014\n\010ENABLING\020\003\"l\n\017TableDescriptor\022%\n\006sch" +
+      "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\0222\n\005stat" +
+      "e\030\002 \001(\0162\032.hbase.pb.TableState.State:\007ENA" +
+      "BLED\"\201\001\n\022ColumnFamilySchema\022\014\n\004name\030\001 \002(" +
+      "\014\022,\n\nattributes\030\002 \003(\0132\030.hbase.pb.BytesBy" +
+      "tesPair\022/\n\rconfiguration\030\003 \003(\0132\030.hbase.p" +
+      "b.NameStringPair\"\243\001\n\nRegionInfo\022\021\n\tregio" +
+      "n_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.p" +
+      "b.TableName\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_ke",
+      "y\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022" +
+      "\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*" +
+      "\n\014favored_node\030\001 \003(\0132\024.hbase.pb.ServerNa" +
+      "me\"\236\001\n\017RegionSpecifier\022;\n\004type\030\001 \002(\0162-.h" +
+      "base.pb.RegionSpecifier.RegionSpecifierT" +
+      "ype\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierTyp" +
+      "e\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAM" +
+      "E\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001" +
+      "(\004\"W\n\025ColumnFamilyTimeRange\022\025\n\rcolumn_fa" +
+      "mily\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(\0132\023.hbase.p",
+      "b.TimeRange\"A\n\nServerName\022\021\n\thost_name\030\001" +
+      " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" +
+      "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" +
+      "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" +
+      "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014" +
+      "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" +
+      "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" +
+      "\022\r\n\005value\030\002 \001(\003\"\206\001\n\024ProcedureDescription" +
+      "\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n" +
+      "\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfiguration",
+      "\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n\010Empt" +
+      "yMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDou" +
+      "bleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimal" +
+      "Msg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016l" +
+      "east_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002" +
+      "(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022" +
+      "/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.NameSt" +
+      "ringPair\"\235\001\n\013VersionInfo\022\017\n\007version\030\001 \002(" +
+      "\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user" +
+      "\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002",
+      "(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_min" +
+      "or\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort" +
+      "\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb." +
+      "VersionInfo\"\243\002\n\023SnapshotDescription\022\014\n\004n" +
+      "ame\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_tim" +
+      "e\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.Snap" +
+      "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" +
+      " \001(\005\022\r\n\005owner\030\006 \001(\t\022<\n\025users_and_permiss" +
+      "ions\030\007 \001(\0132\035.hbase.pb.UsersAndPermission" +
+      "s\022\016\n\003ttl\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022",
+      "\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"w\n\016RegionLocat" +
+      "ion\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" +
+      "onInfo\022)\n\013server_name\030\002 \001(\0132\024.hbase.pb.S" +
+      "erverName\022\017\n\007seq_num\030\003 \002(\003*r\n\013CompareTyp" +
+      "e\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020" +
+      "\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013" +
+      "\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NA" +
+      "NOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISE" +
+      "CONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOU" +
+      "RS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbas",
+      "e.protobuf.generatedB\013HBaseProtosH\001\240\001\001"
     };
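    // The escaped block above is the serialized FileDescriptorProto for
    // HBase.proto; inserting TableState and TableDescriptor at message
    // indices 1 and 2 is what shifts every later getMessageTypes() index
    // down by two in the assigner below. A quick reflective sanity check
    // (a sketch, assuming the regenerated class is on the classpath):
    //
    //   com.google.protobuf.Descriptors.FileDescriptor fd =
    //       org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor();
    //   System.out.println(fd.getMessageTypes().get(1).getFullName()); // hbase.pb.TableState
    //   System.out.println(fd.getMessageTypes().get(2).getFullName()); // hbase.pb.TableDescriptor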
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -19757,140 +21344,152 @@ public final class HBaseProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TableSchema_descriptor,
               new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", });
-          internal_static_hbase_pb_ColumnFamilySchema_descriptor =
+          internal_static_hbase_pb_TableState_descriptor =
             getDescriptor().getMessageTypes().get(1);
+          internal_static_hbase_pb_TableState_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_TableState_descriptor,
+              new java.lang.String[] { "State", "Table", "Timestamp", });
+          internal_static_hbase_pb_TableDescriptor_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_TableDescriptor_descriptor,
+              new java.lang.String[] { "Schema", "State", });
+          internal_static_hbase_pb_ColumnFamilySchema_descriptor =
+            getDescriptor().getMessageTypes().get(3);
           internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilySchema_descriptor,
               new java.lang.String[] { "Name", "Attributes", "Configuration", });
           internal_static_hbase_pb_RegionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(2);
+            getDescriptor().getMessageTypes().get(4);
           internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionInfo_descriptor,
               new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
           internal_static_hbase_pb_FavoredNodes_descriptor =
-            getDescriptor().getMessageTypes().get(3);
+            getDescriptor().getMessageTypes().get(5);
           internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_FavoredNodes_descriptor,
               new java.lang.String[] { "FavoredNode", });
           internal_static_hbase_pb_RegionSpecifier_descriptor =
-            getDescriptor().getMessageTypes().get(4);
+            getDescriptor().getMessageTypes().get(6);
           internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionSpecifier_descriptor,
               new java.lang.String[] { "Type", "Value", });
           internal_static_hbase_pb_TimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(5);
+            getDescriptor().getMessageTypes().get(7);
           internal_static_hbase_pb_TimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TimeRange_descriptor,
               new java.lang.String[] { "From", "To", });
           internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(6);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor,
               new java.lang.String[] { "ColumnFamily", "TimeRange", });
           internal_static_hbase_pb_ServerName_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_hbase_pb_ServerName_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ServerName_descriptor,
               new java.lang.String[] { "HostName", "Port", "StartCode", });
           internal_static_hbase_pb_Coprocessor_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_Coprocessor_descriptor,
               new java.lang.String[] { "Name", });
           internal_static_hbase_pb_NameStringPair_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameStringPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_NameBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameBytesPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_BytesBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BytesBytesPair_descriptor,
               new java.lang.String[] { "First", "Second", });
           internal_static_hbase_pb_NameInt64Pair_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameInt64Pair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_ProcedureDescription_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ProcedureDescription_descriptor,
               new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
           internal_static_hbase_pb_EmptyMsg_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_EmptyMsg_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_LongMsg_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(17);
           internal_static_hbase_pb_LongMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_LongMsg_descriptor,
               new java.lang.String[] { "LongMsg", });
           internal_static_hbase_pb_DoubleMsg_descriptor =
-            getDescriptor().getMessageTypes().get(16);
+            getDescriptor().getMessageTypes().get(18);
           internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DoubleMsg_descriptor,
               new java.lang.String[] { "DoubleMsg", });
           internal_static_hbase_pb_BigDecimalMsg_descriptor =
-            getDescriptor().getMessageTypes().get(17);
+            getDescriptor().getMessageTypes().get(19);
           internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BigDecimalMsg_descriptor,
               new java.lang.String[] { "BigdecimalMsg", });
           internal_static_hbase_pb_UUID_descriptor =
-            getDescriptor().getMessageTypes().get(18);
+            getDescriptor().getMessageTypes().get(20);
           internal_static_hbase_pb_UUID_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UUID_descriptor,
               new java.lang.String[] { "LeastSigBits", "MostSigBits", });
           internal_static_hbase_pb_NamespaceDescriptor_descriptor =
-            getDescriptor().getMessageTypes().get(19);
+            getDescriptor().getMessageTypes().get(21);
           internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NamespaceDescriptor_descriptor,
               new java.lang.String[] { "Name", "Configuration", });
           internal_static_hbase_pb_VersionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(20);
+            getDescriptor().getMessageTypes().get(22);
           internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_VersionInfo_descriptor,
               new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", });
           internal_static_hbase_pb_RegionServerInfo_descriptor =
-            getDescriptor().getMessageTypes().get(21);
+            getDescriptor().getMessageTypes().get(23);
           internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionServerInfo_descriptor,
               new java.lang.String[] { "InfoPort", "VersionInfo", });
           internal_static_hbase_pb_SnapshotDescription_descriptor =
-            getDescriptor().getMessageTypes().get(22);
+            getDescriptor().getMessageTypes().get(24);
           internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotDescription_descriptor,
               new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", });
           internal_static_hbase_pb_RegionLocation_descriptor =
-            getDescriptor().getMessageTypes().get(23);
+            getDescriptor().getMessageTypes().get(25);
           internal_static_hbase_pb_RegionLocation_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionLocation_descriptor,
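
The hunk above is pure renumbering: generated descriptor lookups are positional, so the messages this commit adds to HBase.proto (TableState among them) push every later message index up by two. A minimal sketch of that positional lookup, assuming nothing beyond what the hunk itself shows:

    import com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class DescriptorIndexSketch {
      public static void main(String[] args) {
        // Descriptors are fetched by ordinal, not by name, so inserting
        // messages earlier in HBase.proto shifts every later message:
        // the hunk above shows ServerName sliding from index 7 to 9.
        Descriptors.FileDescriptor file = HBaseProtos.getDescriptor();
        Descriptors.Descriptor serverName = file.getMessageTypes().get(9);
        System.out.println(serverName.getName());  // prints "ServerName"
      }
    }
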
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 76cbbe9..87b780b 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -49724,6 +49724,1128 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.GetTableNamesResponse)
   }
 
+  public interface GetTableStateRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableName table_name = 1;
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetTableStateRequest}
+   */
+  public static final class GetTableStateRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetTableStateRequestOrBuilder {
+    // Use GetTableStateRequest.newBuilder() to construct.
+    private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetTableStateRequest defaultInstance;
+    public static GetTableStateRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetTableStateRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetTableStateRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableName_.toBuilder();
+              }
+              tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableName_);
+                tableName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetTableStateRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetTableStateRequest>() {
+      public GetTableStateRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetTableStateRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetTableStateRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableName table_name = 1;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public boolean hasTableName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
+      return tableName_;
+    }
+    /**
+     * <code>required .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
+      return tableName_;
+    }
+
+    private void initFields() {
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTableName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTableName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, tableName_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, tableName_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj;
+
+      boolean result = true;
+      result = result && (hasTableName() == other.hasTableName());
+      if (hasTableName()) {
+        result = result && getTableName()
+            .equals(other.getTableName());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableName()) {
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getTableName().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetTableStateRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this;
+        if (other.hasTableName()) {
+          mergeTableName(other.getTableName());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTableName()) {
+          
+          return false;
+        }
+        if (!getTableName().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableName table_name = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableName table_name = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  tableName_,
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateRequest)
+    }
+
+    static {
+      defaultInstance = new GetTableStateRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateRequest)
+  }
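
A usage sketch for the new request type (illustrative only; the table name values are placeholders, not anything this commit ships): the builder enforces the required table_name field, so build() throws if it is never set, exactly as the isInitialized() checks above encode.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

    public class GetTableStateRequestSketch {
      public static void main(String[] args) {
        // hbase.pb.TableName carries namespace and qualifier as bytes.
        TableProtos.TableName name = TableProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("testTable"))
            .build();
        GetTableStateRequest request = GetTableStateRequest.newBuilder()
            .setTableName(name)   // required; build() fails if left unset
            .build();
        System.out.println(request);
      }
    }
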
+
+  public interface GetTableStateResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.TableState table_state = 1;
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    boolean hasTableState();
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState();
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetTableStateResponse}
+   */
+  public static final class GetTableStateResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements GetTableStateResponseOrBuilder {
+    // Use GetTableStateResponse.newBuilder() to construct.
+    private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetTableStateResponse defaultInstance;
+    public static GetTableStateResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetTableStateResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetTableStateResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableState_.toBuilder();
+              }
+              tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableState_);
+                tableState_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetTableStateResponse> PARSER =
+        new com.google.protobuf.AbstractParser<GetTableStateResponse>() {
+      public GetTableStateResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetTableStateResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetTableStateResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.TableState table_state = 1;
+    public static final int TABLE_STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_;
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public boolean hasTableState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+      return tableState_;
+    }
+    /**
+     * <code>required .hbase.pb.TableState table_state = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+      return tableState_;
+    }
+
+    private void initFields() {
+      tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTableState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getTableState().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, tableState_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, tableState_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj;
+
+      boolean result = true;
+      result = result && (hasTableState() == other.hasTableState());
+      if (hasTableState()) {
+        result = result && getTableState()
+            .equals(other.getTableState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableState()) {
+        hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER;
+        hash = (53 * hash) + getTableState().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetTableStateResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTableStateFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+        } else {
+          tableStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_GetTableStateResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableStateBuilder_ == null) {
+          result.tableState_ = tableState_;
+        } else {
+          result.tableState_ = tableStateBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this;
+        if (other.hasTableState()) {
+          mergeTableState(other.getTableState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTableState()) {
+          
+          return false;
+        }
+        if (!getTableState().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.TableState table_state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_;
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public boolean hasTableState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() {
+        if (tableStateBuilder_ == null) {
+          return tableState_;
+        } else {
+          return tableStateBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableState_ = value;
+          onChanged();
+        } else {
+          tableStateBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder setTableState(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) {
+        if (tableStateBuilder_ == null) {
+          tableState_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableStateBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) {
+        if (tableStateBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) {
+            tableState_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial();
+          } else {
+            tableState_ = value;
+          }
+          onChanged();
+        } else {
+          tableStateBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public Builder clearTableState() {
+        if (tableStateBuilder_ == null) {
+          tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance();
+          onChanged();
+        } else {
+          tableStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableStateFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() {
+        if (tableStateBuilder_ != null) {
+          return tableStateBuilder_.getMessageOrBuilder();
+        } else {
+          return tableState_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.TableState table_state = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> 
+          getTableStateFieldBuilder() {
+        if (tableStateBuilder_ == null) {
+          tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>(
+                  tableState_,
+                  getParentForChildren(),
+                  isClean());
+          tableState_ = null;
+        }
+        return tableStateBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetTableStateResponse)
+    }
+
+    static {
+      defaultInstance = new GetTableStateResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetTableStateResponse)
+  }
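
The read side is symmetric. A sketch, assuming the hbase.pb.TableState message added to HBase.proto elsewhere in this commit exposes its State enum through getState():

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;

    public class GetTableStateResponseSketch {
      // True when the master reports the table as enabled.
      static boolean isEnabled(GetTableStateResponse resp) {
        HBaseProtos.TableState state = resp.getTableState();  // required field
        return state.getState() == HBaseProtos.TableState.State.ENABLED;
      }
    }
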
+
   public interface GetClusterStatusRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
@@ -68552,6 +69674,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse> done);
 
+      /**
+       * <code>rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse);</code>
+       *
+       * <pre>
+       ** returns table state 
+       * </pre>
+       */
+      public abstract void getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done);
+
     }
 
     public static com.google.protobuf.Service newReflectiveService(
@@ -69069,6 +70203,14 @@ public final class MasterProtos {
           impl.isSnapshotCleanupEnabled(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void getTableState(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done) {
+          impl.getTableState(controller, request, done);
+        }
+
       };
     }
 
@@ -69219,6 +70361,8 @@ public final class MasterProtos {
               return impl.switchSnapshotCleanup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest)request);
             case 63:
               return impl.isSnapshotCleanupEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest)request);
+            case 64:
+              return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -69361,6 +70505,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance();
             case 63:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance();
+            case 64:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -69503,6 +70649,8 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance();
             case 63:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance();
+            case 64:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
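
The case 64 arms above all key off the method's ordinal in the service descriptor: GetTableState was appended after the 64 existing MasterService methods (cases 0..63), so every dispatch table gains one slot. A sketch of that ordinal lookup:

    import com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public class MethodIndexSketch {
      public static void main(String[] args) {
        Descriptors.ServiceDescriptor svc =
            MasterProtos.MasterService.getDescriptor();
        // Index 64 is the slot the new RPC occupies in every switch above.
        Descriptors.MethodDescriptor m = svc.getMethods().get(64);
        System.out.println(m.getName());  // prints "GetTableState"
      }
    }
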
@@ -70313,6 +71461,18 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse> done);
 
+    /**
+     * <code>rpc GetTableState(.hbase.pb.GetTableStateRequest) returns (.hbase.pb.GetTableStateResponse);</code>
+     *
+     * <pre>
+     ** returns table state 
+     * </pre>
+     */
+    public abstract void getTableState(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done);
+
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -70655,6 +71815,11 @@ public final class MasterProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse>specializeCallback(
               done));
           return;
+        case 64:
+          this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -70797,6 +71962,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest.getDefaultInstance();
         case 63:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest.getDefaultInstance();
+        case 64:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -70939,6 +72106,8 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse.getDefaultInstance();
         case 63:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance();
+        case 64:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -71919,6 +73088,21 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance()));
       }
+
+      public  void getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -72246,6 +73430,11 @@ public final class MasterProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest request)
           throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
+          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -73022,6 +74211,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
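
A usage sketch for the blocking stub above: assume channel is an
established com.google.protobuf.BlockingRpcChannel to the active master,
and buildRequest is the hypothetical helper sketched earlier. Real
clients would go through HConnection rather than raw stubs, and
getTableState throws ServiceException, elided here:

    MasterProtos.MasterService.BlockingInterface master =
        MasterProtos.MasterService.newBlockingStub(channel);
    MasterProtos.GetTableStateResponse resp =
        master.getTableState(null, GetTableStateSketch.buildRequest("t1"));
    // TableState is assumed to be generated into HBaseProtos in this patch.
    HBaseProtos.TableState state = resp.getTableState(); // required field 1
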
@@ -73917,6 +75118,16 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetTableStateRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetTableStateResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_GetClusterStatusRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -74242,225 +75453,231 @@ public final class MasterProtos {
       "sRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_t",
       "ables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n" +
       "\025GetTableNamesResponse\022(\n\013table_names\030\001 " +
-      "\003(\0132\023.hbase.pb.TableName\"\031\n\027GetClusterSt" +
-      "atusRequest\"K\n\030GetClusterStatusResponse\022" +
-      "/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clust" +
-      "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" +
-      "MasterRunningResponse\022\031\n\021is_master_runni" +
-      "ng\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproc" +
-      "edure\030\001 \002(\0132\036.hbase.pb.ProcedureDescript" +
-      "ion\"F\n\025ExecProcedureResponse\022\030\n\020expected",
-      "_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026I" +
-      "sProcedureDoneRequest\0221\n\tprocedure\030\001 \001(\013" +
-      "2\036.hbase.pb.ProcedureDescription\"`\n\027IsPr" +
-      "ocedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false" +
-      "\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureD" +
-      "escription\",\n\031GetProcedureResultRequest\022" +
-      "\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultR" +
-      "esponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetPro" +
-      "cedureResultResponse.State\022\022\n\nstart_time" +
-      "\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 ",
-      "\001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Foreig" +
-      "nExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020" +
-      "\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortPro" +
-      "cedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInt" +
-      "erruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProc" +
-      "edureResponse\022\034\n\024is_procedure_aborted\030\001 " +
-      "\002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListProc" +
-      "eduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbas" +
-      "e.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tus" +
-      "er_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnam",
-      "espace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase" +
-      ".pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp" +
-      "ass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hb" +
-      "ase.pb.ThrottleRequest\"\022\n\020SetQuotaRespon" +
-      "se\"J\n\037MajorCompactionTimestampRequest\022\'\n" +
-      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"U" +
-      "\n(MajorCompactionTimestampForRegionReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\"@\n MajorCompactionTimestampRespons" +
-      "e\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Secur",
-      "ityCapabilitiesRequest\"\354\001\n\034SecurityCapab" +
-      "ilitiesResponse\022G\n\014capabilities\030\001 \003(\01621." +
-      "hbase.pb.SecurityCapabilitiesResponse.Ca" +
-      "pability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHEN" +
-      "TICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n" +
-      "\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003" +
-      "\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027ClearDeadServer" +
-      "sRequest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb" +
-      ".ServerName\"E\n\030ClearDeadServersResponse\022" +
-      ")\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNa",
-      "me\"A\n\031SetSnapshotCleanupRequest\022\017\n\007enabl" +
-      "ed\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\";\n\032SetSnap" +
-      "shotCleanupResponse\022\035\n\025prev_snapshot_cle" +
-      "anup\030\001 \002(\010\"!\n\037IsSnapshotCleanupEnabledRe" +
-      "quest\"3\n IsSnapshotCleanupEnabledRespons" +
-      "e\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetClusterIdReques" +
-      "t\"*\n\024GetClusterIdResponse\022\022\n\ncluster_id\030" +
-      "\001 \001(\t\"\030\n\026GetActiveMasterRequest\"D\n\027GetAc" +
-      "tiveMasterResponse\022)\n\013server_name\030\001 \001(\0132" +
-      "\024.hbase.pb.ServerName\"\037\n\035GetMetaRegionLo",
-      "cationsRequest\"R\n\036GetMetaRegionLocations" +
-      "Response\0220\n\016meta_locations\030\001 \003(\0132\030.hbase" +
-      ".pb.RegionLocation*(\n\020MasterSwitchType\022\t" +
-      "\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\241.\n\rMasterService\022e" +
-      "\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSch" +
-      "emaAlterStatusRequest\032&.hbase.pb.GetSche" +
-      "maAlterStatusResponse\022b\n\023GetTableDescrip" +
-      "tors\022$.hbase.pb.GetTableDescriptorsReque" +
-      "st\032%.hbase.pb.GetTableDescriptorsRespons" +
-      "e\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNa",
-      "mesRequest\032\037.hbase.pb.GetTableNamesRespo" +
-      "nse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetCl" +
-      "usterStatusRequest\032\".hbase.pb.GetCluster" +
-      "StatusResponse\022V\n\017IsMasterRunning\022 .hbas" +
-      "e.pb.IsMasterRunningRequest\032!.hbase.pb.I" +
-      "sMasterRunningResponse\022D\n\tAddColumn\022\032.hb" +
-      "ase.pb.AddColumnRequest\032\033.hbase.pb.AddCo" +
-      "lumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb." +
-      "DeleteColumnRequest\032\036.hbase.pb.DeleteCol" +
-      "umnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.M",
-      "odifyColumnRequest\032\036.hbase.pb.ModifyColu" +
-      "mnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Move" +
-      "RegionRequest\032\034.hbase.pb.MoveRegionRespo" +
-      "nse\022k\n\026DispatchMergingRegions\022\'.hbase.pb" +
-      ".DispatchMergingRegionsRequest\032(.hbase.p" +
-      "b.DispatchMergingRegionsResponse\022M\n\014Assi" +
-      "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" +
-      "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" +
-      "ignRegion\022\037.hbase.pb.UnassignRegionReque" +
-      "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r",
-      "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" +
-      "quest\032\037.hbase.pb.OfflineRegionResponse\022J" +
-      "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" +
-      "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" +
-      "uncateTable\022\036.hbase.pb.TruncateTableRequ" +
-      "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" +
-      "EnableTable\022\034.hbase.pb.EnableTableReques" +
-      "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" +
-      "bleTable\022\035.hbase.pb.DisableTableRequest\032" +
-      "\036.hbase.pb.DisableTableResponse\022J\n\013Modif",
-      "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" +
-      "base.pb.ModifyTableResponse\022J\n\013CreateTab" +
-      "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" +
-      ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" +
-      "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" +
-      "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" +
-      "MasterRequest\032\034.hbase.pb.StopMasterRespo" +
-      "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" +
-      ".pb.IsInMaintenanceModeRequest\032%.hbase.p" +
-      "b.IsInMaintenanceModeResponse\022>\n\007Balance",
-      "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" +
-      "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" +
-      "ase.pb.SetBalancerRunningRequest\032$.hbase" +
-      ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" +
-      "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" +
-      "Request\032#.hbase.pb.IsBalancerEnabledResp" +
-      "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" +
-      "b.SetSplitOrMergeEnabledRequest\032(.hbase." +
-      "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS" +
-      "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM",
-      "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" +
-      "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" +
-      ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" +
-      "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" +
-      "e.pb.SetNormalizerRunningRequest\032&.hbase" +
-      ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" +
-      "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" +
-      "nabledRequest\032%.hbase.pb.IsNormalizerEna" +
-      "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p" +
-      "b.RunCatalogScanRequest\032 .hbase.pb.RunCa",
-      "talogScanResponse\022e\n\024EnableCatalogJanito" +
-      "r\022%.hbase.pb.EnableCatalogJanitorRequest" +
-      "\032&.hbase.pb.EnableCatalogJanitorResponse" +
-      "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" +
-      "sCatalogJanitorEnabledRequest\032).hbase.pb" +
-      ".IsCatalogJanitorEnabledResponse\022V\n\017RunC" +
-      "leanerChore\022 .hbase.pb.RunCleanerChoreRe" +
-      "quest\032!.hbase.pb.RunCleanerChoreResponse" +
-      "\022k\n\026SetCleanerChoreRunning\022\'.hbase.pb.Se" +
-      "tCleanerChoreRunningRequest\032(.hbase.pb.S",
-      "etCleanerChoreRunningResponse\022h\n\025IsClean" +
-      "erChoreEnabled\022&.hbase.pb.IsCleanerChore" +
-      "EnabledRequest\032\'.hbase.pb.IsCleanerChore" +
-      "EnabledResponse\022^\n\021ExecMasterService\022#.h" +
-      "base.pb.CoprocessorServiceRequest\032$.hbas" +
-      "e.pb.CoprocessorServiceResponse\022A\n\010Snaps" +
-      "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p" +
-      "b.SnapshotResponse\022h\n\025GetCompletedSnapsh" +
-      "ots\022&.hbase.pb.GetCompletedSnapshotsRequ" +
-      "est\032\'.hbase.pb.GetCompletedSnapshotsResp",
-      "onse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delete" +
-      "SnapshotRequest\032 .hbase.pb.DeleteSnapsho" +
-      "tResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.I" +
-      "sSnapshotDoneRequest\032 .hbase.pb.IsSnapsh" +
-      "otDoneResponse\022V\n\017RestoreSnapshot\022 .hbas" +
-      "e.pb.RestoreSnapshotRequest\032!.hbase.pb.R" +
-      "estoreSnapshotResponse\022h\n\025IsRestoreSnaps" +
-      "hotDone\022&.hbase.pb.IsRestoreSnapshotDone" +
-      "Request\032\'.hbase.pb.IsRestoreSnapshotDone" +
-      "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe",
-      "cProcedureRequest\032\037.hbase.pb.ExecProcedu" +
-      "reResponse\022W\n\024ExecProcedureWithRet\022\036.hba" +
-      "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" +
-      "ecProcedureResponse\022V\n\017IsProcedureDone\022 " +
-      ".hbase.pb.IsProcedureDoneRequest\032!.hbase" +
-      ".pb.IsProcedureDoneResponse\022V\n\017ModifyNam" +
-      "espace\022 .hbase.pb.ModifyNamespaceRequest" +
-      "\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017C" +
-      "reateNamespace\022 .hbase.pb.CreateNamespac" +
-      "eRequest\032!.hbase.pb.CreateNamespaceRespo",
-      "nse\022V\n\017DeleteNamespace\022 .hbase.pb.Delete" +
-      "NamespaceRequest\032!.hbase.pb.DeleteNamesp" +
-      "aceResponse\022k\n\026GetNamespaceDescriptor\022\'." +
-      "hbase.pb.GetNamespaceDescriptorRequest\032(" +
-      ".hbase.pb.GetNamespaceDescriptorResponse" +
-      "\022q\n\030ListNamespaceDescriptors\022).hbase.pb." +
-      "ListNamespaceDescriptorsRequest\032*.hbase." +
-      "pb.ListNamespaceDescriptorsResponse\022\206\001\n\037" +
-      "ListTableDescriptorsByNamespace\0220.hbase." +
-      "pb.ListTableDescriptorsByNamespaceReques",
-      "t\0321.hbase.pb.ListTableDescriptorsByNames" +
-      "paceResponse\022t\n\031ListTableNamesByNamespac" +
-      "e\022*.hbase.pb.ListTableNamesByNamespaceRe" +
-      "quest\032+.hbase.pb.ListTableNamesByNamespa" +
-      "ceResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" +
-      "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" +
-      "\037getLastMajorCompactionTimestamp\022).hbase" +
-      ".pb.MajorCompactionTimestampRequest\032*.hb" +
-      "ase.pb.MajorCompactionTimestampResponse\022" +
-      "\212\001\n(getLastMajorCompactionTimestampForRe",
-      "gion\0222.hbase.pb.MajorCompactionTimestamp" +
-      "ForRegionRequest\032*.hbase.pb.MajorCompact" +
-      "ionTimestampResponse\022_\n\022getProcedureResu" +
-      "lt\022#.hbase.pb.GetProcedureResultRequest\032" +
-      "$.hbase.pb.GetProcedureResultResponse\022h\n" +
-      "\027getSecurityCapabilities\022%.hbase.pb.Secu" +
-      "rityCapabilitiesRequest\032&.hbase.pb.Secur" +
-      "ityCapabilitiesResponse\022S\n\016AbortProcedur" +
-      "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba" +
-      "se.pb.AbortProcedureResponse\022S\n\016ListProc",
-      "edures\022\037.hbase.pb.ListProceduresRequest\032" +
-      " .hbase.pb.ListProceduresResponse\022Y\n\020Cle" +
-      "arDeadServers\022!.hbase.pb.ClearDeadServer" +
-      "sRequest\032\".hbase.pb.ClearDeadServersResp" +
-      "onse\022S\n\016ListNamespaces\022\037.hbase.pb.ListNa" +
-      "mespacesRequest\032 .hbase.pb.ListNamespace" +
-      "sResponse\022b\n\025SwitchSnapshotCleanup\022#.hba" +
-      "se.pb.SetSnapshotCleanupRequest\032$.hbase." +
-      "pb.SetSnapshotCleanupResponse\022q\n\030IsSnaps" +
-      "hotCleanupEnabled\022).hbase.pb.IsSnapshotC",
-      "leanupEnabledRequest\032*.hbase.pb.IsSnapsh" +
-      "otCleanupEnabledResponse2\247\002\n\021ClientMetaS" +
-      "ervice\022M\n\014GetClusterId\022\035.hbase.pb.GetClu" +
-      "sterIdRequest\032\036.hbase.pb.GetClusterIdRes" +
-      "ponse\022V\n\017GetActiveMaster\022 .hbase.pb.GetA" +
-      "ctiveMasterRequest\032!.hbase.pb.GetActiveM" +
-      "asterResponse\022k\n\026GetMetaRegionLocations\022" +
-      "\'.hbase.pb.GetMetaRegionLocationsRequest" +
-      "\032(.hbase.pb.GetMetaRegionLocationsRespon" +
-      "seBB\n*org.apache.hadoop.hbase.protobuf.g",
-      "eneratedB\014MasterProtosH\001\210\001\001\240\001\001"
+      "\003(\0132\023.hbase.pb.TableName\"?\n\024GetTableStat" +
+      "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." +
+      "TableName\"B\n\025GetTableStateResponse\022)\n\013ta" +
+      "ble_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n" +
+      "\027GetClusterStatusRequest\"K\n\030GetClusterSt" +
+      "atusResponse\022/\n\016cluster_status\030\001 \002(\0132\027.h" +
+      "base.pb.ClusterStatus\"\030\n\026IsMasterRunning" +
+      "Request\"4\n\027IsMasterRunningResponse\022\031\n\021is",
+      "_master_running\030\001 \002(\010\"I\n\024ExecProcedureRe" +
+      "quest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Proc" +
+      "edureDescription\"F\n\025ExecProcedureRespons" +
+      "e\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_da" +
+      "ta\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tpr" +
+      "ocedure\030\001 \001(\0132\036.hbase.pb.ProcedureDescri" +
+      "ption\"`\n\027IsProcedureDoneResponse\022\023\n\004done" +
+      "\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase." +
+      "pb.ProcedureDescription\",\n\031GetProcedureR" +
+      "esultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetPro",
+      "cedureResultResponse\0229\n\005state\030\001 \002(\0162*.hb" +
+      "ase.pb.GetProcedureResultResponse.State\022" +
+      "\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004" +
+      "\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hb" +
+      "ase.pb.ForeignExceptionMessage\"1\n\005State\022" +
+      "\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020" +
+      "\002\"M\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 " +
+      "\002(\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true" +
+      "\"6\n\026AbortProcedureResponse\022\034\n\024is_procedu" +
+      "re_aborted\030\001 \002(\010\"\027\n\025ListProceduresReques",
+      "t\"@\n\026ListProceduresResponse\022&\n\tprocedure" +
+      "\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuota" +
+      "Request\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group" +
+      "\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030" +
+      "\004 \001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all" +
+      "\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010thrott" +
+      "le\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020S" +
+      "etQuotaResponse\"J\n\037MajorCompactionTimest" +
+      "ampRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.p" +
+      "b.TableName\"U\n(MajorCompactionTimestampF",
+      "orRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase." +
+      "pb.RegionSpecifier\"@\n MajorCompactionTim" +
+      "estampResponse\022\034\n\024compaction_timestamp\030\001" +
+      " \002(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034" +
+      "SecurityCapabilitiesResponse\022G\n\014capabili" +
+      "ties\030\001 \003(\01621.hbase.pb.SecurityCapabiliti" +
+      "esResponse.Capability\"\202\001\n\nCapability\022\031\n\025" +
+      "SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHEN" +
+      "TICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AU" +
+      "THORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027Cl",
+      "earDeadServersRequest\022)\n\013server_name\030\001 \003" +
+      "(\0132\024.hbase.pb.ServerName\"E\n\030ClearDeadSer" +
+      "versResponse\022)\n\013server_name\030\001 \003(\0132\024.hbas" +
+      "e.pb.ServerName\"A\n\031SetSnapshotCleanupReq" +
+      "uest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001" +
+      "(\010\";\n\032SetSnapshotCleanupResponse\022\035\n\025prev" +
+      "_snapshot_cleanup\030\001 \002(\010\"!\n\037IsSnapshotCle" +
+      "anupEnabledRequest\"3\n IsSnapshotCleanupE" +
+      "nabledResponse\022\017\n\007enabled\030\001 \002(\010\"\025\n\023GetCl" +
+      "usterIdRequest\"*\n\024GetClusterIdResponse\022\022",
+      "\n\ncluster_id\030\001 \001(\t\"\030\n\026GetActiveMasterReq" +
+      "uest\"D\n\027GetActiveMasterResponse\022)\n\013serve" +
+      "r_name\030\001 \001(\0132\024.hbase.pb.ServerName\"\037\n\035Ge" +
+      "tMetaRegionLocationsRequest\"R\n\036GetMetaRe" +
+      "gionLocationsResponse\0220\n\016meta_locations\030" +
+      "\001 \003(\0132\030.hbase.pb.RegionLocation*(\n\020Maste" +
+      "rSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\363.\n\rMa" +
+      "sterService\022e\n\024GetSchemaAlterStatus\022%.hb" +
+      "ase.pb.GetSchemaAlterStatusRequest\032&.hba" +
+      "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge",
+      "tTableDescriptors\022$.hbase.pb.GetTableDes" +
+      "criptorsRequest\032%.hbase.pb.GetTableDescr" +
+      "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." +
+      "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" +
+      "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" +
+      "base.pb.GetClusterStatusRequest\032\".hbase." +
+      "pb.GetClusterStatusResponse\022V\n\017IsMasterR" +
+      "unning\022 .hbase.pb.IsMasterRunningRequest" +
+      "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" +
+      "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h",
+      "base.pb.AddColumnResponse\022M\n\014DeleteColum" +
+      "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" +
+      ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" +
+      "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." +
+      "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." +
+      "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" +
+      "veRegionResponse\022k\n\026DispatchMergingRegio" +
+      "ns\022\'.hbase.pb.DispatchMergingRegionsRequ" +
+      "est\032(.hbase.pb.DispatchMergingRegionsRes" +
+      "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR",
+      "egionRequest\032\036.hbase.pb.AssignRegionResp" +
+      "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" +
+      "gnRegionRequest\032 .hbase.pb.UnassignRegio" +
+      "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" +
+      "flineRegionRequest\032\037.hbase.pb.OfflineReg" +
+      "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" +
+      "leteTableRequest\032\035.hbase.pb.DeleteTableR" +
+      "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun" +
+      "cateTableRequest\032\037.hbase.pb.TruncateTabl" +
+      "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab",
+      "leTableRequest\032\035.hbase.pb.EnableTableRes" +
+      "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" +
+      "TableRequest\032\036.hbase.pb.DisableTableResp" +
+      "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" +
+      "leRequest\032\035.hbase.pb.ModifyTableResponse" +
+      "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" +
+      "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" +
+      "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb" +
+      "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." +
+      "hbase.pb.StopMasterRequest\032\034.hbase.pb.St",
+      "opMasterResponse\022h\n\031IsMasterInMaintenanc" +
+      "eMode\022$.hbase.pb.IsInMaintenanceModeRequ" +
+      "est\032%.hbase.pb.IsInMaintenanceModeRespon" +
+      "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
+      "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" +
+      "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
+      "quest\032$.hbase.pb.SetBalancerRunningRespo" +
+      "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa" +
+      "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
+      "erEnabledResponse\022k\n\026SetSplitOrMergeEnab",
+      "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq" +
+      "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" +
+      "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." +
+      "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." +
+      "pb.IsSplitOrMergeEnabledResponse\022D\n\tNorm" +
+      "alize\022\032.hbase.pb.NormalizeRequest\032\033.hbas" +
+      "e.pb.NormalizeResponse\022e\n\024SetNormalizerR" +
+      "unning\022%.hbase.pb.SetNormalizerRunningRe" +
+      "quest\032&.hbase.pb.SetNormalizerRunningRes" +
+      "ponse\022b\n\023IsNormalizerEnabled\022$.hbase.pb.",
+      "IsNormalizerEnabledRequest\032%.hbase.pb.Is" +
+      "NormalizerEnabledResponse\022S\n\016RunCatalogS" +
+      "can\022\037.hbase.pb.RunCatalogScanRequest\032 .h" +
+      "base.pb.RunCatalogScanResponse\022e\n\024Enable" +
+      "CatalogJanitor\022%.hbase.pb.EnableCatalogJ" +
+      "anitorRequest\032&.hbase.pb.EnableCatalogJa" +
+      "nitorResponse\022n\n\027IsCatalogJanitorEnabled" +
+      "\022(.hbase.pb.IsCatalogJanitorEnabledReque" +
+      "st\032).hbase.pb.IsCatalogJanitorEnabledRes" +
+      "ponse\022V\n\017RunCleanerChore\022 .hbase.pb.RunC",
+      "leanerChoreRequest\032!.hbase.pb.RunCleaner" +
+      "ChoreResponse\022k\n\026SetCleanerChoreRunning\022" +
+      "\'.hbase.pb.SetCleanerChoreRunningRequest" +
+      "\032(.hbase.pb.SetCleanerChoreRunningRespon" +
+      "se\022h\n\025IsCleanerChoreEnabled\022&.hbase.pb.I" +
+      "sCleanerChoreEnabledRequest\032\'.hbase.pb.I" +
+      "sCleanerChoreEnabledResponse\022^\n\021ExecMast" +
+      "erService\022#.hbase.pb.CoprocessorServiceR" +
+      "equest\032$.hbase.pb.CoprocessorServiceResp" +
+      "onse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotRequ",
+      "est\032\032.hbase.pb.SnapshotResponse\022h\n\025GetCo" +
+      "mpletedSnapshots\022&.hbase.pb.GetCompleted" +
+      "SnapshotsRequest\032\'.hbase.pb.GetCompleted" +
+      "SnapshotsResponse\022S\n\016DeleteSnapshot\022\037.hb" +
+      "ase.pb.DeleteSnapshotRequest\032 .hbase.pb." +
+      "DeleteSnapshotResponse\022S\n\016IsSnapshotDone" +
+      "\022\037.hbase.pb.IsSnapshotDoneRequest\032 .hbas" +
+      "e.pb.IsSnapshotDoneResponse\022V\n\017RestoreSn" +
+      "apshot\022 .hbase.pb.RestoreSnapshotRequest" +
+      "\032!.hbase.pb.RestoreSnapshotResponse\022h\n\025I",
+      "sRestoreSnapshotDone\022&.hbase.pb.IsRestor" +
+      "eSnapshotDoneRequest\032\'.hbase.pb.IsRestor" +
+      "eSnapshotDoneResponse\022P\n\rExecProcedure\022\036" +
+      ".hbase.pb.ExecProcedureRequest\032\037.hbase.p" +
+      "b.ExecProcedureResponse\022W\n\024ExecProcedure" +
+      "WithRet\022\036.hbase.pb.ExecProcedureRequest\032" +
+      "\037.hbase.pb.ExecProcedureResponse\022V\n\017IsPr" +
+      "ocedureDone\022 .hbase.pb.IsProcedureDoneRe" +
+      "quest\032!.hbase.pb.IsProcedureDoneResponse" +
+      "\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyNam",
+      "espaceRequest\032!.hbase.pb.ModifyNamespace" +
+      "Response\022V\n\017CreateNamespace\022 .hbase.pb.C" +
+      "reateNamespaceRequest\032!.hbase.pb.CreateN" +
+      "amespaceResponse\022V\n\017DeleteNamespace\022 .hb" +
+      "ase.pb.DeleteNamespaceRequest\032!.hbase.pb" +
+      ".DeleteNamespaceResponse\022k\n\026GetNamespace" +
+      "Descriptor\022\'.hbase.pb.GetNamespaceDescri" +
+      "ptorRequest\032(.hbase.pb.GetNamespaceDescr" +
+      "iptorResponse\022q\n\030ListNamespaceDescriptor" +
+      "s\022).hbase.pb.ListNamespaceDescriptorsReq",
+      "uest\032*.hbase.pb.ListNamespaceDescriptors" +
+      "Response\022\206\001\n\037ListTableDescriptorsByNames" +
+      "pace\0220.hbase.pb.ListTableDescriptorsByNa" +
+      "mespaceRequest\0321.hbase.pb.ListTableDescr" +
+      "iptorsByNamespaceResponse\022t\n\031ListTableNa" +
+      "mesByNamespace\022*.hbase.pb.ListTableNames" +
+      "ByNamespaceRequest\032+.hbase.pb.ListTableN" +
+      "amesByNamespaceResponse\022A\n\010SetQuota\022\031.hb" +
+      "ase.pb.SetQuotaRequest\032\032.hbase.pb.SetQuo" +
+      "taResponse\022x\n\037getLastMajorCompactionTime",
+      "stamp\022).hbase.pb.MajorCompactionTimestam" +
+      "pRequest\032*.hbase.pb.MajorCompactionTimes" +
+      "tampResponse\022\212\001\n(getLastMajorCompactionT" +
+      "imestampForRegion\0222.hbase.pb.MajorCompac" +
+      "tionTimestampForRegionRequest\032*.hbase.pb" +
+      ".MajorCompactionTimestampResponse\022_\n\022get" +
+      "ProcedureResult\022#.hbase.pb.GetProcedureR" +
+      "esultRequest\032$.hbase.pb.GetProcedureResu" +
+      "ltResponse\022h\n\027getSecurityCapabilities\022%." +
+      "hbase.pb.SecurityCapabilitiesRequest\032&.h",
+      "base.pb.SecurityCapabilitiesResponse\022S\n\016" +
+      "AbortProcedure\022\037.hbase.pb.AbortProcedure" +
+      "Request\032 .hbase.pb.AbortProcedureRespons" +
+      "e\022S\n\016ListProcedures\022\037.hbase.pb.ListProce" +
+      "duresRequest\032 .hbase.pb.ListProceduresRe" +
+      "sponse\022Y\n\020ClearDeadServers\022!.hbase.pb.Cl" +
+      "earDeadServersRequest\032\".hbase.pb.ClearDe" +
+      "adServersResponse\022S\n\016ListNamespaces\022\037.hb" +
+      "ase.pb.ListNamespacesRequest\032 .hbase.pb." +
+      "ListNamespacesResponse\022b\n\025SwitchSnapshot",
+      "Cleanup\022#.hbase.pb.SetSnapshotCleanupReq" +
+      "uest\032$.hbase.pb.SetSnapshotCleanupRespon" +
+      "se\022q\n\030IsSnapshotCleanupEnabled\022).hbase.p" +
+      "b.IsSnapshotCleanupEnabledRequest\032*.hbas" +
+      "e.pb.IsSnapshotCleanupEnabledResponse\022P\n" +
+      "\rGetTableState\022\036.hbase.pb.GetTableStateR" +
+      "equest\032\037.hbase.pb.GetTableStateResponse2" +
+      "\247\002\n\021ClientMetaService\022M\n\014GetClusterId\022\035." +
+      "hbase.pb.GetClusterIdRequest\032\036.hbase.pb." +
+      "GetClusterIdResponse\022V\n\017GetActiveMaster\022",
+      " .hbase.pb.GetActiveMasterRequest\032!.hbas" +
+      "e.pb.GetActiveMasterResponse\022k\n\026GetMetaR" +
+      "egionLocations\022\'.hbase.pb.GetMetaRegionL" +
+      "ocationsRequest\032(.hbase.pb.GetMetaRegion" +
+      "LocationsResponseBB\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\014MasterProtosH\001\210" +
+      "\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
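
The regenerated descriptor splices the two new message types in at
positions 96 and 97, which is why every index from GetClusterStatusRequest
onward shifts up by two in the hunk below. A small runtime sanity check
against the generated descriptor (a sketch; run with assertions enabled,
-ea):

    com.google.protobuf.Descriptors.FileDescriptor fd =
        MasterProtos.getDescriptor();
    assert fd.getMessageTypes().get(96).getName()
        .equals("GetTableStateRequest");
    assert fd.getMessageTypes().get(97).getName()
        .equals("GetTableStateResponse");
    // MasterService is the first service in the file; the new rpc is its
    // method index 64, matching the dispatch cases above.
    assert fd.getServices().get(0).getMethods().get(64).getName()
        .equals("GetTableState");
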
@@ -75043,200 +76260,212 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableNamesResponse_descriptor,
               new java.lang.String[] { "TableNames", });
-          internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
+          internal_static_hbase_pb_GetTableStateRequest_descriptor =
             getDescriptor().getMessageTypes().get(96);
+          internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetTableStateRequest_descriptor,
+              new java.lang.String[] { "TableName", });
+          internal_static_hbase_pb_GetTableStateResponse_descriptor =
+            getDescriptor().getMessageTypes().get(97);
+          internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GetTableStateResponse_descriptor,
+              new java.lang.String[] { "TableState", });
+          internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
+            getDescriptor().getMessageTypes().get(98);
           internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetClusterStatusResponse_descriptor =
-            getDescriptor().getMessageTypes().get(97);
+            getDescriptor().getMessageTypes().get(99);
           internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusResponse_descriptor,
               new java.lang.String[] { "ClusterStatus", });
           internal_static_hbase_pb_IsMasterRunningRequest_descriptor =
-            getDescriptor().getMessageTypes().get(98);
+            getDescriptor().getMessageTypes().get(100);
           internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsMasterRunningResponse_descriptor =
-            getDescriptor().getMessageTypes().get(99);
+            getDescriptor().getMessageTypes().get(101);
           internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningResponse_descriptor,
               new java.lang.String[] { "IsMasterRunning", });
           internal_static_hbase_pb_ExecProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(100);
+            getDescriptor().getMessageTypes().get(102);
           internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_ExecProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(101);
+            getDescriptor().getMessageTypes().get(103);
           internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureResponse_descriptor,
               new java.lang.String[] { "ExpectedTimeout", "ReturnData", });
           internal_static_hbase_pb_IsProcedureDoneRequest_descriptor =
-            getDescriptor().getMessageTypes().get(102);
+            getDescriptor().getMessageTypes().get(104);
           internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_IsProcedureDoneResponse_descriptor =
-            getDescriptor().getMessageTypes().get(103);
+            getDescriptor().getMessageTypes().get(105);
           internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneResponse_descriptor,
               new java.lang.String[] { "Done", "Snapshot", });
           internal_static_hbase_pb_GetProcedureResultRequest_descriptor =
-            getDescriptor().getMessageTypes().get(104);
+            getDescriptor().getMessageTypes().get(106);
           internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultRequest_descriptor,
               new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_GetProcedureResultResponse_descriptor =
-            getDescriptor().getMessageTypes().get(105);
+            getDescriptor().getMessageTypes().get(107);
           internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultResponse_descriptor,
               new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
           internal_static_hbase_pb_AbortProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(106);
+            getDescriptor().getMessageTypes().get(108);
           internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureRequest_descriptor,
               new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
           internal_static_hbase_pb_AbortProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(107);
+            getDescriptor().getMessageTypes().get(109);
           internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureResponse_descriptor,
               new java.lang.String[] { "IsProcedureAborted", });
           internal_static_hbase_pb_ListProceduresRequest_descriptor =
-            getDescriptor().getMessageTypes().get(108);
+            getDescriptor().getMessageTypes().get(110);
           internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_ListProceduresResponse_descriptor =
-            getDescriptor().getMessageTypes().get(109);
+            getDescriptor().getMessageTypes().get(111);
           internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresResponse_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_SetQuotaRequest_descriptor =
-            getDescriptor().getMessageTypes().get(110);
+            getDescriptor().getMessageTypes().get(112);
           internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaRequest_descriptor,
               new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
           internal_static_hbase_pb_SetQuotaResponse_descriptor =
-            getDescriptor().getMessageTypes().get(111);
+            getDescriptor().getMessageTypes().get(113);
           internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
-            getDescriptor().getMessageTypes().get(112);
+            getDescriptor().getMessageTypes().get(114);
           internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
-            getDescriptor().getMessageTypes().get(113);
+            getDescriptor().getMessageTypes().get(115);
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
               new java.lang.String[] { "Region", });
           internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
-            getDescriptor().getMessageTypes().get(114);
+            getDescriptor().getMessageTypes().get(116);
           internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
               new java.lang.String[] { "CompactionTimestamp", });
           internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
-            getDescriptor().getMessageTypes().get(115);
+            getDescriptor().getMessageTypes().get(117);
           internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
-            getDescriptor().getMessageTypes().get(116);
+            getDescriptor().getMessageTypes().get(118);
           internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
               new java.lang.String[] { "Capabilities", });
           internal_static_hbase_pb_ClearDeadServersRequest_descriptor =
-            getDescriptor().getMessageTypes().get(117);
+            getDescriptor().getMessageTypes().get(119);
           internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersRequest_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_ClearDeadServersResponse_descriptor =
-            getDescriptor().getMessageTypes().get(118);
+            getDescriptor().getMessageTypes().get(120);
           internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersResponse_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor =
-            getDescriptor().getMessageTypes().get(119);
+            getDescriptor().getMessageTypes().get(121);
           internal_static_hbase_pb_SetSnapshotCleanupRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSnapshotCleanupRequest_descriptor,
               new java.lang.String[] { "Enabled", "Synchronous", });
           internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor =
-            getDescriptor().getMessageTypes().get(120);
+            getDescriptor().getMessageTypes().get(122);
           internal_static_hbase_pb_SetSnapshotCleanupResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSnapshotCleanupResponse_descriptor,
               new java.lang.String[] { "PrevSnapshotCleanup", });
           internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(121);
+            getDescriptor().getMessageTypes().get(123);
           internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotCleanupEnabledRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(122);
+            getDescriptor().getMessageTypes().get(124);
           internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotCleanupEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
           internal_static_hbase_pb_GetClusterIdRequest_descriptor =
-            getDescriptor().getMessageTypes().get(123);
+            getDescriptor().getMessageTypes().get(125);
           internal_static_hbase_pb_GetClusterIdRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterIdRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetClusterIdResponse_descriptor =
-            getDescriptor().getMessageTypes().get(124);
+            getDescriptor().getMessageTypes().get(126);
           internal_static_hbase_pb_GetClusterIdResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterIdResponse_descriptor,
               new java.lang.String[] { "ClusterId", });
           internal_static_hbase_pb_GetActiveMasterRequest_descriptor =
-            getDescriptor().getMessageTypes().get(125);
+            getDescriptor().getMessageTypes().get(127);
           internal_static_hbase_pb_GetActiveMasterRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetActiveMasterRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetActiveMasterResponse_descriptor =
-            getDescriptor().getMessageTypes().get(126);
+            getDescriptor().getMessageTypes().get(128);
           internal_static_hbase_pb_GetActiveMasterResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetActiveMasterResponse_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor =
-            getDescriptor().getMessageTypes().get(127);
+            getDescriptor().getMessageTypes().get(129);
           internal_static_hbase_pb_GetMetaRegionLocationsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(128);
+            getDescriptor().getMessageTypes().get(130);
           internal_static_hbase_pb_GetMetaRegionLocationsResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetMetaRegionLocationsResponse_descriptor,
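
The ZooKeeperProtos half of the patch is a mechanical rename: the
znode-backed Table message becomes DeprecatedTableState, since table
state now lives in the table descriptor on HDFS, and the wire format is
untouched. A sketch of reading a legacy znode payload through the renamed
type (the helper name is hypothetical; per the message comment, an absent
znode means the table is presumed enabled):

    // Real znode data carries HBase's 4-byte PBUF magic prefix, assumed
    // already stripped here.
    static ZooKeeperProtos.DeprecatedTableState.State readLegacyState(
        byte[] znodeData)
        throws com.google.protobuf.InvalidProtocolBufferException {
      if (znodeData == null) {
        // No znode for the table: presumed ENABLED, the declared default.
        return ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
      }
      return ZooKeeperProtos.DeprecatedTableState
          .parseFrom(znodeData).getState();
    }
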
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index fc181a8..e872f4c 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -4419,12 +4419,12 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask)
   }
 
-  public interface TableOrBuilder
+  public interface DeprecatedTableStateOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
+    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4434,7 +4434,7 @@ public final class ZooKeeperProtos {
      */
     boolean hasState();
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4442,32 +4442,33 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
      */
-    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState();
+    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState();
   }
   /**
-   * Protobuf type {@code hbase.pb.Table}
+   * Protobuf type {@code hbase.pb.DeprecatedTableState}
    *
    * <pre>
    **
    * The znode that holds state of table.
+   * Deprecated, table state is stored in table descriptor on HDFS.
    * </pre>
    */
-  public static final class Table extends
+  public static final class DeprecatedTableState extends
       com.google.protobuf.GeneratedMessage
-      implements TableOrBuilder {
-    // Use Table.newBuilder() to construct.
-    private Table(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements DeprecatedTableStateOrBuilder {
+    // Use DeprecatedTableState.newBuilder() to construct.
+    private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final Table defaultInstance;
-    public static Table getDefaultInstance() {
+    private static final DeprecatedTableState defaultInstance;
+    public static DeprecatedTableState getDefaultInstance() {
       return defaultInstance;
     }
 
-    public Table getDefaultInstanceForType() {
+    public DeprecatedTableState getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -4477,7 +4478,7 @@ public final class ZooKeeperProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private Table(
+    private DeprecatedTableState(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -4502,7 +4503,7 @@ public final class ZooKeeperProtos {
             }
             case 8: {
               int rawValue = input.readEnum();
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue);
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue);
               if (value == null) {
                 unknownFields.mergeVarintField(1, rawValue);
               } else {
@@ -4525,33 +4526,33 @@ public final class ZooKeeperProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<Table> PARSER =
-        new com.google.protobuf.AbstractParser<Table>() {
-      public Table parsePartialFrom(
+    public static com.google.protobuf.Parser<DeprecatedTableState> PARSER =
+        new com.google.protobuf.AbstractParser<DeprecatedTableState>() {
+      public DeprecatedTableState parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Table(input, extensionRegistry);
+        return new DeprecatedTableState(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<Table> getParserForType() {
+    public com.google.protobuf.Parser<DeprecatedTableState> getParserForType() {
       return PARSER;
     }
 
     /**
-     * Protobuf enum {@code hbase.pb.Table.State}
+     * Protobuf enum {@code hbase.pb.DeprecatedTableState.State}
      *
      * <pre>
      * Table's current state
@@ -4629,7 +4630,7 @@ public final class ZooKeeperProtos {
       }
       public static final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0);
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
       }
 
       private static final State[] VALUES = values();
@@ -4651,15 +4652,15 @@ public final class ZooKeeperProtos {
         this.value = value;
       }
 
-      // @@protoc_insertion_point(enum_scope:hbase.pb.Table.State)
+      // @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
     }
 
     private int bitField0_;
-    // required .hbase.pb.Table.State state = 1 [default = ENABLED];
+    // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4671,7 +4672,7 @@ public final class ZooKeeperProtos {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+     * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -4679,12 +4680,12 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() {
+    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
       return state_;
     }
 
     private void initFields() {
-      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4735,10 +4736,10 @@ public final class ZooKeeperProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj;
+      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj;
 
       boolean result = true;
       result = result && (hasState() == other.hasState());
@@ -4768,53 +4769,53 @@ public final class ZooKeeperProtos {
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -4823,7 +4824,7 @@ public final class ZooKeeperProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -4835,29 +4836,30 @@ public final class ZooKeeperProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.Table}
+     * Protobuf type {@code hbase.pb.DeprecatedTableState}
      *
      * <pre>
      **
      * The znode that holds state of table.
+     * Deprecated, table state is stored in the table descriptor on HDFS.
      * </pre>
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder {
+       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_fieldAccessorTable
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class);
+                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder()
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -4877,7 +4879,7 @@ public final class ZooKeeperProtos {
 
       public Builder clear() {
         super.clear();
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
@@ -4888,23 +4890,23 @@ public final class ZooKeeperProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Table_descriptor;
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance();
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial();
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this);
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -4917,16 +4919,16 @@ public final class ZooKeeperProtos {
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other);
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this;
         if (other.hasState()) {
           setState(other.getState());
         }
@@ -4946,11 +4948,11 @@ public final class ZooKeeperProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null;
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -4961,10 +4963,10 @@ public final class ZooKeeperProtos {
       }
       private int bitField0_;
 
-      // required .hbase.pb.Table.State state = 1 [default = ENABLED];
-      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+      // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
+      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4976,7 +4978,7 @@ public final class ZooKeeperProtos {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4984,11 +4986,11 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() {
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
         return state_;
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -4996,7 +4998,7 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
        */
-      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) {
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) {
         if (value == null) {
           throw new NullPointerException();
         }
@@ -5006,7 +5008,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.Table.State state = 1 [default = ENABLED];</code>
+       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -5016,20 +5018,20 @@ public final class ZooKeeperProtos {
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.Table)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
     }
 
     static {
-      defaultInstance = new Table(true);
+      defaultInstance = new DeprecatedTableState(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.Table)
+    // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
   }
 
   public interface TableCFOrBuilder
@@ -10934,10 +10936,10 @@ public final class ZooKeeperProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Table_descriptor;
+    internal_static_hbase_pb_DeprecatedTableState_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_Table_fieldAccessorTable;
+      internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableCF_descriptor;
   private static
@@ -11001,28 +11003,29 @@ public final class ZooKeeperProtos {
       "\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020" +
       "\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007" +
       "UNKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPL" +
-      "AY\020\002\"w\n\005Table\022-\n\005state\030\001 \002(\0162\025.hbase.pb.",
-      "Table.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED" +
-      "\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABL" +
-      "ING\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.h" +
-      "base.pb.TableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017" +
-      "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" +
-      "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
-      "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
-      "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n" +
-      "\ttable_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tb" +
-      "andwidth\030\006 \001(\003\"g\n\020ReplicationState\022/\n\005st",
-      "ate\030\001 \002(\0162 .hbase.pb.ReplicationState.St" +
-      "ate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"" +
-      "+\n\027ReplicationHLogPosition\022\020\n\010position\030\001" +
-      " \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 " +
-      "\002(\t\"\252\001\n\tTableLock\022\'\n\ntable_name\030\001 \001(\0132\023." +
-      "hbase.pb.TableName\022(\n\nlock_owner\030\002 \001(\0132\024" +
-      ".hbase.pb.ServerName\022\021\n\tthread_id\030\003 \001(\003\022" +
-      "\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013c" +
-      "reate_time\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabl" +
-      "ed\030\001 \001(\010BE\n*org.apache.hadoop.hbase.prot",
-      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "AY\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001",
+      " \002(\0162$.hbase.pb.DeprecatedTableState.Sta" +
+      "te:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DIS" +
+      "ABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"D\n\007" +
+      "TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.T" +
+      "ableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017Replicati" +
+      "onPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicatio" +
+      "nEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(\0132\030.hbas" +
+      "e.pb.BytesBytesPair\022/\n\rconfiguration\030\004 \003" +
+      "(\0132\030.hbase.pb.NameStringPair\022$\n\ttable_cf" +
+      "s\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tbandwidth\030",
+      "\006 \001(\003\"g\n\020ReplicationState\022/\n\005state\030\001 \002(\016" +
+      "2 .hbase.pb.ReplicationState.State\"\"\n\005St" +
+      "ate\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Replic" +
+      "ationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017R" +
+      "eplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tT" +
+      "ableLock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb." +
+      "TableName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb" +
+      ".ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_sha" +
+      "red\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_tim" +
+      "e\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010B",
+      "E\n*org.apache.hadoop.hbase.protobuf.gene" +
+      "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11059,11 +11062,11 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SplitLogTask_descriptor,
               new java.lang.String[] { "State", "ServerName", "Mode", });
-          internal_static_hbase_pb_Table_descriptor =
+          internal_static_hbase_pb_DeprecatedTableState_descriptor =
             getDescriptor().getMessageTypes().get(5);
-          internal_static_hbase_pb_Table_fieldAccessorTable = new
+          internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_Table_descriptor,
+              internal_static_hbase_pb_DeprecatedTableState_descriptor,
               new java.lang.String[] { "State", });
           internal_static_hbase_pb_TableCF_descriptor =
             getDescriptor().getMessageTypes().get(6);
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index a594ccd..29bec72 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -39,6 +39,27 @@ message TableSchema {
   repeated NameStringPair configuration = 4;
 }
 
+/** Denotes state of the table */
+message TableState {
+  // Table's current state
+  enum State {
+    ENABLED = 0;
+    DISABLED = 1;
+    DISABLING = 2;
+    ENABLING = 3;
+  }
+  // This is the table's state.
+  required State state = 1;
+  required TableName table = 2;
+  optional uint64 timestamp = 3;
+}
+
+/** On HDFS representation of table state. */
+message TableDescriptor {
+  required TableSchema schema = 1;
+  optional TableState.State state = 2 [ default = ENABLED ];
+}
+
 /**
  * Column Family Schema
 * Inspired by the REST ColumnSchemaMessage
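
For illustration only, and not part of this patch: a minimal sketch of composing the new HBase.proto messages through the regenerated HBaseProtos classes. It assumes the stock protobuf 2.x builder API and the existing hbase.pb.TableName shape (namespace and qualifier bytes).

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class TableStateProtoSketch {
      public static void main(String[] args) {
        // hbase.pb.TableName is assumed to carry namespace + qualifier bytes.
        HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        // TableState pairs the enum with the table it describes; timestamp is optional.
        HBaseProtos.TableState state = HBaseProtos.TableState.newBuilder()
            .setState(HBaseProtos.TableState.State.DISABLED)
            .setTable(table)
            .setTimestamp(System.currentTimeMillis())
            .build();
        System.out.println(state.getState()); // DISABLED
      }
    }

TableDescriptor then wraps a TableSchema plus this state, with ENABLED as the default when the state field is absent.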
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 27b5d75..b2fd3f8 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -456,6 +456,14 @@ message GetTableNamesResponse {
   repeated TableName table_names = 1;
 }
 
+message GetTableStateRequest {
+  required TableName table_name = 1;
+}
+
+message GetTableStateResponse {
+  required TableState table_state = 1;
+}
+
 message GetClusterStatusRequest {
 }
 
@@ -901,7 +909,9 @@ service MasterService {
   rpc IsSnapshotCleanupEnabled (IsSnapshotCleanupEnabledRequest)
     returns (IsSnapshotCleanupEnabledResponse);
 
-
+  /** returns table state */
+  rpc GetTableState(GetTableStateRequest)
+    returns(GetTableStateResponse);
 }
 
 /** Request and response to get the clusterID for this cluster */
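
For illustration only (not part of the patch): forming the new RPC's request message directly from the regenerated MasterProtos classes; the real client path presumably goes through the RequestConverter/HConnection helpers touched elsewhere in this commit.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public final class GetTableStateSketch {
      private GetTableStateSketch() {}

      // Builds the request for MasterService.GetTableState as declared above.
      public static MasterProtos.GetTableStateRequest buildRequest(String ns, String table) {
        HBaseProtos.TableName name = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8(ns))
            .setQualifier(ByteString.copyFromUtf8(table))
            .build();
        return MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(name)
            .build();
      }
    }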
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 1638bf7..ad740f3 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -105,8 +105,9 @@ message SplitLogTask {
 
 /**
  * The znode that holds state of table.
+ * Deprecated, table state is stored in the table descriptor on HDFS.
  */
-message Table {
+message DeprecatedTableState {
   // Table's current state
   enum State {
     ENABLED = 0;
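
For illustration only (not part of the patch): a hedged sketch of decoding a legacy table znode payload with the renamed message, for example while migrating state off ZooKeeper. It assumes the znode data carries the usual pb-magic prefix.

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    public final class LegacyTableZnodeReader {
      private LegacyTableZnodeReader() {}

      // Strips the pb-magic prefix and parses the renamed DeprecatedTableState message.
      public static ZooKeeperProtos.DeprecatedTableState.State decode(byte[] znodeData)
          throws com.google.protobuf.InvalidProtocolBufferException {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        return ZooKeeperProtos.DeprecatedTableState.newBuilder()
            .mergeFrom(znodeData, pblen, znodeData.length - pblen)
            .build().getState();
      }
    }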
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 2b12f81..49f2e3c 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Service to support Region Server Grouping (HBase-6721)
@@ -269,8 +269,8 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     }
     for(TableName table: tables) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(table,
-          ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED,
+          TableState.State.DISABLING)) {
         LOG.debug("Skipping move regions because the table" + table + " is disabled.");
         continue;
       }
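
For illustration only (not part of the patch): the call-site migration this hunk applies, reduced to a helper. The enum moves from ZooKeeperProtos.Table.State to the client-side TableState.State, while the varargs isTableState(...) shape stays the same.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    public final class TableStateChecks {
      private TableStateChecks() {}

      // Mirrors the check above: skip region moves for disabled/disabling tables.
      public static boolean isDisabledOrDisabling(TableStateManager tsm, TableName table) {
        return tsm.isTableState(table,
            TableState.State.DISABLED, TableState.State.DISABLING);
      }
    }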
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6799e69..41a83a5 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -61,11 +60,13 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.net.Address;
@@ -74,7 +75,6 @@ import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -646,7 +646,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
                     if (sn == null) {
                       found.set(false);
                     } else if (tsm.isTableState(RSGROUP_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
@@ -670,7 +670,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
                     if (sn == null) {
                       nsFound.set(false);
                     } else if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
-                        ZooKeeperProtos.Table.State.ENABLED)) {
+                        TableState.State.ENABLED)) {
                       try {
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
                         ClientProtos.GetRequest request =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
index bdb202d..b4c808c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
@@ -55,12 +55,4 @@ public interface CoordinatedStateManager {
    * @return instance of Server coordinated state manager runs within
    */
   Server getServer();
-
-  /**
-   * Returns implementation of TableStateManager.
-   * @throws InterruptedException if operation is interrupted
-   * @throws CoordinatedStateException if error happens in underlying coordination mechanism
-   */
-  TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
new file mode 100644
index 0000000..5db0f69
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.InvalidProtocolBufferException;
+/**
+ * Class that represents a table descriptor together with its state on HDFS.
+ */
+@InterfaceAudience.Private
+public class TableDescriptor {
+  private HTableDescriptor hTableDescriptor;
+  private TableState.State tableState;
+
+  /**
+   * Creates TableDescriptor with all fields.
+   * @param hTableDescriptor HTableDescriptor to use
+   * @param tableState table state
+   */
+  public TableDescriptor(HTableDescriptor hTableDescriptor,
+      TableState.State tableState) {
+    this.hTableDescriptor = hTableDescriptor;
+    this.tableState = tableState;
+  }
+
+  /**
+   * Creates TableDescriptor with ENABLED state.
+   * @param hTableDescriptor HTableDescriptor to use
+   */
+  @VisibleForTesting
+  public TableDescriptor(HTableDescriptor hTableDescriptor) {
+    this(hTableDescriptor, TableState.State.ENABLED);
+  }
+
+  /**
+   * Associated HTableDescriptor
+   * @return instance of HTableDescriptor
+   */
+  public HTableDescriptor getHTableDescriptor() {
+    return hTableDescriptor;
+  }
+
+  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
+    this.hTableDescriptor = hTableDescriptor;
+  }
+
+  public TableState.State getTableState() {
+    return tableState;
+  }
+
+  public void setTableState(TableState.State tableState) {
+    this.tableState = tableState;
+  }
+
+  /**
+   * Convert to PB.
+   */
+  public HBaseProtos.TableDescriptor convert() {
+    return HBaseProtos.TableDescriptor.newBuilder()
+        .setSchema(hTableDescriptor.convert())
+        .setState(tableState.convert())
+        .build();
+  }
+
+  /**
+   * Convert from PB
+   */
+  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
+    TableState.State state = TableState.State.convert(proto.getState());
+    return new TableDescriptor(hTableDescriptor, state);
+  }
+
+  /**
+   * @return This instance serialized with pb with pb magic prefix
+   * @see #parseFrom(byte[])
+   */
+  public byte [] toByteArray() {
+    return ProtobufUtil.prependPBMagic(convert().toByteArray());
+  }
+
+  /**
+   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
+   * @see #toByteArray()
+   */
+  public static TableDescriptor parseFrom(final byte [] bytes)
+      throws DeserializationException, IOException {
+    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Expected PB encoded TableDescriptor");
+    }
+    int pblen = ProtobufUtil.lengthOfPBMagic();
+    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
+    HBaseProtos.TableDescriptor ts;
+    try {
+      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return convert(ts);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    TableDescriptor that = (TableDescriptor) o;
+
+    if (hTableDescriptor != null ?
+        !hTableDescriptor.equals(that.hTableDescriptor) :
+        that.hTableDescriptor != null){
+      return false;
+    }
+    if (tableState != that.tableState) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
+    result = 31 * result + (tableState != null ? tableState.hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "TableDescriptor{" +
+        "hTableDescriptor=" + hTableDescriptor +
+        ", tableState=" + tableState +
+        '}';
+  }
+}
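
For illustration only (not part of the patch): a round-trip sketch exercising just the methods defined in the file above; TableName.valueOf and the HTableDescriptor constructor are assumed from the existing client API.

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;

    public class TableDescriptorRoundTrip {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = new TableDescriptor(
            new HTableDescriptor(TableName.valueOf("t1")), TableState.State.DISABLED);
        byte[] bytes = td.toByteArray();                  // pb-magic-prefixed bytes
        TableDescriptor parsed = TableDescriptor.parseFrom(bytes);
        System.out.println(td.equals(parsed));            // true: schema and state match
      }
    }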
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 33ae1d5..c7bfd03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -37,6 +37,14 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * @param tableName
+   * @return TableDescriptor for the given table name
+   * @throws IOException
+   */
+  TableDescriptor getDescriptor(final TableName tableName)
+      throws IOException;
+
+  /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
    * @throws IOException
@@ -54,6 +62,15 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * Get Map of all TableDescriptors. Populates the descriptor cache as a
+   * side effect.
+   * @return Map of all descriptors.
+   * @throws IOException
+   */
+  Map<String, TableDescriptor> getAllDescriptors()
+      throws IOException;
+
+  /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
    * @throws IOException
@@ -62,6 +79,14 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
+   */
+  void add(final TableDescriptor htd)
+      throws IOException;
+
+  /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
    * @throws IOException
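
For illustration only (not part of the patch): how a caller might use the widened TableDescriptors contract to read schema and state together; per the javadoc above, getAllDescriptors also populates the descriptor cache.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.TableName;

    public final class DescriptorDump {
      private DescriptorDump() {}

      public static void dump(TableDescriptors tds, TableName name) throws IOException {
        TableDescriptor one = tds.getDescriptor(name);    // schema plus persisted state
        System.out.println(name + " -> " + one.getTableState());
        Map<String, TableDescriptor> all = tds.getAllDescriptors(); // warms the cache
        System.out.println("known tables: " + all.keySet());
      }
    }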
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
deleted file mode 100644
index 21c09b8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.InterruptedIOException;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Helper class for table state management for operations running inside
- * RegionServer or HMaster.
- * Depending on implementation, fetches information from HBase system table,
- * local data store, ZooKeeper ensemble or somewhere else.
- * Code running on client side (with no coordinated state context) shall instead use
- * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader}
- */
-@InterfaceAudience.Private
-public interface TableStateManager {
-
-  /**
-   * Sets the table into desired state. Fails silently if the table is already in this state.
-   * @param tableName table to process
-   * @param state new state of this table
-   * @throws CoordinatedStateException if error happened when trying to set table state
-   */
-  void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is already in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                  ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is NOT in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should NOT be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                     ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states);
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, boolean checkSource,
-      ZooKeeperProtos.Table.State... states);
-
-  /**
-   * Mark table as deleted. Fails silently if the table is not currently marked as disabled.
-   * @param tableName table to be deleted
-   * @throws CoordinatedStateException if error happened while performing operation
-   */
-  void setDeletedTable(TableName tableName) throws CoordinatedStateException;
-
-  /**
-   * Checks if table is present.
-   *
-   * @param tableName table we're checking
-   * @return true if the table is present, false otherwise
-   */
-  boolean isTablePresent(TableName tableName);
-
-  /**
-   * @return set of tables which are in any one of the listed states, empty Set if none
-   */
-  Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException;
-
-  /**
-   * If the table is found in the given state the in-memory state is removed. This
-   * helps in cases where CreateTable is to be retried by the client in case of
-   * failures.  If deletePermanentState is true - the flag kept permanently is
-   * also reset.
-   *
-   * @param tableName table we're working on
-   * @param states if table isn't in any one of these states, operation aborts
-   * @param deletePermanentState if true, reset the permanent flag
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                            boolean deletePermanentState)
-    throws CoordinatedStateException;
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index f79e5d8..03762ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
 
 /**
  * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations.
@@ -49,9 +47,6 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan
     return null;
   }
 
-  @Override
-  public abstract TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
   /**
    * Method to retrieve coordination for split log worker
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 2f739be..7222b0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -20,13 +20,9 @@ package org.apache.hadoop.hbase.coordination;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
@@ -61,16 +57,6 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
   }
 
   @Override
-  public TableStateManager getTableStateManager() throws InterruptedException,
-      CoordinatedStateException {
-    try {
-      return new ZKTableStateManager(server.getZooKeeper());
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
-  @Override
   public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
     return splitLogWorkerCoordination;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
index 812bbe2..b54740a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -309,7 +309,7 @@ public class ZkOpenRegionCoordination implements OpenRegionCoordination {
     }
     if (!openedNodeDeleted) {
       if (assignmentManager.getTableStateManager().isTableState(regionInfo.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         debugLog(regionInfo, "Opened region "
           + regionInfo.getShortNameToLog() + " but "
           + "this table is disabled, triggering close of region");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index de4edbb..842ce85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -64,7 +63,6 @@ import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
@@ -77,6 +75,7 @@ import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination.SplitTr
 import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
 import org.apache.hadoop.hbase.coordination.ZkRegionMergeCoordination;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -92,12 +91,12 @@ import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
@@ -286,14 +285,11 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param service Executor service
    * @param metricsMaster metrics manager
    * @param tableLockManager TableLock manager
-   * @throws KeeperException
-   * @throws IOException
    */
   public AssignmentManager(MasterServices server, ServerManager serverManager,
       final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager) throws KeeperException,
-        IOException, CoordinatedStateException {
+      final TableLockManager tableLockManager, final TableStateManager tableStateManager) {
     super(server.getZooKeeper());
     this.server = server;
     this.serverManager = serverManager;
@@ -306,15 +302,9 @@ public class AssignmentManager extends ZooKeeperListener {
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
            HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
            FavoredNodeLoadBalancer.class);
-    try {
-      if (server.getCoordinatedStateManager() != null) {
-        this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
-      } else {
-        this.tableStateManager = null;
-      }
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    }
+
+    this.tableStateManager = tableStateManager;
+
     // This is the max attempts, not retries, so it should be at least 1.
     this.maximumAttempts = Math.max(1,
       this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
@@ -392,7 +382,7 @@ public class AssignmentManager extends ZooKeeperListener {
   }
 
   /**
-   * @return Instance of ZKTableStateManager.
+   * @return Instance of TableStateManager.
    */
   public TableStateManager getTableStateManager() {
     // These are 'expensive' to make involving trip to zk ensemble so allow
@@ -516,10 +506,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
-   * @throws CoordinatedStateException
    */
   void joinCluster() throws IOException,
-      KeeperException, InterruptedException, CoordinatedStateException {
+      KeeperException, CoordinatedStateException {
     long startTime = System.currentTimeMillis();
     // Concurrency note: In the below the accesses on regionsInTransition are
     // outside of a synchronization block where usually all accesses to RIT are
@@ -560,7 +549,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws InterruptedException
    */
   boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServers)
-  throws KeeperException, IOException, InterruptedException, CoordinatedStateException {
+      throws KeeperException, IOException {
     List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
 
     if (useZKForAssignment && nodes == null) {
@@ -568,7 +557,6 @@ public class AssignmentManager extends ZooKeeperListener {
       server.abort(errorMessage, new IOException(errorMessage));
       return true; // Doesn't matter in this case
     }
-
     boolean failover = !serverManager.getDeadServers().isEmpty();
     if (failover) {
       // This may not be a failover actually, especially if meta is on this master.
@@ -689,7 +677,11 @@ public class AssignmentManager extends ZooKeeperListener {
     if (!failover) {
       // Fresh cluster startup.
       LOG.info("Clean cluster startup. Assigning user regions");
-      assignAllUserRegions(allRegions);
+      try {
+        assignAllUserRegions(allRegions);
+      } catch (InterruptedException ie) {
+        ExceptionUtil.rethrowIfInterrupt(ie);
+      }
     }
     // unassign replicas of the split parents and the merged regions
     // the daughter replicas are opened in assignAllUserRegions if it was
@@ -707,11 +699,10 @@ public class AssignmentManager extends ZooKeeperListener {
    * locations are returned.
    */
   private Map<HRegionInfo, ServerName> getUserRegionsToAssign()
-      throws InterruptedIOException, CoordinatedStateException {
+      throws IOException {
     Set<TableName> disabledOrDisablingOrEnabling =
-        tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED,
-          ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLING);
-
+        tableStateManager.getTablesInStates(TableState.State.DISABLED,
+          TableState.State.DISABLING, TableState.State.ENABLING);
     // Clean re/start, mark all user regions closed before reassignment
     return regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
   }
@@ -739,7 +730,7 @@ public class AssignmentManager extends ZooKeeperListener {
         try {
           // Assign the regions
           assignAllUserRegions(getUserRegionsToAssign());
-        } catch (CoordinatedStateException | IOException | InterruptedException e) {
+        } catch (IOException | InterruptedException e) {
           LOG.error("Exception occured while assigning user regions.", e);
         }
       };
@@ -1482,7 +1473,7 @@ public class AssignmentManager extends ZooKeeperListener {
             LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
 
             boolean disabled = getTableStateManager().isTableState(regionInfo.getTable(),
-                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
+                TableState.State.DISABLED, TableState.State.DISABLING);
 
             ServerName serverName = rs.getServerName();
             if (serverManager.isServerOnline(serverName)) {
@@ -2269,7 +2260,7 @@ public class AssignmentManager extends ZooKeeperListener {
             // will not be in ENABLING or ENABLED state.
             TableName tableName = region.getTable();
             if (!tableStateManager.isTableState(tableName,
-              ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) {
+              TableState.State.ENABLED, TableState.State.ENABLING)) {
               LOG.debug("Setting table " + tableName + " to ENABLED state.");
               setEnabledTable(tableName);
             }
@@ -2495,8 +2486,8 @@ public class AssignmentManager extends ZooKeeperListener {
 
   private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
     if (this.tableStateManager.isTableState(region.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
+            TableState.State.DISABLED,
+            TableState.State.DISABLING) || replicasToClose.contains(region)) {
       LOG.info("Table " + region.getTable() + " is disabled or disabling;"
         + " skipping assign of " + region.getRegionNameAsString());
       offlineDisabledRegion(region);
@@ -3127,7 +3118,7 @@ public class AssignmentManager extends ZooKeeperListener {
     for (HRegionInfo hri : regionsFromMetaScan) {
       TableName tableName = hri.getTable();
       if (!tableStateManager.isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+              TableState.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -3194,14 +3185,14 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   Set<ServerName> rebuildUserRegions() throws
-      IOException, KeeperException, CoordinatedStateException {
+          IOException, KeeperException {
     Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED, TableState.State.ENABLING);
 
     Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED,
-      ZooKeeperProtos.Table.State.DISABLING,
-      ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED,
+            TableState.State.DISABLING,
+            TableState.State.ENABLING);
 
     // Region assignment from META
     List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
@@ -3253,7 +3244,7 @@ public class AssignmentManager extends ZooKeeperListener {
         ServerName lastHost = hrl.getServerName();
         ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
         if (tableStateManager.isTableState(regionInfo.getTable(),
-             ZooKeeperProtos.Table.State.DISABLED)) {
+             TableState.State.DISABLED)) {
           // force region to forget it hosts for disabled/disabling tables.
           // see HBASE-13326
           lastHost = null;
@@ -3283,7 +3274,7 @@ public class AssignmentManager extends ZooKeeperListener {
         // this will be used in rolling restarts
         if (!disabledOrDisablingOrEnabling.contains(tableName)
           && !getTableStateManager().isTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLED)) {
+                TableState.State.ENABLED)) {
           setEnabledTable(tableName);
         }
       }
@@ -3300,9 +3291,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   private void recoverTableInDisablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> disablingTables =
-      tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
+            tableStateManager.getTablesInStates(TableState.State.DISABLING);
     if (disablingTables.size() != 0) {
       for (TableName tableName : disablingTables) {
         // Recover by calling DisableTableHandler
@@ -3324,9 +3315,9 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws IOException
    */
   private void recoverTableInEnablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> enablingTables = tableStateManager.
-      getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
+            getTablesInStates(TableState.State.ENABLING);
     if (enablingTables.size() != 0) {
       for (TableName tableName : enablingTables) {
         // Recover by calling EnableTableHandler
@@ -3398,9 +3389,9 @@ public class AssignmentManager extends ZooKeeperListener {
         LOG.info("Server " + serverName + " isn't online. SSH will handle this");
         continue;
       }
+      RegionState.State state = regionState.getState();
       HRegionInfo regionInfo = regionState.getRegion();
-      State state = regionState.getState();
-
+      LOG.info("Processing " + regionState);
       switch (state) {
       case CLOSED:
         invokeAssign(regionInfo);
@@ -3790,7 +3781,7 @@ public class AssignmentManager extends ZooKeeperListener {
             server.abort("Unexpected ZK exception deleting node " + hri, ke);
           }
           if (tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+                  TableState.State.DISABLED, TableState.State.DISABLING)) {
             regionStates.regionOffline(hri);
             it.remove();
             continue;
@@ -3813,7 +3804,7 @@ public class AssignmentManager extends ZooKeeperListener {
     HRegionInfo hri = plan.getRegionInfo();
     TableName tableName = hri.getTable();
     if (tableStateManager.isTableState(tableName,
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
       LOG.info("Ignored moving region of disabling/disabled table "
         + tableName);
       return;
@@ -3861,8 +3852,8 @@ public class AssignmentManager extends ZooKeeperListener {
   protected void setEnabledTable(TableName tableName) {
     try {
       this.tableStateManager.setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
+              TableState.State.ENABLED);
+    } catch (IOException e) {
       // here we can abort as it is the start up flow
       String errorMsg = "Unable to ensure that the table " + tableName
           + " will be" + " enabled because of a ZooKeeper issue";
@@ -3967,8 +3958,8 @@ public class AssignmentManager extends ZooKeeperListener {
         // When there are more than one region server a new RS is selected as the
         // destination and the same is updated in the region plan. (HBASE-5546)
         if (getTableStateManager().isTableState(hri.getTable(),
-            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-            replicasToClose.contains(hri)) {
+                TableState.State.DISABLED, TableState.State.DISABLING) ||
+                replicasToClose.contains(hri)) {
           offlineDisabledRegion(hri);
           return;
         }
@@ -3996,15 +3987,14 @@ public class AssignmentManager extends ZooKeeperListener {
     // reset the count, if any
     failedOpenTracker.remove(hri.getEncodedName());
     if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     }
   }
 
   private void onRegionClosed(final HRegionInfo hri) {
-    if (getTableStateManager().isTableState(hri.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-        replicasToClose.contains(hri)) {
+    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED,
+        TableState.State.DISABLING) || replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return;
     }
@@ -4050,7 +4040,7 @@ public class AssignmentManager extends ZooKeeperListener {
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(p);
     }
     return null;
@@ -4076,7 +4066,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         invokeUnAssign(a);
         invokeUnAssign(b);
       } else {
@@ -4130,7 +4120,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (getTableStateManager().isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         invokeUnAssign(p);
       } else {
         Callable<Object> mergeReplicasCallable = new Callable<Object>() {
@@ -4170,7 +4160,7 @@ public class AssignmentManager extends ZooKeeperListener {
     }
 
     if (getTableStateManager().isTableState(p.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     }
@@ -4291,7 +4281,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(p);
       }
     }
@@ -4421,7 +4411,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
       // User could disable the table before master knows the new region.
       if (tableStateManager.isTableState(p.getTable(),
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+          TableState.State.DISABLED, TableState.State.DISABLING)) {
         unassign(hri_a);
         unassign(hri_b);
       }
@@ -4692,7 +4682,7 @@ public class AssignmentManager extends ZooKeeperListener {
         errorMsg = hri.getShortNameToLog()
           + " is not pending close on " + serverName;
       } else {
-        onRegionClosed(hri);
+        onRegionClosed(hri);
       }
       break;
 
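
For reference, the change in this file is largely one mechanical substitution:
every check that used to read the table znode through ZooKeeperProtos.Table.State
now asks the descriptor-backed TableStateManager. A minimal sketch of the
recurring pattern (the helper name below is hypothetical, not part of this patch):

    // Hypothetical guard illustrating the new-style check used throughout
    // AssignmentManager after this change.
    private boolean isDisabledOrDisabling(HRegionInfo hri) {
      return tableStateManager.isTableState(hri.getTable(),
          TableState.State.DISABLED, TableState.State.DISABLING);
    }
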
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 71c79be..1e03d44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
@@ -139,7 +140,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -167,6 +167,7 @@ import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -385,6 +386,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   private long splitPlanCount;
   private long mergePlanCount;
 
+  // handle table states
+  private TableStateManager tableStateManager;
+
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
 
@@ -694,9 +698,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager);
+      this.tableLockManager, tableStateManager);
     zooKeeper.registerListenerFirst(assignmentManager);
-
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
         this.serverManager);
     this.regionServerTracker.start();
@@ -728,6 +731,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
+
+    // migrate any existing table states from ZK
+    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
+        .queryForTableStates(getZooKeeper()).entrySet()) {
+      LOG.info("Converting state from zk to new states:" + entry);
+      tableStateManager.setTableState(entry.getKey(), entry.getValue());
+    }
+    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
   }
 
   /**
@@ -792,6 +803,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
 
+    this.tableStateManager = new TableStateManager(this);
+    this.tableStateManager.start();
+
     status.setStatus("Initializing ZK system trackers");
     initializeZKBasedSystemTrackers();
 
@@ -1186,8 +1200,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   }
 
   private void enableMeta(TableName metaTableName) {
-    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+    if (!this.tableStateManager.isTableState(metaTableName,
+            TableState.State.ENABLED)) {
       this.assignmentManager.setEnabledTable(metaTableName);
     }
   }
@@ -1231,6 +1245,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return tableNamespaceManager;
   }
 
+  @Override
+  public TableStateManager getTableStateManager() {
+    return tableStateManager;
+  }
+
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1663,7 +1682,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       // Don't run the normalizer concurrently
       List<TableName> allEnabledTables = new ArrayList<>(
         this.assignmentManager.getTableStateManager().getTablesInStates(
-          ZooKeeperProtos.Table.State.ENABLED));
+          TableState.State.ENABLED));
 
       Collections.shuffle(allEnabledTables);
 
@@ -2508,7 +2527,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
-        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+        isTableState(tableName, TableState.State.DISABLED)) {
       throw new TableNotDisabledException(tableName);
     }
   }
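
Pulling the scattered HMaster hunks together: the TableStateManager is created
and started before the ZK-based trackers, so the AssignmentManager can receive
it in its constructor, and the one-time ZK migration runs later in startup once
services are up. A condensed sketch of that ordering, using the names from the
hunks above:

    this.tableStateManager = new TableStateManager(this); // backed by tableinfo files
    this.tableStateManager.start();                       // warms the in-memory cache
    initializeZKBasedSystemTrackers();                    // constructs AssignmentManager
                                                          // with tableStateManager injected
    // ... later: migrate any legacy table states out of ZK, then delete the
    // old znodes so the migration runs only once.
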
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 6ca0ad5..c5e8101 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -547,7 +547,6 @@ public class MasterFileSystem {
       fsd.createTableDescriptor(
           new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
     }
-
     return rd;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 07ec9fc..4af4560 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -40,10 +40,12 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -952,13 +954,11 @@ public class MasterRpcServices extends RSRpcServices
   public GetTableNamesResponse getTableNames(RpcController controller,
       GetTableNamesRequest req) throws ServiceException {
     try {
-      master.checkInitialized();
-
+      master.checkServiceStarted();
       final String regex = req.hasRegex() ? req.getRegex() : null;
       final String namespace = req.hasNamespace() ? req.getNamespace() : null;
       List<TableName> tableNames = master.listTableNames(namespace, regex,
           req.getIncludeSysTables());
-
       GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
       if (tableNames != null && tableNames.size() > 0) {
         // Add the table names to the response
@@ -973,6 +973,26 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
+      MasterProtos.GetTableStateRequest request) throws ServiceException {
+    try {
+      master.checkServiceStarted();
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      TableState.State state = master.getTableStateManager()
+              .getTableState(tableName);
+      if (state == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      MasterProtos.GetTableStateResponse.Builder builder =
+              MasterProtos.GetTableStateResponse.newBuilder();
+      builder.setTableState(new TableState(tableName, state).convert());
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
       IsCatalogJanitorEnabledRequest req) throws ServiceException {
     return IsCatalogJanitorEnabledResponse.newBuilder().setValue(
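
For context, the new getTableState endpoint would be exercised from a client
roughly as below. This is a sketch only: the blocking stub wiring and any
client-side deserialization of the returned HBaseProtos.TableState message are
assumed, not part of this hunk.

    MasterProtos.GetTableStateRequest request =
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(tableName))
            .build();
    MasterProtos.GetTableStateResponse response =
        masterStub.getTableState(null, request);
    // response.getTableState() carries the message the server built via
    // new TableState(tableName, state).convert().
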
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index be6fb12..d20b764 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -84,6 +84,11 @@ public interface MasterServices extends Server {
   TableLockManager getTableLockManager();
 
   /**
+   * @return Master's instance of {@link TableStateManager}
+   */
+  TableStateManager getTableStateManager();
+
+  /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
   MasterCoprocessorHost getMasterCoprocessorHost();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index e31868e..b8b49d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -31,6 +31,8 @@ import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,14 +44,13 @@ import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -59,9 +60,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 /**
  * Region state accountant. It holds the states of all regions in the memory.
  * In normal scenario, it should match the meta table and the true region states.
@@ -720,7 +718,7 @@ public class RegionStates {
       if (oldServerName != null && serverHoldings.containsKey(oldServerName)) {
         if (newState == State.MERGED || newState == State.SPLIT
             || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
           // Offline the region only if it's merged/split, or the table is disabled/disabling.
           // Otherwise, offline it from this server only when it is online on a different server.
           LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
@@ -1295,8 +1293,8 @@ public class RegionStates {
    * Update a region state. It will be put in transition if not already there.
    */
   private RegionState updateRegionState(final HRegionInfo hri,
-      final State state, final ServerName serverName, long openSeqNum) {
-    if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) {
+      final RegionState.State state, final ServerName serverName, long openSeqNum) {
+    if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) {
       LOG.warn("Failed to open/close " + hri.getShortNameToLog()
         + " on " + serverName + ", set to " + state);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 5929f26..e576934 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -228,7 +229,7 @@ public class TableNamespaceManager {
     }
 
     // Now check if the table is assigned, if not then fail fast
-    if (isTableAssigned()) {
+    if (isTableAssigned() && isTableEnabled()) {
       try {
         boolean initGoodSofar = true;
         nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
@@ -297,6 +298,12 @@ public class TableNamespaceManager {
     return false;
   }
 
+  private boolean isTableEnabled() throws IOException {
+    // Compare from the constant side: getTableState may return null when
+    // the namespace table does not exist yet.
+    return TableState.State.ENABLED.equals(masterServices.getTableStateManager()
+        .getTableState(TableName.NAMESPACE_TABLE_NAME));
+  }
+
   private boolean isTableAssigned() {
     return !masterServices.getAssignmentManager().getRegionStates().
         getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
new file mode 100644
index 0000000..4ba3d10
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+
+/**
+ * A helper class used to manage table states.
+ * States are persisted in the table descriptor file (tableinfo) and cached in memory.
+ */
+@InterfaceAudience.Private
+public class TableStateManager {
+  private static final Log LOG = LogFactory.getLog(TableStateManager.class);
+  private final TableDescriptors descriptors;
+
+  private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap();
+
+  public TableStateManager(MasterServices master) {
+    this.descriptors = master.getTableDescriptors();
+  }
+
+  public void start() throws IOException {
+    Map<String, TableDescriptor> all = descriptors.getAllDescriptors();
+    for (TableDescriptor table : all.values()) {
+      TableName tableName = table.getHTableDescriptor().getTableName();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding table state: " + tableName
+            + ": " + table.getTableState());
+      }
+      tableStates.put(tableName, table.getTableState());
+    }
+  }
+
+  /**
+   * Set the table state to the provided value.
+   * The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @throws IOException
+   */
+  public void setTableState(TableName tableName, TableState.State newState) throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (descriptor.getTableState() != newState) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+      }
+    }
+  }
+
+  /**
+   * Set the table state to the provided value, but only if the table is
+   * currently in one of the specified states.
+   * The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @return true if the state was set, false if the table was not in the given states
+   * @throws IOException
+   */
+  public boolean setTableStateIfInStates(TableName tableName,
+                                         TableState.State newState,
+                                         TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+
+  /**
+   * Set the table state to the provided value, but only if the table is
+   * currently in none of the specified states.
+   * The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @return true if the state was set, false if the table was in one of the given states
+   * @throws IOException
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName,
+                                            TableState.State newState,
+                                            TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  public boolean isTableState(TableName tableName, TableState.State... states) {
+    TableState.State tableState = null;
+    try {
+      tableState = getTableState(tableName);
+    } catch (IOException e) {
+      LOG.error("Unable to get table state, probably table not exists");
+      return false;
+    }
+    return tableState != null && TableState.isInStates(tableState, states);
+  }
+
+  public void setDeletedTable(TableName tableName) throws IOException {
+    TableState.State remove = tableStates.remove(tableName);
+    if (remove == null) {
+      LOG.warn("Moving table " + tableName + " state to deleted but was " +
+              "already deleted");
+    }
+  }
+
+  public boolean isTablePresent(TableName tableName) throws IOException {
+    return getTableState(tableName) != null;
+  }
+
+  /**
+   * Return all tables in given states.
+   *
+   * @param states filter by states
+   * @return tables in given states
+   * @throws IOException
+   */
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+    Set<TableName> rv = Sets.newHashSet();
+    for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) {
+      if (TableState.isInStates(entry.getValue(), states)) {
+        rv.add(entry.getKey());
+      }
+    }
+    return rv;
+  }
+
+  public TableState.State getTableState(TableName tableName) throws IOException {
+    TableState.State tableState = tableStates.get(tableName);
+    if (tableState == null) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor != null) {
+        tableState = descriptor.getTableState();
+      }
+    }
+    return tableState;
+  }
+
+  /**
+   * Write the descriptor in place and update the cached state.
+   * The write lock should be held by the caller.
+   *
+   * @param descriptor what to write
+   */
+  private void writeDescriptor(TableDescriptor descriptor) throws IOException {
+    TableName tableName = descriptor.getHTableDescriptor().getTableName();
+    TableState.State state = descriptor.getTableState();
+    descriptors.add(descriptor);
+    LOG.debug("Table " + tableName + " written descriptor for state " + state);
+    tableStates.put(tableName, state);
+    LOG.debug("Table " + tableName + " updated state to " + state);
+  }
+
+  /**
+   * Read the current descriptor for the table and update the cached state.
+   *
+   * @param tableName table to read the descriptor for
+   * @return the table descriptor, or null if the table does not exist
+   * @throws IOException
+   */
+  private TableDescriptor readDescriptor(TableName tableName) throws IOException {
+    TableDescriptor descriptor = descriptors.getDescriptor(tableName);
+    if (descriptor == null) {
+      tableStates.remove(tableName);
+    } else {
+      tableStates.put(tableName, descriptor.getTableState());
+    }
+    return descriptor;
+  }
+}
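
Taken together, master-side callers drive the new class along these lines
(a sketch; tableName and the region-closing work are assumed):

    TableStateManager tsm = master.getTableStateManager();

    // Guarded transition, as the disable handler below uses it: only move
    // ENABLED -> DISABLING; returns false if a concurrent request won the race.
    if (tsm.setTableStateIfInStates(tableName,
        TableState.State.DISABLING, TableState.State.ENABLED)) {
      // ... close the table's regions ...
      tsm.setTableState(tableName, TableState.State.DISABLED);
    }

    // Bulk query used by the AssignmentManager: tables that are not safe
    // to assign during startup.
    Set<TableName> skip = tsm.getTablesInStates(TableState.State.DISABLED,
        TableState.State.DISABLING, TableState.State.ENABLING);
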
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
index 389a738..3be3316 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
@@ -23,11 +23,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 
 /**
  * Handles CLOSED region event on Master.
@@ -93,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
     // Check if this table is being disabled or not
     if (this.assignmentManager.getTableStateManager().isTableState(this.regionInfo.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+        TableState.State.DISABLED, TableState.State.DISABLING) ||
         assignmentManager.getReplicasToClose().contains(regionInfo)) {
       assignmentManager.offlineDisabledRegion(regionInfo);
       return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 79e2493..09569b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -30,14 +30,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -48,7 +50,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -119,13 +120,6 @@ public class CreateTableHandler extends EventHandler {
       if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
-
-      // During master initialization, the ZK state could be inconsistent from failed DDL
-      // in the past. If we fail here, it would prevent master to start.  We should force
-      // setting the system table state regardless the table state.
-      boolean skipTableStateCheck =
-          !((HMaster) this.server).isInitialized() && tableName.isSystemTable();
-      checkAndSetEnablingTable(assignmentManager, tableName, skipTableStateCheck);
       success = true;
     } finally {
       if (!success) {
@@ -135,52 +129,6 @@ public class CreateTableHandler extends EventHandler {
     return this;
   }
 
-  static void checkAndSetEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName, boolean skipTableStateCheck) throws IOException {
-    // If we have multiple client threads trying to create the table at the
-    // same time, given the async nature of the operation, the table
-    // could be in a state where hbase:meta table hasn't been updated yet in
-    // the process() function.
-    // Use enabling state to tell if there is already a request for the same
-    // table in progress. This will introduce a new zookeeper call. Given
-    // createTable isn't a frequent operation, that should be ok.
-    // TODO: now that we have table locks, re-evaluate above -- table locks are not enough.
-    // We could have cleared the hbase.rootdir and not zk.  How can we detect this case?
-    // Having to clean zk AND hdfs is awkward.
-    try {
-      if (skipTableStateCheck) {
-        assignmentManager.getTableStateManager().setTableState(
-          tableName,
-          ZooKeeperProtos.Table.State.ENABLING);
-      } else if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(
-        tableName,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLED)) {
-        throw new TableExistsException(tableName);
-      }
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that the table will be" +
-        " enabling because of a ZooKeeper issue", e);
-    }
-  }
-
-  static void removeEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) {
-    // Try deleting the enabling node in case of error
-    // If this does not happen then if the client tries to create the table
-    // again with the same Active master
-    // It will block the creation saying TableAlreadyExists.
-    try {
-      assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING, false);
-    } catch (CoordinatedStateException e) {
-      // Keeper exception should not happen here
-      LOG.error("Got a keeper exception while removing the ENABLING table znode "
-          + tableName, e);
-    }
-  }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -228,9 +176,6 @@ public class CreateTableHandler extends EventHandler {
     releaseTableLock();
     LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
         (exception == null ? "successful" : "failed. " + exception));
-    if (exception != null) {
-      removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName());
-    }
   }
 
   /**
@@ -253,9 +198,12 @@ public class CreateTableHandler extends EventHandler {
     FileSystem fs = fileSystemManager.getFileSystem();
 
     // 1. Create Table Descriptor
+    // use a copy of the descriptor: the table is created in ENABLING state first
+    TableDescriptor underConstruction = new TableDescriptor(
+        this.hTableDescriptor, TableState.State.ENABLING);
     Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
     new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-      tempTableDir, this.hTableDescriptor, false);
+      tempTableDir, underConstruction, false);
     Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);
 
     // 2. Create Regions
@@ -280,24 +228,18 @@ public class CreateTableHandler extends EventHandler {
       // 7. Trigger immediate assignment of the regions in round-robin fashion
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }
-
-    // 8. Set table enabled flag up in zk.
-    try {
-      assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that " + tableName + " will be" +
-        " enabled because of a ZooKeeper issue", e);
-    }
-
     // 8. Update the tabledescriptor cache.
     ((HMaster) this.server).getTableDescriptors().get(tableName);
+
+    // 9. Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+            TableState.State.ENABLED);
   }
 
   /**
    * Create any replicas for the regions (the default replicas that was
    * already created is passed to the method)
-   * @param hTableDescriptor
+   * @param hTableDescriptor descriptor to use
    * @param regions default replicas
    * @return the combined list of default and non-default replicas
    */
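
Net effect on the create path: the descriptor is written to the temp directory
with ENABLING already baked in, and only after the regions are created, added
to meta, and assigned is the state flipped to ENABLED through the same manager.
Condensed sketch, using the names from the hunks above:

    TableDescriptor underConstruction =
        new TableDescriptor(hTableDescriptor, TableState.State.ENABLING);
    new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
        tempTableDir, underConstruction, false);
    // ... create regions, add them to hbase:meta, assign ...
    assignmentManager.getTableStateManager().setTableState(tableName,
        TableState.State.ENABLED);
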
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index 76f603f..e9b764e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -25,13 +25,13 @@ import java.util.concurrent.ExecutorService;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -39,11 +39,10 @@ import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.htrace.Trace;
 
 /**
@@ -91,16 +90,11 @@ public class DisableTableHandler extends EventHandler {
       // DISABLED or ENABLED.
       //TODO: reevaluate this since we have table locks now
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-            this.tableName, ZooKeeperProtos.Table.State.DISABLING,
-            ZooKeeperProtos.Table.State.ENABLED)) {
-            LOG.info("Table " + tableName + " isn't enabled; skipping disable");
-            throw new TableNotEnabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " disabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+          this.tableName, TableState.State.DISABLING,
+          TableState.State.ENABLED)) {
+          LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+          throw new TableNotEnabledException(this.tableName);
         }
       }
       success = true;
@@ -139,8 +133,6 @@ public class DisableTableHandler extends EventHandler {
       }
     } catch (IOException e) {
       LOG.error("Error trying to disable table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to disable table " + this.tableName, e);
     } finally {
       releaseTableLock();
     }
@@ -156,10 +148,10 @@ public class DisableTableHandler extends EventHandler {
     }
   }
 
-  private void handleDisableTable() throws IOException, CoordinatedStateException {
+  private void handleDisableTable() throws IOException {
-    // Set table disabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     boolean done = false;
     while (true) {
       // Get list of online regions that are of this table.  Regions that are
@@ -188,7 +180,7 @@ public class DisableTableHandler extends EventHandler {
     }
     // Flip the table to disabled if success.
     if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     LOG.info("Disabled table, " + this.tableName + ", is done=" + done);
   }
 
@@ -208,7 +200,7 @@ public class DisableTableHandler extends EventHandler {
       RegionStates regionStates = assignmentManager.getRegionStates();
       for (HRegionInfo region: regions) {
         if (regionStates.isRegionInTransition(region)
-            && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) {
+            && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
           continue;
         }
         final HRegionInfo hri = region;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 2e6a10a..0b914d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -26,15 +26,15 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
@@ -97,14 +97,9 @@ public class EnableTableHandler extends EventHandler {
         if (!this.skipTableStateCheck) {
           throw new TableNotFoundException(tableName);
         }
-        try {
-          this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLING, true);
-          throw new TableNotFoundException(tableName);
-        } catch (CoordinatedStateException e) {
-          // TODO : Use HBCK to clear such nodes
-          LOG.warn("Failed to delete the ENABLING node for the table " + tableName
-              + ".  The table will remain unusable. Run HBCK to manually fix the problem.");
+        TableStateManager tsm = assignmentManager.getTableStateManager();
+        if (tsm.isTableState(tableName, TableState.State.ENABLING)) {
+          tsm.setDeletedTable(tableName);
         }
       }
 
@@ -113,16 +108,11 @@ public class EnableTableHandler extends EventHandler {
       // After that, no other requests can be accepted until the table reaches
       // DISABLED or ENABLED.
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-              this.tableName, ZooKeeperProtos.Table.State.ENABLING,
-              ZooKeeperProtos.Table.State.DISABLED)) {
-            LOG.info("Table " + tableName + " isn't disabled; skipping enable");
-            throw new TableNotDisabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " enabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+            this.tableName, TableState.State.ENABLING,
+            TableState.State.DISABLED)) {
+          LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+          throw new TableNotDisabledException(this.tableName);
         }
       }
       success = true;
@@ -158,11 +148,7 @@ public class EnableTableHandler extends EventHandler {
       if (cpHost != null) {
         cpHost.postEnableTableHandler(this.tableName, null);
       }
-    } catch (IOException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (InterruptedException e) {
+    } catch (IOException | InterruptedException e) {
       LOG.error("Error trying to enable the table " + this.tableName, e);
     } finally {
       releaseTableLock();
@@ -179,14 +165,13 @@ public class EnableTableHandler extends EventHandler {
     }
   }
 
-  private void handleEnableTable() throws IOException, CoordinatedStateException,
+  private void handleEnableTable() throws IOException,
       InterruptedException {
     // I could check table is disabling and if so, not enable but require
     // that user first finish disabling but that might be obnoxious.
 
-    // Set table enabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.ENABLING);
+      TableState.State.ENABLING);
     boolean done = false;
     ServerManager serverManager = ((HMaster)this.server).getServerManager();
     // Get the regions of this table. We're done when all listed
@@ -251,7 +236,7 @@ public class EnableTableHandler extends EventHandler {
     if (done) {
       // Flip the table to enabled.
       this.assignmentManager.getTableStateManager().setTableState(
-        this.tableName, ZooKeeperProtos.Table.State.ENABLED);
+        this.tableName, TableState.State.ENABLED);
       LOG.info("Table '" + this.tableName
       + "' was successfully enabled. Status: done=" + done);
     } else {
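
One behavioral note from the hunk above: when enable is asked for a table that
is missing from meta, a leftover ENABLING entry is now simply dropped from the
state cache instead of the old "delete the znode, warn, run HBCK" path, because
there is no longer a znode to leak. Sketch:

    TableStateManager tsm = assignmentManager.getTableStateManager();
    if (tsm.isTableState(tableName, TableState.State.ENABLING)) {
      tsm.setDeletedTable(tableName);  // drop the stale ENABLING entry
    }
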
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index 43a0f65..0081f16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -29,6 +29,8 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -36,16 +38,15 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -137,7 +138,7 @@ public abstract class TableEventHandler extends EventHandler {
       handleTableOperation(hris);
       if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
           getAssignmentManager().getTableStateManager().isTableState(
-          tableName, ZooKeeperProtos.Table.State.ENABLED)) {
+          tableName, TableState.State.ENABLED)) {
         if (reOpenAllRegions(hris)) {
           LOG.info("Completed table operation " + eventType + " on table " +
               tableName);
@@ -236,10 +237,10 @@ public abstract class TableEventHandler extends EventHandler {
    * @throws FileNotFoundException
    * @throws IOException
    */
-  public HTableDescriptor getTableDescriptor()
+  public TableDescriptor getTableDescriptor()
   throws FileNotFoundException, IOException {
-    HTableDescriptor htd =
-      this.masterServices.getTableDescriptors().get(tableName);
+    TableDescriptor htd =
+      this.masterServices.getTableDescriptors().getDescriptor(tableName);
     if (htd == null) {
       throw new IOException("HTableDescriptor missing for " + tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index a3dc1a4..c9df56e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -336,7 +336,7 @@ public class AddColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 152af45..a8459f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -33,20 +33,21 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -299,8 +300,8 @@ public class CreateTableProcedure
         !(env.getMasterServices().isInitialized()) && tableName.isSystemTable();
     if (!skipTableStateCheck) {
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (tsm.isTableState(tableName, true, ZooKeeperProtos.Table.State.ENABLING,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+      if (tsm.isTableState(tableName, TableState.State.ENABLING,
+          TableState.State.ENABLED)) {
         LOG.warn("The table " + tableName + " does not exist in meta but has a znode. " +
                "run hbck to fix inconsistencies.");
         setFailure("master-create-table", new TableExistsException(getTableName()));
@@ -375,7 +376,7 @@ public class CreateTableProcedure
     // Using a copy of the descriptor, the table will be created in ENABLING state first.
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
     new FSTableDescriptors(env.getMasterConfiguration()).createTableDescriptorForTableDirectory(
-      tempTableDir, hTableDescriptor, false);
+      tempTableDir, new TableDescriptor(hTableDescriptor), false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
@@ -448,14 +449,14 @@ public class CreateTableProcedure
 
     // Mark the table as Enabling
     assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING);
+        TableState.State.ENABLING);
 
     // Trigger immediate assignment of the regions in round-robin fashion
     ModifyRegionUtils.assignRegions(assignmentManager, regions);
 
     // Enable table
     assignmentManager.getTableStateManager()
-      .setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
+      .setTableState(tableName, TableState.State.ENABLED);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index 5b1a69c..3e6568b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -357,7 +357,7 @@ public class DeleteColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index bec599c..7fe2a89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Trace;
@@ -286,8 +286,8 @@ public class DisableTableProcedure
       // this issue.
       TableStateManager tsm =
         env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.setTableStateIfInStates(tableName, ZooKeeperProtos.Table.State.DISABLING,
-            ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLED)) {
+      if (!tsm.setTableStateIfInStates(tableName, TableState.State.DISABLING,
+            TableState.State.DISABLING, TableState.State.ENABLED)) {
         LOG.info("Table " + tableName + " isn't enabled; skipping disable");
         setFailure("master-disable-table", new TableNotEnabledException(tableName));
         canTableBeDisabled = false;
@@ -311,7 +311,7 @@ public class DisableTableProcedure
       try {
         // If the state was changed, undo it.
         if (env.getMasterServices().getAssignmentManager().getTableStateManager().isTableState(
-            tableName, ZooKeeperProtos.Table.State.DISABLING)) {
+            tableName, TableState.State.DISABLING)) {
           EnableTableProcedure.setTableStateToEnabled(env, tableName);
         }
       } catch (Exception e) {
@@ -344,7 +344,7 @@ public class DisableTableProcedure
     // Mark the table state as DISABLING.
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
   }
 
   /**
@@ -435,7 +435,7 @@ public class DisableTableProcedure
     // Flip the table to disabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
       tableName,
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     LOG.info("Disabled table, " + tableName + ", is completed.");
   }
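
The guard used above is what makes concurrent disables safe: the move to DISABLING is applied only if the current state is ENABLED (or already DISABLING, for retries), and the return value says whether it happened. A minimal sketch, with an illustrative wrapper around the setTableStateIfInStates call the patch introduces:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class DisableGuardSketch {
      // Returns true only when the transition to DISABLING was actually applied.
      static boolean tryMarkDisabling(TableStateManager tsm, TableName table)
          throws Exception {
        return tsm.setTableStateIfInStates(table,
            TableState.State.DISABLING,                             // new state
            TableState.State.DISABLING, TableState.State.ENABLED);  // allowed current states
      }
    }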
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index f4a4538..c06bb07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
@@ -45,11 +45,11 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -307,7 +307,7 @@ public class EnableTableProcedure
       // was implemented. With table lock, there is no need to set the state here (it will
       // set the state later on). A quick state check should be enough for us to move forward.
       TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
-      if (!tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+      if (!tsm.isTableState(tableName, TableState.State.DISABLED)) {
         LOG.info("Table " + tableName + " isn't disabled; skipping enable");
         setFailure("master-enable-table", new TableNotDisabledException(this.tableName));
         canTableBeEnabled = false;
@@ -344,8 +344,7 @@ public class EnableTableProcedure
     // Mark the table state as ENABLING.
     LOG.info("Attempting to enable the table " + tableName);
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName,
-      ZooKeeperProtos.Table.State.ENABLING);
+      tableName, TableState.State.ENABLING);
   }
 
   /**
@@ -490,8 +489,7 @@ public class EnableTableProcedure
       final TableName tableName) throws HBaseException, IOException {
     // Flip the table to Enabled
     env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
-      tableName,
-      ZooKeeperProtos.Table.State.ENABLED);
+      tableName, TableState.State.ENABLED);
     LOG.info("Table '" + tableName + "' was successfully enabled.");
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index 2e8499f..c6ff1b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -78,7 +78,7 @@ public final class MasterDDLOperationHelper {
 
     // We only execute this procedure with table online if online schema change config is set.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)
+        .isTableState(tableName, TableState.State.DISABLED)
         && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
       throw new TableNotDisabledException(tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index 5a6b592..590e4ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -32,12 +32,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 
 /**
@@ -316,7 +316,7 @@ public class ModifyColumnFamilyProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index e785684..fa9746f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -42,11 +42,11 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
@@ -294,7 +294,7 @@ public class ModifyTableProcedure
         env.getMasterServices().getTableDescriptors().get(getTableName());
 
     if (env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       // We only execute this procedure with table online if online schema change config is set.
       if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
         throw new TableNotDisabledException(getTableName());
@@ -432,7 +432,7 @@ public class ModifyTableProcedure
   private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
     // This operation only runs when the table is enabled.
     if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
-        .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) {
+        .isTableState(getTableName(), TableState.State.ENABLED)) {
       return;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index b6e7a7c..ef04cfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ServerCrashState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -526,7 +526,7 @@ implements ServerProcedureInterface {
           } else if (rit != null) {
             if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                 && am.getTableStateManager().isTableState(hri.getTable(),
-                ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+                TableState.State.DISABLED, TableState.State.DISABLING) ||
                 am.getReplicasToClose().contains(hri)) {
               // If the table was partially disabled and the RS went down, we should clear the
               // RIT and remove the node for the region.
@@ -713,7 +713,7 @@ implements ServerProcedureInterface {
     }
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     if (disabled){
       LOG.info("The table " + hri.getTable() + " was disabled.  Hence not proceeding.");
       return false;
@@ -725,7 +725,7 @@ implements ServerProcedureInterface {
       return false;
     }
     boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     if (disabling) {
       LOG.info("The table " + hri.getTable() + " is disabled.  Hence not assigning region" +
         hri.getEncodedName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 98018f0..5874c59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -622,7 +622,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
     AssignmentManager assignmentMgr = master.getAssignmentManager();
     if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+      TableState.State.ENABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table enabled, starting distributed snapshot for "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -634,7 +634,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
     // For disabled table, snapshot is created by the master
     else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.DISABLED)) {
+        TableState.State.DISABLED)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Table is disabled, running snapshot entirely on master "
             + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -801,7 +801,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     // Execute the restore/clone operation
     if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
       if (master.getAssignmentManager().getTableStateManager().isTableState(
-          TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
+          TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +
             TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
             "perform a restore operation" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 6da05cd..8a1c11a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -39,12 +39,14 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -373,7 +375,7 @@ public class NamespaceUpgrade implements Tool {
       HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
       newDesc.setName(newTableName);
       new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-        newTablePath, newDesc, true);
+        newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true);
     }
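
Because the descriptor written to the filesystem now carries the table state, upgrade code pins the state explicitly when wrapping an HTableDescriptor, as the hunk above does. A minimal sketch (assumes ENABLED is the desired state, as in the migration):

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableState;

    final class WrapSketch {
      // The state travels with the on-disk descriptor instead of a ZK znode.
      static TableDescriptor enabled(HTableDescriptor schema) {
        return new TableDescriptor(schema, TableState.State.ENABLED);
      }
    }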
 
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 0b483d9..37528b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -111,13 +112,14 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-        compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
+        compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
+            path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        compactRegion(tableDir, htd, path, compactOnce, major);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
       } else {
@@ -128,9 +130,9 @@ public class CompactionTool extends Configured implements Tool {
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-        compactRegion(tableDir, htd, regionDir, compactOnce, major);
+        compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
       }
     }
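
Read-side tools follow the inverse pattern: getTableDescriptorFromFs() now hands back a TableDescriptor and schema-only callers unwrap it. A minimal sketch (the wrapper class is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    final class UnwrapSketch {
      // Schema-only callers unwrap; td also carries the table state if needed.
      static HTableDescriptor schemaOf(FileSystem fs, Path tableDir) throws IOException {
        TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
        return td.getHTableDescriptor();
      }
    }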
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 11b6120..15360d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -90,6 +90,7 @@ public class WALCellCodec implements Codec {
    * Fully prepares the codec for use.
    * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    *          uses a {@link WALCellCodec}.
+   * @param cellCodecClsName name of the codec class to instantiate
    * @param compression compression the codec should use
    * @return a {@link WALCellCodec} ready for use.
    * @throws UnsupportedOperationException if the codec cannot be instantiated
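
For orientation, the factory this javadoc documents is called roughly as below. Treat the null-fallback behavior as an assumption drawn from the javadoc, not something verified by this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
    import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

    final class CodecSketch {
      // Instantiates the codec named by cellCodecClsName (assumed: a null name
      // means "use the codec configured in conf", per the javadoc above).
      static WALCellCodec codecFor(Configuration conf, String cellCodecClsName,
          CompressionContext compression) throws Exception {
        return WALCellCodec.create(conf, cellCodecClsName, compression);
      }
    }
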
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index c76a3a9..0b54c4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -305,7 +307,8 @@ public final class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir)
+            .getHTableDescriptor();
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
@@ -410,7 +413,8 @@ public final class SnapshotManifest {
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
       new FSTableDescriptors(conf, workingDirFs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+        .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
+            htd, TableState.State.ENABLED), false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest for " + this.desc.getName());
       convertToV2SingleManifest();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7e161ca..8a163a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -38,7 +38,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -88,15 +90,10 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, HTableDescriptor> cache =
-    new ConcurrentHashMap<TableName, HTableDescriptor>();
+  private final Map<TableName, TableDescriptor> cache =
+    new ConcurrentHashMap<TableName, TableDescriptor>();
 
   /**
-   * Table descriptor for <code>hbase:meta</code> catalog table
-   */
-   private final HTableDescriptor metaTableDescriptor;
-
-   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
    * This instance can do write operations (is not read only).
@@ -121,7 +118,6 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
   @Override
@@ -148,12 +144,12 @@ public class FSTableDescriptors implements TableDescriptors {
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public HTableDescriptor get(final TableName tablename)
+  public TableDescriptor getDescriptor(final TableName tablename)
   throws IOException {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return metaTableDescriptor;
+      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
     }
     // hbase:meta is already handled. If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt, throw an exception.
@@ -163,74 +159,101 @@ public class FSTableDescriptors implements TableDescriptors {
 
     if (usecache) {
       // Look in cache of descriptors.
-      HTableDescriptor cachedtdm = this.cache.get(tablename);
+      TableDescriptor cachedtdm = this.cache.get(tablename);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
     }
-    HTableDescriptor tdmt = null;
+    TableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, e);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (TableInfoMissingException e) {
       // ignore. This is a regular operation.
-    } catch (IOException ioe) {
+    } catch (NullPointerException | IOException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, ioe);
+          + tablename, e);
     }
     // last HTD written wins
     if (usecache && tdmt != null) {
       this.cache.put(tablename, tdmt);
     }
-
     return tdmt;
   }
 
   /**
+   * Get the current table descriptor for the given table, or null if none exists.
+   *
+   * Uses a local cache of the descriptor but still checks the filesystem on each call
+   * to see if a newer file has been created since the cached one was read.
+   */
+  @Override
+  public HTableDescriptor get(TableName tableName) throws IOException {
+    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) {
+      cachehits++;
+      return HTableDescriptor.META_TABLEDESC;
+    }
+    TableDescriptor descriptor = getDescriptor(tableName);
+    return descriptor == null ? null : descriptor.getHTableDescriptor();
+  }
+
+  /**
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, HTableDescriptor> getAll()
+  public Map<String, TableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
 
     if (fsvisited && usecache) {
-      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
-        htds.put(entry.getKey().toString(), entry.getValue());
+      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
+        tds.put(entry.getKey().toString(), entry.getValue());
       }
       // add hbase:meta to the response
-      htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
-        HTableDescriptor.META_TABLEDESC);
+      tds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
+          new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED));
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        HTableDescriptor htd = null;
+        TableDescriptor td = null;
         try {
-          htd = get(FSUtils.getTableName(d));
+          td = getDescriptor(FSUtils.getTableName(d));
         } catch (FileNotFoundException fnfe) {
           // inability to retrieve one HTD shouldn't stop us getting the remaining
           LOG.warn("Trouble retrieving htd", fnfe);
         }
-        if (htd == null) {
+        if (td == null) {
           allvisited = false;
           continue;
         } else {
-          htds.put(htd.getTableName().getNameAsString(), htd);
+          tds.put(td.getHTableDescriptor().getTableName().getNameAsString(), td);
         }
         fsvisited = allvisited;
       }
     }
-    return htds;
+    return tds;
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+  /**
+   * Returns a map from table name to table descriptor for all tables.
    */
   @Override
+  public Map<String, HTableDescriptor> getAll() throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
+    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
+        .entrySet()) {
+      htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
+    }
+    return htds;
+  }
+
+  /**
+   * Find descriptors by namespace.
+   * @see #get(org.apache.hadoop.hbase.TableName)
+   */
+  @Override
   public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
     Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
@@ -255,21 +278,51 @@ public class FSTableDescriptors implements TableDescriptors {
    * and updates the local cache with it.
    */
   @Override
-  public void add(HTableDescriptor htd) throws IOException {
+  public void add(TableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
+    TableName tableName = htd.getHTableDescriptor().getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
       throw new NotImplementedException();
     }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
+        "Cannot add a table descriptor for a reserved subdirectory name: "
+            + htd.getHTableDescriptor().getNameAsString());
     }
     updateTableDescriptor(htd);
   }
 
   /**
+   * Adds (or updates) the table descriptor to the FileSystem
+   * and updates the local cache with it.
+   */
+  @Override
+  public void add(HTableDescriptor htd) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+    }
+    TableName tableName = htd.getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
+      throw new NotImplementedException();
+    }
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
+      throw new NotImplementedException(
+          "Cannot add a table descriptor for a reserved subdirectory name: "
+              + htd.getNameAsString());
+    }
+    TableDescriptor descriptor = getDescriptor(htd.getTableName());
+    if (descriptor == null) {
+      descriptor = new TableDescriptor(htd);
+    } else {
+      descriptor.setHTableDescriptor(htd);
+    }
+    updateTableDescriptor(descriptor);
+  }
+
+  /**
    * Removes the table descriptor from the local cache and returns it.
    * If not in read only mode, it also deletes the entire table directory(!)
    * from the FileSystem.
@@ -286,11 +339,11 @@ public class FSTableDescriptors implements TableDescriptors {
         throw new IOException("Failed delete of " + tabledir.toString());
       }
     }
-    HTableDescriptor descriptor = this.cache.remove(tablename);
+    TableDescriptor descriptor = this.cache.remove(tablename);
     if (descriptor == null) {
       return null;
     } else {
-      return descriptor;
+      return descriptor.getHTableDescriptor();
     }
   }
 
@@ -474,8 +527,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-    Path hbaseRootDir, TableName tableName) throws IOException {
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+      Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
@@ -485,37 +538,16 @@ public class FSTableDescriptors implements TableDescriptors {
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-    Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
-    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
-    return getTableDescriptorFromFs(fs, tableDir, rewritePb);
-  }
-  /**
-   * Returns the latest table descriptor for the table located at the given directory
-   * directly from the file system if it exists.
-   * @throws TableInfoMissingException if there is no descriptor
-   */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
     throws IOException {
-    return getTableDescriptorFromFs(fs, tableDir, false);
-  }
-
-  /**
-   * Returns the latest table descriptor for the table located at the given directory
-   * directly from the file system if it exists.
-   * @throws TableInfoMissingException if there is no descriptor
-   */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
-    boolean rewritePb)
-  throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
-    return readTableDescriptor(fs, status, rewritePb);
+    return readTableDescriptor(fs, status, false);
   }
 
-  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
       boolean rewritePb) throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
@@ -525,30 +557,32 @@ public class FSTableDescriptors implements TableDescriptors {
     } finally {
       fsDataInputStream.close();
     }
-    HTableDescriptor htd = null;
+    TableDescriptor td = null;
     try {
-      htd = HTableDescriptor.parseFrom(content);
+      td = TableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
       // we have old HTableDescriptor here
       try {
         HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
         LOG.warn("Found old table descriptor, converting to new format for table " +
           ohtd.getTableName());
-        htd = new HTableDescriptor(ohtd);
-        if (rewritePb) rewriteTableDescriptor(fs, status, htd);
+        td = new TableDescriptor(ohtd);
+        if (rewritePb) {
+          rewriteTableDescriptor(fs, status, td);
+        }
       } catch (DeserializationException e1) {
         throw new IOException("content=" + Bytes.toShort(content), e1);
       }
     }
     if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
       // Convert the file over to be pb before leaving here.
-      rewriteTableDescriptor(fs, status, htd);
+      rewriteTableDescriptor(fs, status, td);
     }
-    return htd;
+    return td;
   }
 
   private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
-    final HTableDescriptor td)
+    final TableDescriptor td)
   throws IOException {
     Path tableInfoDir = status.getPath().getParent();
     Path tableDir = tableInfoDir.getParent();
@@ -560,17 +594,18 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd)
+  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    Path tableDir = getTableDir(htd.getTableName());
-    Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
+    TableName tableName = td.getHTableDescriptor().getTableName();
+    Path tableDir = getTableDir(tableName);
+    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
     if (usecache) {
-      this.cache.put(htd.getTableName(), htd);
+      this.cache.put(td.getHTableDescriptor().getTableName(), td);
     }
     return p;
   }
@@ -621,9 +656,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final HTableDescriptor htd, final Path tableDir,
-    final FileStatus currentDescriptorFile)
-  throws IOException {
+    final TableDescriptor htd, final Path tableDir,
+    final FileStatus currentDescriptorFile) throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
     // This directory is never removed to avoid removing it out from under a concurrent writer.
     Path tmpTableDir = new Path(tableDir, TMP_DIR);
@@ -652,7 +686,7 @@ public class FSTableDescriptors implements TableDescriptors {
       }
       tableInfoDirPath = new Path(tableInfoDir, filename);
       try {
-        writeHTD(fs, tempPath, htd);
+        writeTD(fs, tempPath, htd);
         fs.mkdirs(tableInfoDirPath.getParent());
         if (!fs.rename(tempPath, tableInfoDirPath)) {
           throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
@@ -676,7 +710,7 @@ public class FSTableDescriptors implements TableDescriptors {
     return tableInfoDirPath;
   }
 
-  private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
+  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
@@ -693,20 +727,29 @@ public class FSTableDescriptors implements TableDescriptors {
    * Used by tests.
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
     return createTableDescriptor(htd, false);
   }
 
   /**
+   * Create a new HTableDescriptor in HDFS. Happens when we are creating a table.
+   * Used by tests.
+   * @return True if we successfully created the file.
+   */
+  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+    return createTableDescriptor(new TableDescriptor(htd), false);
+  }
+
+  /**
    * Create a new HTableDescriptor in HDFS. Happens when we are creating a table.
    * If forceCreation is true, any existing table descriptor is overwritten.
    *
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDir(htd.getTableName());
+    Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
 
@@ -722,7 +765,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      HTableDescriptor htd, boolean forceCreation) throws IOException {
+      TableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
@@ -743,4 +786,3 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
 }
-
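
After this change FSTableDescriptors exposes two views of one cache: getDescriptor() returns the state-carrying TableDescriptor, while get() keeps the old schema-only contract (and is implemented on top of getDescriptor()). A minimal usage sketch; the wrapper method is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    final class FsdSketch {
      static HTableDescriptor schemaOnly(Configuration conf, TableName table)
          throws Exception {
        FSTableDescriptors fstd = new FSTableDescriptors(conf);
        TableDescriptor td = fstd.getDescriptor(table);  // schema + state, or null
        return (td == null) ? null : td.getHTableDescriptor();
      }
    }
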
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 6cb3d20..6ed2b3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -84,6 +83,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -107,13 +107,13 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -128,9 +128,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -1337,9 +1334,9 @@ public class HBaseFsck extends Configured implements Closeable {
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          HTableDescriptor htd =
+          TableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
-          modTInfo.htds.add(htd);
+          modTInfo.htds.add(htd.getHTableDescriptor());
         } catch (IOException ioe) {
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@@ -1394,7 +1391,7 @@ public class HBaseFsck extends Configured implements Closeable {
     for (String columnFamily : columns) {
       htd.addFamily(new HColumnDescriptor(columnFamily));
     }
-    fstd.createTableDescriptor(htd, true);
+    fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
     return true;
   }
 
@@ -1442,7 +1439,7 @@ public class HBaseFsck extends Configured implements Closeable {
           if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
-            fstd.createTableDescriptor(htd, true);
+            fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
             j++;
             iter.remove();
           }
@@ -1802,19 +1799,16 @@ public class HBaseFsck extends Configured implements Closeable {
    * @throws IOException
    */
   private void loadDisabledTables()
-  throws ZooKeeperConnectionException, IOException {
+  throws IOException {
     HConnectionManager.execute(new HConnectable<Void>(getConf()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
-        try {
-          for (TableName tableName :
-              ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
-            disabledTables.add(tableName);
+        TableName[] tables = connection.listTableNames();
+        for (TableName table : tables) {
+          if (connection.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disabledTables.add(table);
           }
-        } catch (KeeperException ke) {
-          throw new IOException(ke);
-        } catch (InterruptedException e) {
-          throw new InterruptedIOException();
         }
         return null;
       }
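
The hbck change above swaps the direct ZK read for the client-visible table state API. As a standalone sketch of the same pattern (assuming an open HConnection named connection; getTableState() and TableState ship with this patch):

    // Collect tables that are DISABLED or DISABLING via the client API
    // instead of reading the ZK table znodes directly.
    Set<TableName> disabledOrDisabling = new HashSet<>();
    for (TableName table : connection.listTableNames()) {
      if (connection.getTableState(table)
          .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
        disabledOrDisabling.add(table);
      }
    }
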
@@ -3546,12 +3540,15 @@ public class HBaseFsck extends Configured implements Closeable {
   /**
   * Check whether an orphaned table ZNode exists and fix it if requested.
    * @throws IOException
-   * @throws KeeperException
-   * @throws InterruptedException
    */
   private void checkAndFixOrphanedTableZNodes()
-      throws IOException, KeeperException, InterruptedException {
-    Set<TableName> enablingTables = ZKTableStateClientSideReader.getEnablingTables(zkw);
+      throws IOException {
+    Set<TableName> enablingTables = new HashSet<>();
+    for (TableName tableName: admin.listTableNames()) {
+      if (connection.getTableState(tableName).getState().equals(TableState.State.ENABLING)) {
+        enablingTables.add(tableName);
+      }
+    }
     String msg;
     TableInfo tableInfo;
 
@@ -3570,21 +3567,12 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     if (orphanedTableZNodes.size() > 0 && this.fixTableZNodes) {
-      ZKTableStateManager zkTableStateMgr = new ZKTableStateManager(zkw);
-
       for (TableName tableName : orphanedTableZNodes) {
-        try {
-          // Set the table state to be disabled so that if we made mistake, we can trace
-          // the history and figure it out.
-          // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
-          // Both approaches works.
-          zkTableStateMgr.setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
-        } catch (CoordinatedStateException e) {
-          // This exception should not happen here
-          LOG.error(
-            "Got a CoordinatedStateException while fixing the ENABLING table znode " + tableName,
-            e);
-        }
+        // Set the table state to be disabled so that if we made a mistake, we can trace
+        // the history and figure it out.
+        // Another choice is to call checkAndRemoveTableState() to delete the orphaned ZNode.
+        // Both approaches work.
+        admin.disableTable(tableName);
       }
     }
   }
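
With the state kept in the table descriptor, the orphan repair above can stay on the public Admin API rather than writing znodes. A minimal sketch under the same assumptions (open admin and connection handles as in HBaseFsck):

    // Tables stuck in ENABLING are forced to DISABLED so an operator can
    // inspect the history before re-enabling them.
    for (TableName tableName : admin.listTableNames()) {
      if (connection.getTableState(tableName).getState() == TableState.State.ENABLING) {
        admin.disableTable(tableName);
      }
    }
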
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 7f2c85d..02b5980 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -155,7 +155,8 @@ class HMerge {
 
       this.rootDir = FSUtils.getRootDir(conf);
       Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
+      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
+          .getHTableDescriptor();
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       final Configuration walConf = new Configuration(conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index adab203..1530d28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -153,9 +154,9 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd, meta, info1, info2);
+    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());
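
Both merge tools illustrate the new read path: FSTableDescriptors.getTableDescriptorFromFs() now returns a TableDescriptor (descriptor plus state), and legacy callers unwrap it. A sketch, with fs, rootdir and tableName as in Merge above:

    TableDescriptor td =
        FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, tableName);
    HTableDescriptor htd = td.getHTableDescriptor(); // legacy descriptor
    TableState.State state = td.getTableState();     // state travels with it
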
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index 57ec87d..82308be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,6 +30,9 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@@ -153,8 +159,9 @@ public class ZKDataMigrator extends Configured implements Tool {
       }
       byte[] data = ZKUtil.getData(zkw, znode);
       if (ProtobufUtil.isPBMagicPrefix(data)) continue;
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
       data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
       ZKUtil.setData(zkw, znode, data);
     }
@@ -232,15 +239,14 @@ public class ZKDataMigrator extends Configured implements Tool {
   }
 
   private void migrateClusterKeyToPB(ZooKeeperWatcher zkw, String peerZnode, byte[] data)
-      throws KeeperException, NoNodeException {
+      throws KeeperException {
     ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder()
         .setClusterkey(Bytes.toString(data)).build();
     ZKUtil.setData(zkw, peerZnode, ProtobufUtil.prependPBMagic(peer.toByteArray()));
   }
 
   private void migratePeerStateToPB(ZooKeeperWatcher zkw, byte[] data,
- String peerStatePath)
-      throws KeeperException, NoNodeException {
+     String peerStatePath) throws KeeperException {
     String state = Bytes.toString(data);
     if (ZooKeeperProtos.ReplicationState.State.ENABLED.name().equals(state)) {
       ZKUtil.setData(zkw, peerStatePath, ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
@@ -249,6 +255,80 @@ public class ZKDataMigrator extends Configured implements Tool {
     }
   }
 
+  /**
+   * Method for table state migration.
+   * Reads the legacy table states from ZK so they can be applied to the
+   * new internal representation and then deleted.
+   * Used by the master to clean up the migration from ZK-based states to
+   * table descriptor based states.
+   */
+  @Deprecated
+  public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
+      throws KeeperException, InterruptedException {
+    Map<TableName, TableState.State> rv = new HashMap<>();
+    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
+    if (children == null) {
+      return rv;
+    }
+    for (String child: children) {
+      TableName tableName = TableName.valueOf(child);
+      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
+      TableState.State newState = TableState.State.ENABLED;
+      if (state != null) {
+        switch (state) {
+          case ENABLED:
+            newState = TableState.State.ENABLED;
+            break;
+          case DISABLED:
+            newState = TableState.State.DISABLED;
+            break;
+          case DISABLING:
+            newState = TableState.State.DISABLING;
+            break;
+          case ENABLING:
+            newState = TableState.State.ENABLING;
+            break;
+          default:
+        }
+      }
+      rv.put(tableName, newState);
+    }
+    return rv;
+  }
+
+  /**
+   * Gets table state from ZK.
+   * @param zkw ZooKeeperWatcher instance to use
+   * @param tableName table we're checking
+   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
+   * @throws KeeperException
+   */
+  @Deprecated
+  private static ZooKeeperProtos.DeprecatedTableState.State getTableState(
+      final ZooKeeperWatcher zkw, final TableName tableName)
+      throws KeeperException, InterruptedException {
+    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+    byte [] data = ZKUtil.getData(zkw, znode);
+    if (data == null || data.length <= 0) {
+      return null;
+    }
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data,
+          magicLen, data.length - magicLen).build();
+      return t.getState();
+    } catch (InvalidProtocolBufferException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
   public static void main(String args[]) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args));
   }
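
queryForTableStates() above hands back the legacy ZK states keyed by table name; the master can then replay them into the descriptor-based store. A sketch, assuming a master-side TableStateManager with the setTableState(TableName, TableState.State) call used elsewhere in this patch:

    Map<TableName, TableState.State> legacy = ZKDataMigrator.queryForTableStates(zkw);
    for (Map.Entry<TableName, TableState.State> e : legacy.entrySet()) {
      tableStateManager.setTableState(e.getKey(), e.getValue());
    }
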
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 9273b6a..bb703ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -50,6 +50,9 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -61,7 +64,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -71,7 +73,6 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
@@ -98,7 +100,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
@@ -123,9 +124,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;
 
 /**
@@ -335,13 +333,14 @@ public class WALSplitter {
         LOG.warn("Nothing to split in log file " + logPath);
         return true;
       }
-      if (csm != null) {
-        try {
-          TableStateManager tsm = csm.getTableStateManager();
-          disablingOrDisabledTables = tsm.getTablesInStates(
-          ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Can't get disabling/disabled tables", e);
+      if (csm != null) {
+        HConnection scc = csm.getServer().getConnection();
+        TableName[] tables = scc.listTableNames();
+        for (TableName table : tables) {
+          if (scc.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disablingOrDisabledTables.add(table);
+          }
         }
       }
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
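
WALSplitter now resolves disabled tables through the connection as well; TableState.inStates(...) is just a membership test over the supplied states. A plausible shape of that helper (the real one lives in the new TableState class added by this patch):

    public boolean inStates(TableState.State... target) {
      for (TableState.State candidate : target) {
        if (candidate == this.state) { // this.state: the table's current state
          return true;
        }
      }
      return false;
    }
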
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
deleted file mode 100644
index db00c14..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Implementation of TableStateManager which reads, caches and sets state
- * up in ZooKeeper.  If multiple read/write clients, will make for confusion.
- * Code running on client side without consensus context should use
- * {@link ZKTableStateClientSideReader} instead.
- *
- * <p>To save on trips to the zookeeper ensemble, internally we cache table
- * state.
- */
-@InterfaceAudience.Private
-public class ZKTableStateManager implements TableStateManager {
-  // A znode will exist under the table directory if it is in any of the
-  // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING},
-  // or {@link TableState#DISABLED}.  If {@link TableState#ENABLED}, there will
-  // be no entry for a table in zk.  Thats how it currently works.
-
-  private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class);
-  private final ZooKeeperWatcher watcher;
-
-  /**
-   * Cache of what we found in zookeeper so we don't have to go to zk ensemble
-   * for every query.  Synchronize access rather than use concurrent Map because
-   * synchronization needs to span query of zk.
-   */
-  private final Map<TableName, ZooKeeperProtos.Table.State> cache =
-    new HashMap<TableName, ZooKeeperProtos.Table.State>();
-
-  public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException,
-      InterruptedException {
-    super();
-    this.watcher = zkw;
-    populateTableStates();
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @throws KeeperException, InterruptedException
-   */
-  private void populateTableStates() throws KeeperException, InterruptedException {
-    synchronized (this.cache) {
-      List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
-      if (children == null) return;
-      for (String child: children) {
-        TableName tableName = TableName.valueOf(child);
-        ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName);
-        if (state != null) this.cache.put(tableName, state);
-      }
-    }
-  }
-
-  /**
-   * Sets table state in ZK. Sets no watches.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      LOG.info("Moving table " + tableName + " state from " + this.cache.get(tableName)
-        + " to " + state);
-      try {
-        setTableStateInZK(tableName, state);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfInStates(TableName tableName,
-                                         ZooKeeperProtos.Table.State newState,
-                                         ZooKeeperProtos.Table.State... states)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      // Transition ENABLED->DISABLING has to be performed with a hack, because
-      // we treat empty state as enabled in this case because 0.92- clusters.
-      if (
-          (newState == ZooKeeperProtos.Table.State.DISABLING) &&
-               this.cache.get(tableName) != null && !isTableState(tableName, states) ||
-          (newState != ZooKeeperProtos.Table.State.DISABLING &&
-               !isTableState(tableName, states) )) {
-        return false;
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfNotInStates(TableName tableName,
-                                            ZooKeeperProtos.Table.State newState,
-                                            ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        // If the table is in the one of the states from the states list, the cache
-        // might be out-of-date, try to find it out from the master source (zookeeper server).
-        //
-        // Note: this adds extra zookeeper server calls and might have performance impact.
-        // However, this is not the happy path so we should not reach here often. Therefore,
-        // the performance impact should be minimal to none.
-        try {
-          ZooKeeperProtos.Table.State curstate = getTableState(watcher, tableName);
-
-          if (isTableInState(Arrays.asList(states), curstate)) {
-            return false;
-          }
-        } catch (KeeperException e) {
-          throw new CoordinatedStateException(e);
-        } catch (InterruptedException e) {
-          throw new CoordinatedStateException(e);
-        }
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  private void setTableStateInZK(final TableName tableName,
-                                 final ZooKeeperProtos.Table.State state)
-      throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString());
-    if (ZKUtil.checkExists(this.watcher, znode) == -1) {
-      ZKUtil.createAndFailSilent(this.watcher, znode);
-    }
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(state);
-      byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
-      ZKUtil.setData(this.watcher, znode, data);
-      this.cache.put(tableName, state);
-    }
-  }
-
-  /**
-   * Checks if table is marked in specified state in ZK (using cache only). {@inheritDoc}
-   */
-  @Override
-  public boolean isTableState(final TableName tableName,
-      final ZooKeeperProtos.Table.State... states) {
-    return isTableState(tableName, false, states); // only check cache
-  }
-
-  /**
-   * Checks if table is marked in specified state in ZK. {@inheritDoc}
-   */
-  @Override
-  public boolean isTableState(final TableName tableName, final boolean checkSource,
-      final ZooKeeperProtos.Table.State... states) {
-    boolean isTableInSpecifiedState;
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State currentState = this.cache.get(tableName);
-      if (checkSource) {
-        // The cache might be out-of-date, try to find it out from the master source (zookeeper
-        // server) and update the cache.
-        try {
-          ZooKeeperProtos.Table.State stateInZK = getTableState(watcher, tableName);
-
-          if (currentState != stateInZK) {
-            if (stateInZK != null) {
-              this.cache.put(tableName, stateInZK);
-            } else {
-              this.cache.remove(tableName);
-            }
-            currentState = stateInZK;
-          }
-        } catch (KeeperException | InterruptedException e) {
-          // Contacting zookeeper failed.  Let us just trust the value in cache.
-        }
-      }
-      return isTableInState(Arrays.asList(states), currentState);
-    }
-  }
-
-  /**
-   * Deletes the table in zookeeper. Fails silently if the table is not currently disabled in
-   * zookeeper. Sets no watches. {@inheritDoc}
-   */
-  @Override
-  public void setDeletedTable(final TableName tableName)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (this.cache.remove(tableName) == null) {
-        LOG.warn("Moving table " + tableName + " state to deleted but was already deleted");
-      }
-      try {
-        ZKUtil.deleteNodeFailSilent(this.watcher,
-          ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * check if table is present.
-   *
-   * @param tableName table we're working on
-   * @return true if the table is present
-   */
-  @Override
-  public boolean isTablePresent(final TableName tableName) {
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State state = this.cache.get(tableName);
-      return !(state == null);
-    }
-  }
-
-  /**
-   * Gets a list of all the tables set as disabling in zookeeper.
-   * @return Set of disabling tables, empty Set if none
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  @Override
-  public Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException {
-    try {
-      return getAllTables(states);
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                                       boolean deletePermanentState)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        this.cache.remove(tableName);
-        if (deletePermanentState) {
-          try {
-            ZKUtil.deleteNodeFailSilent(this.watcher,
-                ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-          } catch (KeeperException e) {
-            throw new CoordinatedStateException(e);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets a list of all the tables of specified states in zookeeper.
-   * @return Set of tables of specified states, empty Set if none
-   * @throws KeeperException
-   */
-  Set<TableName> getAllTables(final ZooKeeperProtos.Table.State... states)
-      throws KeeperException, InterruptedIOException {
-
-    Set<TableName> allTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode);
-    if(children == null) return allTables;
-    for (String child: children) {
-      TableName tableName = TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state;
-      try {
-        state = getTableState(watcher, tableName);
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException();
-      }
-      for (ZooKeeperProtos.Table.State expectedState: states) {
-        if (state == expectedState) {
-          allTables.add(tableName);
-          break;
-        }
-      }
-    }
-    return allTables;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   */
-  private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-                                                   final TableName tableName)
-    throws KeeperException, InterruptedException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) return null;
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-      return builder.getState();
-    } catch (IOException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-
-  /**
-   * @return true if current state isn't null and is contained
-   * in the list of expected states.
-   */
-  private boolean isTableInState(final List<ZooKeeperProtos.Table.State> expectedStates,
-                       final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && expectedStates.contains(currentState);
-  }
-}
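
For reference, the deleted manager stored each table's state as a pb-magic-prefixed protobuf under the per-table znode (ENABLED tables historically kept no znode at all). A sketch of that legacy write, using the DeprecatedTableState message the migrator above still parses:

    ZooKeeperProtos.DeprecatedTableState.Builder builder =
        ZooKeeperProtos.DeprecatedTableState.newBuilder()
            .setState(ZooKeeperProtos.DeprecatedTableState.State.DISABLED);
    byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
    ZKUtil.setData(zkw, znode, data); // znode: <tableZNode>/<tableName>
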
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ec1e32c..a37c55d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3390,6 +3390,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
   }
 
+
   /**
    * Make sure that at least the specified number of region servers
    * are running
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
index 946b812..5b7ba49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -54,6 +55,7 @@ import java.util.Set;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
 
 
 /**
@@ -98,70 +100,72 @@ public class TestDrainingServer {
     final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
 
-    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      "zkWatcher-Test", abortable, true);
+    try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+      "zkWatcher-Test", abortable, true)) {
 
-    Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
+      Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
 
-    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
-    onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
+      onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
+      onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
 
-    Mockito.when(server.getConfiguration()).thenReturn(conf);
-    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-    Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
+      Mockito.when(server.getConfiguration()).thenReturn(conf);
+      Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+      Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+      Mockito.when(server.getRegionServerVersion(Mockito.any(ServerName.class))).thenReturn("0.0.0");
 
-    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-    cp.initialize(server);
-    cp.start();
+      CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+      cp.initialize(server);
+      cp.start();
 
-    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+      Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
 
-    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-    Mockito.when(serverManager.getOnlineServersList())
-    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+      Mockito.when(serverManager.getOnlineServersList())
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
 
-    Mockito.when(serverManager.createDestinationServersList())
-        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(null))
-        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList())
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(null))
+          .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
+      Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+          new ArrayList<ServerName>(onlineServers.keySet()));
 
-    for (ServerName sn : onlineServers.keySet()) {
-      Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
-      .thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
-      .thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
-    }
+      for (ServerName sn : onlineServers.keySet()) {
+        Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
+        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
+            .thenReturn(RegionOpeningState.OPENED);
+        Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
+            .thenReturn(RegionOpeningState.OPENED);
+        Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
+      }
 
-    Mockito.when(master.getServerManager()).thenReturn(serverManager);
+      Mockito.when(master.getServerManager()).thenReturn(serverManager);
 
-    am = new AssignmentManager(server, serverManager,
-        balancer, startupMasterExecutor("mockExecutorService"), null, null);
+      TableStateManager tsm = mock(TableStateManager.class);
+      am = new AssignmentManager(server, serverManager,
+          balancer, startupMasterExecutor("mockExecutorService"), null, null, tsm);
 
-    Mockito.when(master.getAssignmentManager()).thenReturn(am);
-    Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
+      Mockito.when(master.getAssignmentManager()).thenReturn(am);
+      Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);
 
-    am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
+      am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));
 
-    zkWatcher.registerListenerFirst(am);
+      zkWatcher.registerListenerFirst(am);
 
-    addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
+      addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);
 
-    am.assign(REGIONINFO, true);
+      am.assign(REGIONINFO, true);
 
-    setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
-    setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
+      setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
+      setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);
 
-    am.waitForAssignment(REGIONINFO);
+      am.waitForAssignment(REGIONINFO);
 
-    assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
-    assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
+      assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
+      assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
+    }
   }
 
   @Test
@@ -207,80 +211,82 @@ public class TestDrainingServer {
     bulk.put(REGIONINFO_D, SERVERNAME_D);
     bulk.put(REGIONINFO_E, SERVERNAME_E);
 
-    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-        "zkWatcher-BulkAssignTest", abortable, true);
-
-    Mockito.when(server.getConfiguration()).thenReturn(conf);
-    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
-    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
-
-    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
-    cp.initialize(server);
-    cp.start();
-
-    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
-
-    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
-    Mockito.when(serverManager.getOnlineServersList()).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-
-    Mockito.when(serverManager.createDestinationServersList()).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
-      new ArrayList<ServerName>(onlineServers.keySet()));
-    Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
-        new ArrayList<ServerName>(onlineServers.keySet()));
-
-    for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
-      Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
-      Mockito.when(serverManager.sendRegionClose(entry.getValue(),
-        entry.getKey(), -1)).thenReturn(true);
-      Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
-        entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
-      Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
-    }
-
-    Mockito.when(master.getServerManager()).thenReturn(serverManager);
-
-    drainedServers.add(SERVERNAME_A);
-    drainedServers.add(SERVERNAME_B);
-    drainedServers.add(SERVERNAME_C);
-    drainedServers.add(SERVERNAME_D);
-
-    am = new AssignmentManager(server, serverManager,
-      balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
-
-    Mockito.when(master.getAssignmentManager()).thenReturn(am);
-
-    zkWatcher.registerListener(am);
-
-    for (ServerName drained : drainedServers) {
-      addServerToDrainedList(drained, onlineServers, serverManager);
-    }
-
-    am.assign(bulk);
-
-    Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
-    for (RegionState rs : regionsInTransition) {
-      setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
-    }
-
-    am.waitForAssignment(REGIONINFO_A);
-    am.waitForAssignment(REGIONINFO_B);
-    am.waitForAssignment(REGIONINFO_C);
-    am.waitForAssignment(REGIONINFO_D);
-    am.waitForAssignment(REGIONINFO_E);
-
-    Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
-    for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
-      LOG.info("Region Assignment: "
-          + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
-      bunchServersAssigned.add(entry.getValue());
-    }
-
-    for (ServerName sn : drainedServers) {
-      assertFalse(bunchServersAssigned.contains(sn));
-    }
+   try (ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+        "zkWatcher-BulkAssignTest", abortable, true)) {
+
+     Mockito.when(server.getConfiguration()).thenReturn(conf);
+     Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
+     Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);
+
+     CoordinatedStateManager cp = new ZkCoordinatedStateManager();
+     cp.initialize(server);
+     cp.start();
+
+     Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);
+
+     Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
+     Mockito.when(serverManager.getOnlineServersList()).thenReturn(
+         new ArrayList<ServerName>(onlineServers.keySet()));
+
+     Mockito.when(serverManager.createDestinationServersList()).thenReturn(
+         new ArrayList<ServerName>(onlineServers.keySet()));
+     Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
+         new ArrayList<ServerName>(onlineServers.keySet()));
+     Mockito.when(serverManager.createDestinationServersList(Mockito.anyList())).thenReturn(
+         new ArrayList<ServerName>(onlineServers.keySet()));
+
+     for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
+       Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
+       Mockito.when(serverManager.sendRegionClose(entry.getValue(),
+           entry.getKey(), -1)).thenReturn(true);
+       Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
+           entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
+       Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
+     }
+
+     Mockito.when(master.getServerManager()).thenReturn(serverManager);
+
+     drainedServers.add(SERVERNAME_A);
+     drainedServers.add(SERVERNAME_B);
+     drainedServers.add(SERVERNAME_C);
+     drainedServers.add(SERVERNAME_D);
+
+     TableStateManager tsm = mock(TableStateManager.class);
+     am = new AssignmentManager(server, serverManager, balancer,
+         startupMasterExecutor("mockExecutorServiceBulk"), null, null, tsm);
+
+     Mockito.when(master.getAssignmentManager()).thenReturn(am);
+
+     zkWatcher.registerListener(am);
+
+     for (ServerName drained : drainedServers) {
+       addServerToDrainedList(drained, onlineServers, serverManager);
+     }
+
+     am.assign(bulk);
+
+     Set<RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
+     for (RegionState rs : regionsInTransition) {
+       setRegionOpenedOnZK(zkWatcher, rs.getServerName(), rs.getRegion());
+     }
+
+     am.waitForAssignment(REGIONINFO_A);
+     am.waitForAssignment(REGIONINFO_B);
+     am.waitForAssignment(REGIONINFO_C);
+     am.waitForAssignment(REGIONINFO_D);
+     am.waitForAssignment(REGIONINFO_E);
+
+     Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
+     for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
+       LOG.info("Region Assignment: "
+           + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
+       bunchServersAssigned.add(entry.getValue());
+     }
+
+     for (ServerName sn : drainedServers) {
+       assertFalse(bunchServersAssigned.contains(sn));
+     }
+   }
   }
 
   private void addServerToDrainedList(ServerName serverName,
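
As the hunks above show, the AssignmentManager constructor gains a TableStateManager argument; in tests a Mockito mock is enough. The new construction, names as in TestDrainingServer:

    TableStateManager tsm = mock(TableStateManager.class);
    am = new AssignmentManager(server, serverManager, balancer,
        startupMasterExecutor("mockExecutorService"), null, null, tsm);
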
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index f963461..9d5259a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -42,8 +42,8 @@ public class TestFSTableDescriptorForceCreation {
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-
-    assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
+    assertTrue("Should create new table descriptor",
+        fstd.createTableDescriptor(new TableDescriptor(htd), false));
   }
 
   @Test
@@ -56,7 +56,8 @@ public class TestFSTableDescriptorForceCreation {
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     fstd.add(htd);
-    assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
+    assertFalse("Should not create new table descriptor",
+        fstd.createTableDescriptor(new TableDescriptor(htd), false));
   }
 
   @Test
@@ -67,9 +68,10 @@ public class TestFSTableDescriptorForceCreation {
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-    fstd.createTableDescriptor(htd, false);
+    TableDescriptor td = new TableDescriptor(htd);
+    fstd.createTableDescriptor(td, false);
     assertTrue("Should create new table descriptor",
-        fstd.createTableDescriptor(htd, true));
+        fstd.createTableDescriptor(td, true));
   }
 
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 4660bbb..8d0e418 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -160,8 +160,8 @@ public class TestHColumnDescriptorDefaultVersions {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    hcds = htd.getColumnFamilies();
+    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    hcds = td.getHTableDescriptor().getColumnFamilies();
     verifyHColumnDescriptor(expected, hcds, tableName, families);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
new file mode 100644
index 0000000..19c1136
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test setting values in the descriptor
+ */
+@Category(SmallTests.class)
+public class TestTableDescriptor {
+  final static Log LOG = LogFactory.getLog(TestTableDescriptor.class);
+
+  @Test
+  public void testPb() throws DeserializationException, IOException {
+    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    final int v = 123;
+    htd.setMaxFileSize(v);
+    htd.setDurability(Durability.ASYNC_WAL);
+    htd.setReadOnly(true);
+    htd.setRegionReplication(2);
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
+    byte[] bytes = td.toByteArray();
+    TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes);
+    assertEquals(td, deserializedTd);
+    assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor());
+    assertEquals(td.getTableState(), deserializedTd.getTableState());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index c0b32b8..0a99845 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -49,11 +50,8 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -255,7 +253,7 @@ public class TestAdmin1 {
     this.admin.disableTable(ht.getName());
     assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.DISABLED));
+        ht.getName(), TableState.State.DISABLED));
 
     // Test that table is disabled
     get = new Get(row);
@@ -282,7 +280,7 @@ public class TestAdmin1 {
     this.admin.enableTable(table);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.ENABLED));
+        ht.getName(), TableState.State.ENABLED));
 
     // Test that table is enabled
     try {
@@ -354,7 +352,7 @@ public class TestAdmin1 {
     assertEquals(numTables + 1, tables.length);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         .getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED));
+        TableName.valueOf("testCreateTable"), TableState.State.ENABLED));
   }
 
   @Test (timeout=300000)
@@ -1340,11 +1338,9 @@ public class TestAdmin1 {
 
   @Test (timeout=300000)
   public void testEnableDisableAddColumnDeleteColumn() throws Exception {
-	ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
     TableName tableName = TableName.valueOf("testEnableDisableAddColumnDeleteColumn");
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
-    while (!ZKTableStateClientSideReader.isEnabledTable(zkw,
-      TableName.valueOf("testEnableDisableAddColumnDeleteColumn"))) {
+    while (!this.admin.isTableEnabled(tableName)) {
       Thread.sleep(10);
     }
     this.admin.disableTable(tableName);
@@ -1487,16 +1483,4 @@ public class TestAdmin1 {
       this.admin.deleteTable(tableName);
     }
   }
-
-  @Test (timeout=30000)
-  public void testTableNotFoundException() throws Exception {
-    ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
-    TableName table = TableName.valueOf("tableNotExists");
-    try {
-      ZKTableStateClientSideReader.isDisabledTable(zkw, table);
-      fail("Shouldn't be here");
-    } catch (TableNotFoundException e) {
-      // This is expected.
-    }
-  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index db26d37..6258f6d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -71,6 +71,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public TableStateManager getTableStateManager() {
+    return null;
+  }
+
+  @Override
   public MasterCoprocessorHost getMasterCoprocessorHost() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 28f9e83..92c045f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -69,7 +70,6 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -156,10 +156,9 @@ public class TestAssignmentManagerOnCluster {
           Bytes.toBytes(metaServerName.getServerName()));
         master.assignmentManager.waitUntilNoRegionsInTransition(60000);
       }
-      RegionState metaState =
-          MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-        assertEquals("Meta should be not in transition",
-            metaState.getState(), RegionState.State.OPEN);
+      RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper());
+      assertEquals("Meta should be not in transition",
+          metaState.getState(), RegionState.State.OPEN);
       assertNotEquals("Meta should be moved off master",
         metaServerName, master.getServerName());
       cluster.killRegionServer(metaServerName);
@@ -289,7 +288,8 @@ public class TestAssignmentManagerOnCluster {
     String table = "testAssignRegionOnRestartedServer";
     TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20);
     TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
-    TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect
+    // restart the master so that the conf change takes effect
+    TEST_UTIL.getMiniHBaseCluster().startMaster();
 
     ServerName deadServer = null;
     HMaster master = null;
@@ -888,7 +888,7 @@ public class TestAssignmentManagerOnCluster {
         }
       }
 
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLING);
       List<HRegionInfo> toAssignRegions = am.cleanOutCrashedServerReferences(destServerName);
       assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty());
       assertTrue("Regions to be assigned should be empty.", am.getRegionStates()
@@ -897,7 +897,7 @@ public class TestAssignmentManagerOnCluster {
       if (hri != null && serverName != null) {
         am.regionOnline(hri, serverName);
       }
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLED);
       TEST_UTIL.deleteTable(table);
     }
   }
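
The hunks above capture the whole client-facing migration: ZooKeeperProtos.Table.State becomes TableState.State, reached through the AssignmentManager's TableStateManager. A consolidated sketch, using only the signatures visible in this diff (the wrapper class and method names around them are illustrative, not part of the commit):

    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    public class TableStateMigrationSketch {
      // Drives a table through the same transitions the test above uses.
      public static void disableAndVerify(TableStateManager tsm, TableName table)
          throws Exception {
        // Old code wrote ZooKeeperProtos.Table.State.* to a per-table znode;
        // the new manager takes TableState.State and persists it through the
        // master's TableDescriptors instead of ZooKeeper.
        tsm.setTableState(table, TableState.State.DISABLING);
        tsm.setTableState(table, TableState.State.DISABLED);
        // isTableState accepts several acceptable states, as in
        // TestMasterRestartAfterDisablingTable further down.
        assertTrue(tsm.isTableState(table,
            TableState.State.DISABLED, TableState.State.DISABLING));
      }
    }
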
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 397d5a8..6b499f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -54,13 +56,13 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
 import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
@@ -352,13 +354,18 @@ public class TestCatalogJanitor {
       return new TableDescriptors() {
         @Override
         public HTableDescriptor remove(TableName tablename) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
           return null;
         }
 
         @Override
         public Map<String, HTableDescriptor> getAll() throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+          return null;
+        }
+
+        @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
+          // noop
           return null;
         }
 
@@ -369,14 +376,24 @@ public class TestCatalogJanitor {
         }
 
         @Override
+        public TableDescriptor getDescriptor(TableName tablename)
+            throws IOException {
+          return createTableDescriptor();
+        }
+
+        @Override
         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
           return null;
         }
 
         @Override
         public void add(HTableDescriptor htd) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+        }
 
+        @Override
+        public void add(TableDescriptor htd) throws IOException {
+          // noop
         }
         @Override
         public void setCacheOn() throws IOException {
@@ -541,6 +558,11 @@ public class TestCatalogJanitor {
     }
 
     @Override
+    public TableStateManager getTableStateManager() {
+      return null;
+    }
+
+    @Override
     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
         boolean forcible, User user) throws IOException {
     }
@@ -1169,6 +1191,11 @@ public class TestCatalogJanitor {
     return htd;
   }
 
+  private TableDescriptor createTableDescriptor() {
+    TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED);
+    return htd;
+  }
+
   private MultiResponse buildMultiResponse(MultiRequest req) {
     MultiResponse.Builder builder = MultiResponse.newBuilder();
     RegionActionResult.Builder regionActionResultBuilder =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 34715aa..80e05e00 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
@@ -84,7 +84,7 @@ public class TestMaster {
 
     try (HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
       assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME,
-        ZooKeeperProtos.Table.State.ENABLED));
+          TableState.State.ENABLED));
       TEST_UTIL.loadTable(ht, FAMILYNAME, false);
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 2228188..fcbe0a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -43,20 +43,19 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -71,10 +70,8 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.data.Stat;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -302,8 +299,8 @@ public class TestMasterFailover {
     log("Beginning to mock scenarios");
 
     // Disable the disabledTable in ZK
-    TableStateManager zktable = new ZKTableStateManager(zkw);
-    zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED);
+    TableStateManager tsm = master.getTableStateManager();
+    tsm.setTableState(disabledTable, TableState.State.DISABLED);
 
     /*
      *  ZK = OFFLINE
@@ -619,7 +616,7 @@ public class TestMasterFailover {
 
     assertTrue(" Table must be enabled.", master.getAssignmentManager()
         .getTableStateManager().isTableState(TableName.valueOf("enabledTable"),
-        ZooKeeperProtos.Table.State.ENABLED));
+        TableState.State.ENABLED));
     // we also need regions assigned out on the dead server
     List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
     enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6));
@@ -679,13 +676,11 @@ public class TestMasterFailover {
     log("Beginning to mock scenarios");
 
     // Disable the disabledTable in ZK
-    TableStateManager zktable = new ZKTableStateManager(zkw);
-    zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED);
+    TableStateManager tsm = master.getTableStateManager();
+    tsm.setTableState(disabledTable, TableState.State.DISABLED);
 
     assertTrue(" The enabled table should be identified on master fail over.",
-        zktable.isTableState(TableName.valueOf("enabledTable"),
-          ZooKeeperProtos.Table.State.ENABLED));
-
+        tsm.isTableState(TableName.valueOf("enabledTable"), TableState.State.ENABLED));
     /*
      * ZK = CLOSING
      */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index a2ecfb4..5af7b47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -102,8 +102,8 @@ public class TestMasterRestartAfterDisablingTable {
 
     assertTrue("The table should not be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING));
+        TableName.valueOf("tableRestart"), TableState.State.DISABLED,
+        TableState.State.DISABLING));
     log("Enabling table\n");
     // Need a new Admin, the previous one is on the old master
     Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
@@ -118,7 +118,7 @@ public class TestMasterRestartAfterDisablingTable {
           6, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager()
-        .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
+        .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED));
     ht.close();
     TEST_UTIL.shutdownMiniCluster();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
index 9ecac42..c1affd5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -140,7 +140,10 @@ public class TestOpenedRegionHandler {
       // create a node with OPENED state
       zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
           region, server.getServerName());
-      when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw));
+      MasterServices masterServices = Mockito.mock(MasterServices.class);
+      when(masterServices.getTableDescriptors()).thenReturn(new FSTableDescriptors(conf));
+      TableStateManager tsm = new TableStateManager(masterServices);
+      when(am.getTableStateManager()).thenReturn(tsm);
       Stat stat = new Stat();
       String nodeName = ZKAssign.getNodeName(zkw, region.getRegionInfo()
           .getEncodedName());
@@ -171,8 +174,8 @@ public class TestOpenedRegionHandler {
       } catch (Exception e) {
         expectedException = true;
       }
-      assertFalse("The process method should not throw any exception.",
-          expectedException);
+      assertFalse("The process method should not throw any exception. "
+          , expectedException);
       List<String> znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw,
           zkw.assignmentZNode);
       String regionName = znodes.get(0);
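
The Mockito wiring in this hunk is the minimal setup a unit test needs to obtain a real TableStateManager after this change. A condensed sketch, assuming only the constructor and calls shown above (the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.master.MasterServices;
    import org.apache.hadoop.hbase.master.TableStateManager;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.mockito.Mockito;

    public class TableStateManagerWiringSketch {
      // A TableStateManager now needs only a MasterServices that can hand
      // out TableDescriptors; no ZooKeeper watcher is involved.
      static TableStateManager wire(Configuration conf) throws Exception {
        MasterServices masterServices = Mockito.mock(MasterServices.class);
        Mockito.when(masterServices.getTableDescriptors())
            .thenReturn(new FSTableDescriptors(conf));
        return new TableStateManager(masterServices);
      }
    }
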
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
index 0410294..a35e359 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java
@@ -19,10 +19,8 @@ package org.apache.hadoop.hbase.master;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 16a6450..7e5656b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -36,7 +36,6 @@ import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.exceptions.LockTimeoutException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -388,12 +386,14 @@ public class TestTableLockManager {
     choreService.scheduleChore(alterThread);
     choreService.scheduleChore(splitThread);
     TEST_UTIL.waitTableEnabled(tableName);
+
     while (true) {
       List<HRegionInfo> regions = admin.getTableRegions(tableName);
       LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
       assertEquals(admin.getTableDescriptor(tableName), desc);
       for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        assertEquals(desc, region.getTableDesc());
+        HTableDescriptor regionTableDesc = region.getTableDesc();
+        assertEquals(desc, regionTableDesc);
       }
       if (regions.size() >= 5) {
         break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index ff479d4..86a54e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
@@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -188,13 +188,13 @@ public class MasterProcedureTestingUtility {
   public static void validateTableIsEnabled(final HMaster master, final TableName tableName)
       throws IOException {
     TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
-    assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED));
+    assertTrue(tsm.isTableState(tableName, TableState.State.ENABLED));
   }
 
   public static void validateTableIsDisabled(final HMaster master, final TableName tableName)
       throws IOException {
     TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
-    assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED));
+    assertTrue(tsm.isTableState(tableName, TableState.State.DISABLED));
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
index f27150e..c4ec0ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure2.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -45,6 +45,8 @@ public class TestCreateTableProcedure2 {
     TEST_UTIL.shutdownMiniZKCluster();
   }
 
+  /*
+  Note: the relevant fix was undone by HBASE-7767, so this test is commented out.
   @Test
   public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Exception {
     // Step 1: start mini zk cluster.
@@ -54,8 +56,9 @@ public class TestCreateTableProcedure2 {
     TableName tableName = TableName.valueOf("hbase:namespace");
     ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
     String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-    builder.setState(ZooKeeperProtos.Table.State.ENABLED);
+    HBaseProtos.TableState.Builder builder = HBaseProtos.TableState.newBuilder();
+    builder.setState(HBaseProtos.TableState.State.ENABLED);
+    builder.setTable(ProtobufUtil.toProtoTableName(tableName));
     byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
     ZKUtil.createSetData(zkw, znode, data);
     LOG.info("Create an orphaned Znode " + znode);
@@ -65,4 +68,5 @@ public class TestCreateTableProcedure2 {
     TEST_UTIL.startMiniCluster();
     assertTrue(TEST_UTIL.getHBaseCluster().getLiveMasterThreads().size() == 1);
   }
+  */
 }
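
With the test above commented out, the shape of the replacement znode payload is easy to miss. A minimal round-trip sketch; the builder calls are taken from the disabled test, and ProtobufUtil.lengthOfPBMagic() is assumed to be available on this branch:

    import java.util.Arrays;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class TableStateZNodeSketch {
      public static void main(String[] args) throws Exception {
        // Build the HBaseProtos.TableState payload that replaces the old
        // ZooKeeperProtos.Table znode data; it now carries the table name too.
        HBaseProtos.TableState pb = HBaseProtos.TableState.newBuilder()
            .setState(HBaseProtos.TableState.State.ENABLED)
            .setTable(ProtobufUtil.toProtoTableName(TableName.valueOf("hbase:namespace")))
            .build();
        byte[] data = ProtobufUtil.prependPBMagic(pb.toByteArray());

        // Reading it back: strip the PB magic prefix, then parse.
        byte[] pbBytes = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
        HBaseProtos.TableState parsed = HBaseProtos.TableState.parseFrom(pbBytes);
        System.out.println(parsed.getState()); // ENABLED
      }
    }
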
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 0b5e83f..d849f02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -273,8 +274,9 @@ public class TestTableDescriptorModificationFromClient {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
... 374 lines suppressed ...