Posted to commits@hbase.apache.org by en...@apache.org on 2016/06/18 00:37:48 UTC

[2/2] hbase git commit: HBASE-15467 Remove 1.x/2.0 TableDescriptor incompatibility

HBASE-15467 Remove 1.x/2.0 TableDescriptor incompatibility


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdb0cc88
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdb0cc88
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdb0cc88

Branch: refs/heads/master
Commit: bdb0cc8808af7c3d08af4a506f34b8341726b58e
Parents: 65a8d77
Author: Enis Soztutar <en...@apache.org>
Authored: Fri Jun 17 17:16:57 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Fri Jun 17 17:25:04 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HTableDescriptor.java   |  44 --
 .../hbase/protobuf/generated/HBaseProtos.java   | 731 ++-----------------
 hbase-protocol/src/main/protobuf/HBase.proto    |   5 -
 .../apache/hadoop/hbase/TableDescriptor.java    | 165 -----
 .../apache/hadoop/hbase/TableDescriptors.java   |  20 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  11 +-
 .../hadoop/hbase/master/MasterFileSystem.java   |   4 +-
 .../hadoop/hbase/master/TableStateManager.java  |   6 +-
 .../procedure/CloneSnapshotProcedure.java       |   3 +-
 .../master/procedure/CreateTableProcedure.java  |   4 +-
 .../procedure/TruncateTableProcedure.java       |   2 +-
 .../hbase/regionserver/CompactionTool.java      |  13 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |   7 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 231 ++----
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   9 +-
 .../org/apache/hadoop/hbase/util/HMerge.java    |   3 +-
 .../org/apache/hadoop/hbase/util/Merge.java     |   5 +-
 .../TestHColumnDescriptorDefaultVersions.java   |   6 +-
 .../hadoop/hbase/TestTableDescriptor.java       |  55 --
 .../hadoop/hbase/master/TestCatalogJanitor.java |  29 +-
 .../MasterProcedureTestingUtility.java          |  19 +-
 ...stTableDescriptorModificationFromClient.java |   5 +-
 .../hbase/regionserver/TestDefaultMemStore.java |   5 +-
 .../TestRegionMergeTransactionOnCluster.java    |   2 -
 .../hbase/snapshot/SnapshotTestingUtils.java    |   7 +-
 .../hbase/util/TestFSTableDescriptors.java      |  61 +-
 .../apache/hadoop/hbase/util/TestMergeTool.java |   6 +-
 27 files changed, 237 insertions(+), 1221 deletions(-)
----------------------------------------------------------------------
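
For context, the incompatibility being removed: the 2.0 (master) code had
grown a server-side org.apache.hadoop.hbase.TableDescriptor wrapper around
HTableDescriptor, plus a matching hbase.pb.TableDescriptor protobuf message
(commented in the .proto as the "On HDFS representation of table state"),
while 1.x clients and tooling only know HTableDescriptor. After this commit
the TableDescriptors interface hands back HTableDescriptor directly again.
A minimal, hedged sketch of the lookup path that survives the change (the
default-configuration connection and the table name "t1" are illustrative
assumptions):

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DescriptorLookup {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Post-patch there is a single descriptor type on both sides of
        // the RPC: HTableDescriptor.
        HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t1"));
        System.out.println(htd.getTableName() + " families=" + htd.getFamilies().size());
      }
    }
  }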


http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 4283330..13f1bd9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
-import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -1422,49 +1421,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for <code>hbase:meta</code> catalog table
-   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
-   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
-   */
-  @Deprecated
-  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
-      TableName.META_TABLE_NAME,
-      new HColumnDescriptor[] {
-          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
-              // Ten is arbitrary number.  Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-              // Enable cache of data blocks in L1 if more than one caching tier deployed:
-              // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true),
-          new HColumnDescriptor(HConstants.TABLE_FAMILY)
-              // Ten is arbitrary number.  Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                  // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-                  // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                  // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true)
-      });
-
-  static {
-    try {
-      META_TABLEDESC.addCoprocessor(
-          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
-          null, Coprocessor.PRIORITY_SYSTEM, null);
-    } catch (IOException ex) {
-      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
-      throw new RuntimeException(ex);
-    }
-  }
-
   public final static String NAMESPACE_FAMILY_INFO = "info";
   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

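Note that META_TABLEDESC was already a deprecated constant; its removed
javadoc pointed at a runtime lookup as the replacement. A hedged sketch of
that replacement ('admin' is assumed to come from an open Connection):

  import java.io.IOException;

  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;

  final class MetaDescriptorLookup {
    // Illustrative stand-in for the deleted HTableDescriptor.META_TABLEDESC.
    // Unlike a static constant, this reflects what the cluster actually runs
    // with (e.g. meta block size / max versions read from the Configuration).
    static HTableDescriptor metaDescriptor(Admin admin) throws IOException {
      return admin.getTableDescriptor(TableName.META_TABLE_NAME);
    }
  }
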
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 45dfd09..f08ba8f 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -3126,575 +3126,6 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.TableState)
   }
 
-  public interface TableDescriptorOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .hbase.pb.TableSchema schema = 1;
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    boolean hasSchema();
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema();
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.TableDescriptor}
-   *
-   * <pre>
-   ** On HDFS representation of table state. 
-   * </pre>
-   */
-  public static final class TableDescriptor extends
-      com.google.protobuf.GeneratedMessage
-      implements TableDescriptorOrBuilder {
-    // Use TableDescriptor.newBuilder() to construct.
-    private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final TableDescriptor defaultInstance;
-    public static TableDescriptor getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public TableDescriptor getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private TableDescriptor(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = schema_.toBuilder();
-              }
-              schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(schema_);
-                schema_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<TableDescriptor> PARSER =
-        new com.google.protobuf.AbstractParser<TableDescriptor>() {
-      public TableDescriptor parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new TableDescriptor(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<TableDescriptor> getParserForType() {
-      return PARSER;
-    }
-
-    private int bitField0_;
-    // required .hbase.pb.TableSchema schema = 1;
-    public static final int SCHEMA_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_;
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    public boolean hasSchema() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
-      return schema_;
-    }
-    /**
-     * <code>required .hbase.pb.TableSchema schema = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
-      return schema_;
-    }
-
-    private void initFields() {
-      schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      if (!hasSchema()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getSchema().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, schema_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, schema_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj;
-
-      boolean result = true;
-      result = result && (hasSchema() == other.hasSchema());
-      if (hasSchema()) {
-        result = result && getSchema()
-            .equals(other.getSchema());
-      }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasSchema()) {
-        hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
-        hash = (53 * hash) + getSchema().hashCode();
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.TableDescriptor}
-     *
-     * <pre>
-     ** On HDFS representation of table state. 
-     * </pre>
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getSchemaFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        if (schemaBuilder_ == null) {
-          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
-        } else {
-          schemaBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (schemaBuilder_ == null) {
-          result.schema_ = schema_;
-        } else {
-          result.schema_ = schemaBuilder_.build();
-        }
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this;
-        if (other.hasSchema()) {
-          mergeSchema(other.getSchema());
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (!hasSchema()) {
-          
-          return false;
-        }
-        if (!getSchema().isInitialized()) {
-          
-          return false;
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // required .hbase.pb.TableSchema schema = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_;
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public boolean hasSchema() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
-        if (schemaBuilder_ == null) {
-          return schema_;
-        } else {
-          return schemaBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
-        if (schemaBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          schema_ = value;
-          onChanged();
-        } else {
-          schemaBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public Builder setSchema(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
-        if (schemaBuilder_ == null) {
-          schema_ = builderForValue.build();
-          onChanged();
-        } else {
-          schemaBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
-        if (schemaBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
-            schema_ =
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial();
-          } else {
-            schema_ = value;
-          }
-          onChanged();
-        } else {
-          schemaBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public Builder clearSchema() {
-        if (schemaBuilder_ == null) {
-          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
-          onChanged();
-        } else {
-          schemaBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getSchemaFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
-        if (schemaBuilder_ != null) {
-          return schemaBuilder_.getMessageOrBuilder();
-        } else {
-          return schema_;
-        }
-      }
-      /**
-       * <code>required .hbase.pb.TableSchema schema = 1;</code>
-       */
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> 
-          getSchemaFieldBuilder() {
-        if (schemaBuilder_ == null) {
-          schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
-                  schema_,
-                  getParentForChildren(),
-                  isClean());
-          schema_ = null;
-        }
-        return schemaBuilder_;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor)
-    }
-
-    static {
-      defaultInstance = new TableDescriptor(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor)
-  }
-
   public interface ColumnFamilySchemaOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -20113,11 +19544,6 @@ public final class HBaseProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_TableState_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_TableDescriptor_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_TableDescriptor_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ColumnFamilySchema_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -20246,60 +19672,59 @@ public final class HBaseProtos {
       "ameStringPair\"x\n\nTableState\022)\n\005state\030\001 \002" +
       "(\0162\032.hbase.pb.TableState.State\"?\n\005State\022" +
       "\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020",
-      "\002\022\014\n\010ENABLING\020\003\"8\n\017TableDescriptor\022%\n\006sc" +
-      "hema\030\001 \002(\0132\025.hbase.pb.TableSchema\"\201\001\n\022Co" +
-      "lumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\nattrib" +
-      "utes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022/\n\r" +
-      "configuration\030\003 \003(\0132\030.hbase.pb.NameStrin" +
-      "gPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022" +
-      "\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableName" +
-      "\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007" +
-      "offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_" +
-      "id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favored_n",
-      "ode\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017Regi" +
-      "onSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb.Reg" +
-      "ionSpecifier.RegionSpecifierType\022\r\n\005valu" +
-      "e\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REGION" +
-      "_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tTime" +
-      "Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Colum" +
-      "nFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002(\014\022" +
-      "\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRange" +
-      "\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004por" +
-      "t\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocess",
-      "or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na" +
-      "me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
-      "\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
-      "tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\"," +
-      "\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002" +
-      " \001(\003\"\325\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002" +
-      "(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003" +
-      ":\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDes" +
-      "cription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n" +
-      "\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FL",
-      "USH\020\001\022\r\n\tSKIPFLUSH\020\002\"\206\001\n\024ProcedureDescri" +
-      "ption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001" +
-      "(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfigur" +
-      "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n" +
-      "\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037" +
-      "\n\tDoubleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDe" +
-      "cimalMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID" +
-      "\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bit" +
-      "s\030\002 \002(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001" +
-      " \002(\014\022/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.N",
-      "ameStringPair\"\235\001\n\013VersionInfo\022\017\n\007version" +
-      "\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n" +
-      "\004user\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksu" +
-      "m\030\006 \002(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversio" +
-      "n_minor\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010inf" +
-      "oPort\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbas" +
-      "e.pb.VersionInfo*r\n\013CompareType\022\010\n\004LESS\020" +
-      "\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_E" +
-      "QUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020" +
-      "\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020",
-      "\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n" +
-      "\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DA" +
-      "YS\020\007B>\n*org.apache.hadoop.hbase.protobuf" +
-      ".generatedB\013HBaseProtosH\001\240\001\001"
+      "\002\022\014\n\010ENABLING\020\003\"\201\001\n\022ColumnFamilySchema\022\014" +
+      "\n\004name\030\001 \002(\014\022,\n\nattributes\030\002 \003(\0132\030.hbase" +
+      ".pb.BytesBytesPair\022/\n\rconfiguration\030\003 \003(" +
+      "\0132\030.hbase.pb.NameStringPair\"\243\001\n\nRegionIn" +
+      "fo\022\021\n\tregion_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(" +
+      "\0132\023.hbase.pb.TableName\022\021\n\tstart_key\030\003 \001(" +
+      "\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005s" +
+      "plit\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014Fav" +
+      "oredNodes\022*\n\014favored_node\030\001 \003(\0132\024.hbase." +
+      "pb.ServerName\"\236\001\n\017RegionSpecifier\022;\n\004typ",
+      "e\030\001 \002(\0162-.hbase.pb.RegionSpecifier.Regio" +
+      "nSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionS" +
+      "pecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED" +
+      "_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(" +
+      "\004\022\n\n\002to\030\002 \001(\004\"W\n\025ColumnFamilyTimeRange\022\025" +
+      "\n\rcolumn_family\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(" +
+      "\0132\023.hbase.pb.TimeRange\"A\n\nServerName\022\021\n\t" +
+      "host_name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_c" +
+      "ode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-" +
+      "\n\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030",
+      "\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005" +
+      "value\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001" +
+      " \002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n" +
+      "\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\325\001\n\023SnapshotD" +
+      "escription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022" +
+      "\030\n\rcreation_time\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162" +
+      "\".hbase.pb.SnapshotDescription.Type:\005FLU" +
+      "SH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006 \001(\t\".\n\004Ty" +
+      "pe\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH" +
+      "\020\002\"\206\001\n\024ProcedureDescription\022\021\n\tsignature",
+      "\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreation_tim" +
+      "e\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030.hbas" +
+      "e.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongM" +
+      "sg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndou" +
+      "ble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdec" +
+      "imal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits" +
+      "\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Namespa" +
+      "ceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfigurat" +
+      "ion\030\002 \003(\0132\030.hbase.pb.NameStringPair\"\235\001\n\013" +
+      "VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(",
+      "\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004dat" +
+      "e\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rversion" +
+      "_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r\"Q\n\020R" +
+      "egionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014ver" +
+      "sion_info\030\002 \001(\0132\025.hbase.pb.VersionInfo*r" +
+      "\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" +
+      "\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_" +
+      "OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Ti" +
+      "meUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020" +
+      "\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINU",
+      "TES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache" +
+      ".hadoop.hbase.protobuf.generatedB\013HBaseP" +
+      "rotosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -20324,140 +19749,134 @@ public final class HBaseProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TableState_descriptor,
               new java.lang.String[] { "State", });
-          internal_static_hbase_pb_TableDescriptor_descriptor =
-            getDescriptor().getMessageTypes().get(3);
-          internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_TableDescriptor_descriptor,
-              new java.lang.String[] { "Schema", });
           internal_static_hbase_pb_ColumnFamilySchema_descriptor =
-            getDescriptor().getMessageTypes().get(4);
+            getDescriptor().getMessageTypes().get(3);
           internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilySchema_descriptor,
               new java.lang.String[] { "Name", "Attributes", "Configuration", });
           internal_static_hbase_pb_RegionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(5);
+            getDescriptor().getMessageTypes().get(4);
           internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionInfo_descriptor,
               new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
           internal_static_hbase_pb_FavoredNodes_descriptor =
-            getDescriptor().getMessageTypes().get(6);
+            getDescriptor().getMessageTypes().get(5);
           internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_FavoredNodes_descriptor,
               new java.lang.String[] { "FavoredNode", });
           internal_static_hbase_pb_RegionSpecifier_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(6);
           internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionSpecifier_descriptor,
               new java.lang.String[] { "Type", "Value", });
           internal_static_hbase_pb_TimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(7);
           internal_static_hbase_pb_TimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_TimeRange_descriptor,
               new java.lang.String[] { "From", "To", });
           internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor,
               new java.lang.String[] { "ColumnFamily", "TimeRange", });
           internal_static_hbase_pb_ServerName_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_hbase_pb_ServerName_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ServerName_descriptor,
               new java.lang.String[] { "HostName", "Port", "StartCode", });
           internal_static_hbase_pb_Coprocessor_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_Coprocessor_descriptor,
               new java.lang.String[] { "Name", });
           internal_static_hbase_pb_NameStringPair_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameStringPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_NameBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameBytesPair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_BytesBytesPair_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BytesBytesPair_descriptor,
               new java.lang.String[] { "First", "Second", });
           internal_static_hbase_pb_NameInt64Pair_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NameInt64Pair_descriptor,
               new java.lang.String[] { "Name", "Value", });
           internal_static_hbase_pb_SnapshotDescription_descriptor =
-            getDescriptor().getMessageTypes().get(16);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotDescription_descriptor,
               new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", });
           internal_static_hbase_pb_ProcedureDescription_descriptor =
-            getDescriptor().getMessageTypes().get(17);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ProcedureDescription_descriptor,
               new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
           internal_static_hbase_pb_EmptyMsg_descriptor =
-            getDescriptor().getMessageTypes().get(18);
+            getDescriptor().getMessageTypes().get(17);
           internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_EmptyMsg_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_LongMsg_descriptor =
-            getDescriptor().getMessageTypes().get(19);
+            getDescriptor().getMessageTypes().get(18);
           internal_static_hbase_pb_LongMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_LongMsg_descriptor,
               new java.lang.String[] { "LongMsg", });
           internal_static_hbase_pb_DoubleMsg_descriptor =
-            getDescriptor().getMessageTypes().get(20);
+            getDescriptor().getMessageTypes().get(19);
           internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DoubleMsg_descriptor,
               new java.lang.String[] { "DoubleMsg", });
           internal_static_hbase_pb_BigDecimalMsg_descriptor =
-            getDescriptor().getMessageTypes().get(21);
+            getDescriptor().getMessageTypes().get(20);
           internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BigDecimalMsg_descriptor,
               new java.lang.String[] { "BigdecimalMsg", });
           internal_static_hbase_pb_UUID_descriptor =
-            getDescriptor().getMessageTypes().get(22);
+            getDescriptor().getMessageTypes().get(21);
           internal_static_hbase_pb_UUID_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UUID_descriptor,
               new java.lang.String[] { "LeastSigBits", "MostSigBits", });
           internal_static_hbase_pb_NamespaceDescriptor_descriptor =
-            getDescriptor().getMessageTypes().get(23);
+            getDescriptor().getMessageTypes().get(22);
           internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NamespaceDescriptor_descriptor,
               new java.lang.String[] { "Name", "Configuration", });
           internal_static_hbase_pb_VersionInfo_descriptor =
-            getDescriptor().getMessageTypes().get(24);
+            getDescriptor().getMessageTypes().get(23);
           internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_VersionInfo_descriptor,
               new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", });
           internal_static_hbase_pb_RegionServerInfo_descriptor =
-            getDescriptor().getMessageTypes().get(25);
+            getDescriptor().getMessageTypes().get(24);
           internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RegionServerInfo_descriptor,

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-protocol/src/main/protobuf/HBase.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index e5c967a..c36b214 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -58,11 +58,6 @@ message TableState {
   required State state = 1;
 }
 
-/** On HDFS representation of table state. */
-message TableDescriptor {
-  required TableSchema schema = 1;
-}
-
 /**
  * Column Family Schema
  * Inspired by the rest ColumSchemaMessage

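The removed message added exactly one level of nesting around TableSchema
(field 1), so the two encodings differ by a field-1 tag/length prefix on the
wire. A hedged sketch of the shape change, using the ProtobufUtil conversion
that appears elsewhere in this diff; how pre-existing on-disk files are
handled is not visible in this excerpt (the FSTableDescriptors changes in the
diffstat are the likely place):

  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

  final class SchemaBytes {
    // TableSchema is the message that survives this patch.
    static byte[] serialize(HTableDescriptor htd) {
      HBaseProtos.TableSchema schema = ProtobufUtil.convertToTableSchema(htd);
      // Before (removed): one extra wrapper message around field 1 --
      //   HBaseProtos.TableDescriptor.newBuilder().setSchema(schema).build()
      // -- so the old bytes carried a tag/length prefix that these do not.
      return schema.toByteArray();
    }
  }
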
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
deleted file mode 100644
index 721b842..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-
-/**
- * Class represents table state on HDFS.
- */
-@InterfaceAudience.Private
-public class TableDescriptor {
-  private HTableDescriptor hTableDescriptor;
-
-  /**
-   * Creates TableDescriptor with Enabled table.
-   * @param hTableDescriptor HTableDescriptor to use
-   */
-  @VisibleForTesting
-  public TableDescriptor(HTableDescriptor hTableDescriptor) {
-    this.hTableDescriptor = hTableDescriptor;
-  }
-
-  /**
-   * Associated HTableDescriptor
-   * @return instance of HTableDescriptor
-   */
-  public HTableDescriptor getHTableDescriptor() {
-    return hTableDescriptor;
-  }
-
-  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
-    this.hTableDescriptor = hTableDescriptor;
-  }
-
-  /**
-   * Convert to PB.
-   */
-  @SuppressWarnings("deprecation")
-  public HBaseProtos.TableDescriptor convert() {
-    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder()
-        .setSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
-    return builder.build();
-  }
-
-  /**
-   * Convert from PB
-   */
-  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
-    return new TableDescriptor(ProtobufUtil.convertToHTableDesc(proto.getSchema()));
-  }
-
-  /**
-   * @return This instance serialized with pb with pb magic prefix
-   * @see #parseFrom(byte[])
-   */
-  public byte [] toByteArray() {
-    return ProtobufUtil.prependPBMagic(convert().toByteArray());
-  }
-
-  /**
-   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
-   * @see #toByteArray()
-   */
-  public static TableDescriptor parseFrom(final byte [] bytes)
-      throws DeserializationException, IOException {
-    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
-      throw new DeserializationException("Expected PB encoded TableDescriptor");
-    }
-    int pblen = ProtobufUtil.lengthOfPBMagic();
-    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
-    HBaseProtos.TableDescriptor ts;
-    try {
-      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
-      ts = builder.build();
-    } catch (IOException e) {
-      throw new DeserializationException(e);
-    }
-    return convert(ts);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    TableDescriptor that = (TableDescriptor) o;
-
-    if (hTableDescriptor != null ?
-        !hTableDescriptor.equals(that.hTableDescriptor) :
-        that.hTableDescriptor != null) return false;
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
-  }
-
-  @Override
-  public String toString() {
-    return "TableDescriptor{" +
-        "hTableDescriptor=" + hTableDescriptor +
-        '}';
-  }
-
-  public static HTableDescriptor metaTableDescriptor(final Configuration conf)
-      throws IOException {
-    HTableDescriptor metaDescriptor = new HTableDescriptor(
-        TableName.META_TABLE_NAME,
-        new HColumnDescriptor[] {
-            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
-                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
-                    HConstants.DEFAULT_HBASE_META_VERSIONS))
-                .setInMemory(true)
-                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
-                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                .setCacheDataInL1(true),
-            new HColumnDescriptor(HConstants.TABLE_FAMILY)
-                // Ten is arbitrary number.  Keep versions to help debugging.
-                .setMaxVersions(10)
-                .setInMemory(true)
-                .setBlocksize(8 * 1024)
-                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                .setCacheDataInL1(true)
-        }) {
-    };
-    metaDescriptor.addCoprocessor(
-        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
-        null, Coprocessor.PRIORITY_SYSTEM, null);
-    return metaDescriptor;
-  }
-
-}
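
The class deleted above carried its own protobuf round-trip: toByteArray()
prepended the pb magic prefix and parseFrom() verified and stripped that
prefix before merging the message. As a minimal, hedged sketch of that
framing pattern only (the class name and local PB_MAGIC constant are
assumptions made so the sketch stands alone; the real prefix lives in
HBase's protobuf utilities):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public final class PbMagicFramingSketch {
      // Assumed local copy of the magic prefix, for illustration only.
      private static final byte[] PB_MAGIC = "PBUF".getBytes(StandardCharsets.UTF_8);

      // Mirrors the toByteArray() side: magic bytes first, payload after.
      static byte[] frame(byte[] pbBytes) {
        byte[] out = new byte[PB_MAGIC.length + pbBytes.length];
        System.arraycopy(PB_MAGIC, 0, out, 0, PB_MAGIC.length);
        System.arraycopy(pbBytes, 0, out, PB_MAGIC.length, pbBytes.length);
        return out;
      }

      // Mirrors the parseFrom() guard: reject input without the prefix,
      // then hand back only the protobuf payload for merging.
      static byte[] unframe(byte[] bytes) {
        if (bytes.length < PB_MAGIC.length
            || !Arrays.equals(Arrays.copyOf(bytes, PB_MAGIC.length), PB_MAGIC)) {
          throw new IllegalArgumentException("Expected PB encoded content");
        }
        return Arrays.copyOfRange(bytes, PB_MAGIC.length, bytes.length);
      }
    }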

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index c7bfd03..7de2629 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -30,18 +30,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 public interface TableDescriptors {
   /**
    * @param tableName
-   * @return HTableDescriptor for tablename
-   * @throws IOException
-   */
-  HTableDescriptor get(final TableName tableName)
-  throws IOException;
-
-  /**
-   * @param tableName
-   * @return TableDescriptor for tablename
+   * @return HTableDescriptor for tablename
    * @throws IOException
    */
-  TableDescriptor getDescriptor(final TableName tableName)
+  HTableDescriptor get(final TableName tableName)
       throws IOException;
 
   /**
@@ -67,7 +59,7 @@ public interface TableDescriptors {
    * @return Map of all descriptors.
    * @throws IOException
    */
-  Map<String, TableDescriptor> getAllDescriptors()
+  Map<String, HTableDescriptor> getAllDescriptors()
       throws IOException;
 
   /**
@@ -79,14 +71,6 @@ public interface TableDescriptors {
   throws IOException;
 
   /**
-   * Add or update descriptor
-   * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
-   */
-  void add(final TableDescriptor htd)
-      throws IOException;
-
-  /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
    * @throws IOException
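
With the duplicate getDescriptor() and add(TableDescriptor) variants gone,
callers reach descriptors through a single lookup method. A brief usage
sketch, assuming an already-constructed TableDescriptors instance; the
class and table names here are invented for illustration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.TableName;

    public final class DescriptorLookupSketch {
      // One code path now serves what get() and getDescriptor() used to split.
      static HTableDescriptor lookup(TableDescriptors tds) throws IOException {
        return tds.get(TableName.valueOf("example_table"));
      }
    }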

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index efb6b6e..3a02060 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
@@ -1353,6 +1352,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     return true;
   }
 
+  @Override
   @VisibleForTesting
   public RegionNormalizer getRegionNormalizer() {
     return this.normalizer;
@@ -1384,10 +1384,9 @@ public class HMaster extends HRegionServer implements MasterServices {
       Collections.shuffle(allEnabledTables);
 
       for (TableName table : allEnabledTables) {
-        TableDescriptor tblDesc = getTableDescriptors().getDescriptor(table);
+        HTableDescriptor tblDesc = getTableDescriptors().get(table);
         if (table.isSystemTable() || (tblDesc != null &&
-            tblDesc.getHTableDescriptor() != null &&
-            !tblDesc.getHTableDescriptor().isNormalizationEnabled())) {
+            !tblDesc.isNormalizationEnabled())) {
           LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
               + " table or doesn't have auto normalization turned on");
           continue;
@@ -2346,6 +2346,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    *
    * @return true if active master, false if not.
    */
+  @Override
   public boolean isActiveMaster() {
     return isActiveMaster;
   }
@@ -2490,6 +2491,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   /**
    * @return the underlying snapshot manager
    */
+  @Override
   public SnapshotManager getSnapshotManager() {
     return this.snapshotManager;
   }
@@ -2497,6 +2499,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   /**
    * @return the underlying MasterProcedureManagerHost
    */
+  @Override
   public MasterProcedureManagerHost getMasterProcedureManagerHost() {
     return mpmHost;
   }
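
The normalizer loop earlier in this file now reduces to one descriptor
check. A sketch of the resulting predicate under the same semantics
(class and method names invented for illustration):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.TableName;

    public final class NormalizationSkipSketch {
      // Skip system tables and tables whose descriptor disables normalization.
      static boolean shouldSkip(TableDescriptors tds, TableName table)
          throws IOException {
        HTableDescriptor tblDesc = tds.get(table);
        return table.isSystemTable()
            || (tblDesc != null && !tblDesc.isNormalizationEnabled());
      }
    }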

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index ad6e09d..0ce7411 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -214,8 +213,7 @@ public class MasterFileSystem {
     // meta table is a system table, so descriptors are predefined,
     // we should get them from registry.
     FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
-    fsd.createTableDescriptor(
-        new TableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
+    fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));
 
     return rd;
   }
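
A sketch of the simplified bootstrap above, assuming the Configuration,
FileSystem and root directory Path are already in hand; only the two
FSTableDescriptors calls come from the patch, the wrapper class is
invented:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public final class MetaDescriptorBootstrapSketch {
      // The predefined hbase:meta descriptor is written out directly,
      // with no intermediate TableDescriptor wrapper to construct.
      static void writeMetaDescriptor(Configuration c, FileSystem fs, Path rootDir)
          throws IOException {
        FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rootDir);
        fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));
      }
    }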

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index b6befaa..cc257d0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -29,8 +29,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -197,7 +197,7 @@ public class TableStateManager {
 
   public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
       throws IOException {
-    final Map<String, TableDescriptor> allDescriptors =
+    final Map<String, HTableDescriptor> allDescriptors =
         tableDescriptors.getAllDescriptors();
     final Map<String, TableState> states = new HashMap<>();
     MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@@ -209,7 +209,7 @@ public class TableStateManager {
         return true;
       }
     });
-    for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
+    for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
       String table = entry.getKey();
       if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
         continue;
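
The reconciliation loop above now iterates plain HTableDescriptor values.
A stripped-down sketch of that walk, with the actual state comparison
elided and the class name invented:

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.TableName;

    public final class StateWalkSketch {
      static void walk(TableDescriptors tableDescriptors) throws IOException {
        Map<String, HTableDescriptor> allDescriptors =
            tableDescriptors.getAllDescriptors();
        for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
          String table = entry.getKey();
          // hbase:meta is skipped, exactly as in the loop above.
          if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) {
            continue;
          }
          // ... compare against the state read from hbase:meta here ...
        }
      }
    }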

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index e91bd29..fcad845 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
@@ -481,7 +480,7 @@ public class CloneSnapshotProcedure
 
     // 1. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
-    TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
+    HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
       .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
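
The clone path keeps a defensive copy so the table can be created in the
enabled state without mutating the caller's descriptor; the copy is now
made with the HTableDescriptor copy constructor instead of a wrapper. A
one-method sketch (class name invented):

    import org.apache.hadoop.hbase.HTableDescriptor;

    public final class DefensiveCopySketch {
      // Copy-construct before handing the descriptor to table creation,
      // so later mutations stay local to the clone procedure.
      static HTableDescriptor copyForCreation(HTableDescriptor original) {
        return new HTableDescriptor(original);
      }
    }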

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index da4a732..40b56e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -364,11 +363,10 @@ public class CreateTableProcedure
 
     // 1. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
-    TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
     final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
         .createTableDescriptorForTableDirectory(
-          tempTableDir, underConstruction, false);
+          tempTableDir, hTableDescriptor, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
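
Create-table now writes the caller's descriptor into the temporary table
directory without an intermediate copy. A hedged sketch of that write,
with the procedure plumbing assumed and the class name invented:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public final class TempDirDescriptorSketch {
      // The trailing 'false' matches the patch: do not force creation if a
      // descriptor file already exists under the target directory.
      static void writeToTempDir(Configuration conf, FileSystem fs, Path rootDir,
          Path tempTableDir, HTableDescriptor htd) throws IOException {
        new FSTableDescriptors(conf, fs, rootDir)
            .createTableDescriptorForTableDirectory(tempTableDir, htd, false);
      }
    }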

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 9abed52..b420274 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -96,7 +96,7 @@ public class TruncateTableProcedure
           break;
         case TRUNCATE_TABLE_REMOVE_FROM_META:
           hTableDescriptor = env.getMasterServices().getTableDescriptors()
-              .getDescriptor(tableName).getHTableDescriptor();
+              .get(tableName);
           DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
           DeleteTableProcedure.deleteAssignmentState(env, getTableName());
           setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 4ad82ca..e35c686 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -112,14 +111,14 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-        compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
+        compactStoreFiles(tableDir, htd, hri,
             path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        compactRegion(tableDir, htd, path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
       } else {
@@ -130,9 +129,9 @@ public class CompactionTool extends Configured implements Tool {
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-        compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
+        compactRegion(tableDir, htd, regionDir, compactOnce, major);
       }
     }
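
The tool-side reads follow the same pattern: one static
getTableDescriptorFromFs() call per table directory, with no unwrapping
step afterwards. A condensed sketch of the table walk, the compaction
call elided and the class name invented:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class TableDirWalkSketch {
      static void walkTable(FileSystem fs, Path tableDir) throws IOException {
        HTableDescriptor htd =
            FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
        for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
          // ... compact regionDir's stores against htd here ...
        }
      }
    }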
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb0cc88/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 9d7af50..1763b2f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -348,8 +347,7 @@ public final class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir)
-            .getHTableDescriptor();
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
@@ -447,8 +445,7 @@ public final class SnapshotManifest {
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
       new FSTableDescriptors(conf, fs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
-            htd), false);
+        .createTableDescriptorForTableDirectory(workingDir, htd, false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
       convertToV2SingleManifest();