Posted to commits@hbase.apache.org by st...@apache.org on 2016/09/29 19:37:35 UTC

[21/51] [partial] hbase git commit: HBASE-16264 Figure how to deal with endpoints and shaded pb

Shade our protobufs. Do it in a manner that makes it so we can still have in our API references to com.google.protobuf (and in REST). The c.g.p in API is for

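For readers skimming the archive: the practical effect of the shading is that generated classes now live under org.apache.hadoop.hbase.shaded.protobuf.generated, while (as the commit message notes) the runtime they extend is still com.google.protobuf, which also remains visible in the public API and REST. A minimal sketch of consuming code, using only the RegionState API visible in the generated file below; the wrapper class and values are illustrative, not part of the commit:

import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState;

public class RegionStateSketch {
  public static void main(String[] args) {
    // Builder methods come from the generated RegionState.Builder.
    // region_info is a required field, so buildPartial() is used here to
    // skip the initialization check that build() would enforce.
    RegionState partial = RegionState.newBuilder()
        .setState(RegionState.State.OPENING)
        .setStamp(System.currentTimeMillis())
        .buildPartial();

    // The generated enum converts between wire numbers and constants.
    RegionState.State s = RegionState.State.valueOf(partial.getState().getNumber());
    System.out.println(s + " at stamp " + partial.getStamp()); // OPENING at stamp ...
  }
}

Code compiled against the old org.apache.hadoop.hbase.protobuf.generated package only needs its imports repointed at the shaded package; the generated method surface is unchanged.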
http://git-wip-us.apache.org/repos/asf/hbase/blob/17d4b70d/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClusterStatusProtos.java
new file mode 100644
index 0000000..c9e34d9
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClusterStatusProtos.java
@@ -0,0 +1,14975 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ClusterStatus.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class ClusterStatusProtos {
+  private ClusterStatusProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RegionStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.RegionInfo region_info = 1;
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    boolean hasRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+
+    // required .hbase.pb.RegionState.State state = 2;
+    /**
+     * <code>required .hbase.pb.RegionState.State state = 2;</code>
+     */
+    boolean hasState();
+    /**
+     * <code>required .hbase.pb.RegionState.State state = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
+
+    // optional uint64 stamp = 3;
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
+    boolean hasStamp();
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
+    long getStamp();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionState}
+   */
+  public static final class RegionState extends
+      com.google.protobuf.GeneratedMessage
+      implements RegionStateOrBuilder {
+    // Use RegionState.newBuilder() to construct.
+    private RegionState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RegionState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RegionState defaultInstance;
+    public static RegionState getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RegionState getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = regionInfo_.toBuilder();
+              }
+              regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(regionInfo_);
+                regionInfo_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                state_ = value;
+              }
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              stamp_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RegionState> PARSER =
+        new com.google.protobuf.AbstractParser<RegionState>() {
+      public RegionState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RegionState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RegionState> getParserForType() {
+      return PARSER;
+    }
+
+    /**
+     * Protobuf enum {@code hbase.pb.RegionState.State}
+     */
+    public enum State
+        implements com.google.protobuf.ProtocolMessageEnum {
+      /**
+       * <code>OFFLINE = 0;</code>
+       *
+       * <pre>
+       * region is in an offline state
+       * </pre>
+       */
+      OFFLINE(0, 0),
+      /**
+       * <code>PENDING_OPEN = 1;</code>
+       *
+       * <pre>
+       * sent rpc to server to open but has not begun
+       * </pre>
+       */
+      PENDING_OPEN(1, 1),
+      /**
+       * <code>OPENING = 2;</code>
+       *
+       * <pre>
+       * server has begun to open but not yet done
+       * </pre>
+       */
+      OPENING(2, 2),
+      /**
+       * <code>OPEN = 3;</code>
+       *
+       * <pre>
+       * server opened region and updated meta
+       * </pre>
+       */
+      OPEN(3, 3),
+      /**
+       * <code>PENDING_CLOSE = 4;</code>
+       *
+       * <pre>
+       * sent rpc to server to close but has not begun
+       * </pre>
+       */
+      PENDING_CLOSE(4, 4),
+      /**
+       * <code>CLOSING = 5;</code>
+       *
+       * <pre>
+       * server has begun to close but not yet done
+       * </pre>
+       */
+      CLOSING(5, 5),
+      /**
+       * <code>CLOSED = 6;</code>
+       *
+       * <pre>
+       * server closed region and updated meta
+       * </pre>
+       */
+      CLOSED(6, 6),
+      /**
+       * <code>SPLITTING = 7;</code>
+       *
+       * <pre>
+       * server started split of a region
+       * </pre>
+       */
+      SPLITTING(7, 7),
+      /**
+       * <code>SPLIT = 8;</code>
+       *
+       * <pre>
+       * server completed split of a region
+       * </pre>
+       */
+      SPLIT(8, 8),
+      /**
+       * <code>FAILED_OPEN = 9;</code>
+       *
+       * <pre>
+       * failed to open, and won't retry any more
+       * </pre>
+       */
+      FAILED_OPEN(9, 9),
+      /**
+       * <code>FAILED_CLOSE = 10;</code>
+       *
+       * <pre>
+       * failed to close, and won't retry any more
+       * </pre>
+       */
+      FAILED_CLOSE(10, 10),
+      /**
+       * <code>MERGING = 11;</code>
+       *
+       * <pre>
+       * server started merge a region
+       * </pre>
+       */
+      MERGING(11, 11),
+      /**
+       * <code>MERGED = 12;</code>
+       *
+       * <pre>
+       * server completed merge of a region
+       * </pre>
+       */
+      MERGED(12, 12),
+      /**
+       * <code>SPLITTING_NEW = 13;</code>
+       *
+       * <pre>
+       * new region to be created when RS splits a parent
+       * </pre>
+       */
+      SPLITTING_NEW(13, 13),
+      /**
+       * <code>MERGING_NEW = 14;</code>
+       *
+       * <pre>
+       * region but hasn't be created yet, or master doesn't
+       * know it's already created
+       * </pre>
+       */
+      MERGING_NEW(14, 14),
+      ;
+
+      /**
+       * <code>OFFLINE = 0;</code>
+       *
+       * <pre>
+       * region is in an offline state
+       * </pre>
+       */
+      public static final int OFFLINE_VALUE = 0;
+      /**
+       * <code>PENDING_OPEN = 1;</code>
+       *
+       * <pre>
+       * sent rpc to server to open but has not begun
+       * </pre>
+       */
+      public static final int PENDING_OPEN_VALUE = 1;
+      /**
+       * <code>OPENING = 2;</code>
+       *
+       * <pre>
+       * server has begun to open but not yet done
+       * </pre>
+       */
+      public static final int OPENING_VALUE = 2;
+      /**
+       * <code>OPEN = 3;</code>
+       *
+       * <pre>
+       * server opened region and updated meta
+       * </pre>
+       */
+      public static final int OPEN_VALUE = 3;
+      /**
+       * <code>PENDING_CLOSE = 4;</code>
+       *
+       * <pre>
+       * sent rpc to server to close but has not begun
+       * </pre>
+       */
+      public static final int PENDING_CLOSE_VALUE = 4;
+      /**
+       * <code>CLOSING = 5;</code>
+       *
+       * <pre>
+       * server has begun to close but not yet done
+       * </pre>
+       */
+      public static final int CLOSING_VALUE = 5;
+      /**
+       * <code>CLOSED = 6;</code>
+       *
+       * <pre>
+       * server closed region and updated meta
+       * </pre>
+       */
+      public static final int CLOSED_VALUE = 6;
+      /**
+       * <code>SPLITTING = 7;</code>
+       *
+       * <pre>
+       * server started split of a region
+       * </pre>
+       */
+      public static final int SPLITTING_VALUE = 7;
+      /**
+       * <code>SPLIT = 8;</code>
+       *
+       * <pre>
+       * server completed split of a region
+       * </pre>
+       */
+      public static final int SPLIT_VALUE = 8;
+      /**
+       * <code>FAILED_OPEN = 9;</code>
+       *
+       * <pre>
+       * failed to open, and won't retry any more
+       * </pre>
+       */
+      public static final int FAILED_OPEN_VALUE = 9;
+      /**
+       * <code>FAILED_CLOSE = 10;</code>
+       *
+       * <pre>
+       * failed to close, and won't retry any more
+       * </pre>
+       */
+      public static final int FAILED_CLOSE_VALUE = 10;
+      /**
+       * <code>MERGING = 11;</code>
+       *
+       * <pre>
+       * server started merge a region
+       * </pre>
+       */
+      public static final int MERGING_VALUE = 11;
+      /**
+       * <code>MERGED = 12;</code>
+       *
+       * <pre>
+       * server completed merge of a region
+       * </pre>
+       */
+      public static final int MERGED_VALUE = 12;
+      /**
+       * <code>SPLITTING_NEW = 13;</code>
+       *
+       * <pre>
+       * new region to be created when RS splits a parent
+       * </pre>
+       */
+      public static final int SPLITTING_NEW_VALUE = 13;
+      /**
+       * <code>MERGING_NEW = 14;</code>
+       *
+       * <pre>
+       * region but hasn't be created yet, or master doesn't
+       * know it's already created
+       * </pre>
+       */
+      public static final int MERGING_NEW_VALUE = 14;
+
+
+      public final int getNumber() { return value; }
+
+      public static State valueOf(int value) {
+        switch (value) {
+          case 0: return OFFLINE;
+          case 1: return PENDING_OPEN;
+          case 2: return OPENING;
+          case 3: return OPEN;
+          case 4: return PENDING_CLOSE;
+          case 5: return CLOSING;
+          case 6: return CLOSED;
+          case 7: return SPLITTING;
+          case 8: return SPLIT;
+          case 9: return FAILED_OPEN;
+          case 10: return FAILED_CLOSE;
+          case 11: return MERGING;
+          case 12: return MERGED;
+          case 13: return SPLITTING_NEW;
+          case 14: return MERGING_NEW;
+          default: return null;
+        }
+      }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<State>() {
+              public State findValueByNumber(int number) {
+                return State.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final State[] VALUES = values();
+
+      public static State valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private State(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+
+      // @@protoc_insertion_point(enum_scope:hbase.pb.RegionState.State)
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.RegionInfo region_info = 1;
+    public static final int REGION_INFO_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public boolean hasRegionInfo() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+      return regionInfo_;
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+      return regionInfo_;
+    }
+
+    // required .hbase.pb.RegionState.State state = 2;
+    public static final int STATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
+    /**
+     * <code>required .hbase.pb.RegionState.State state = 2;</code>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.RegionState.State state = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
+      return state_;
+    }
+
+    // optional uint64 stamp = 3;
+    public static final int STAMP_FIELD_NUMBER = 3;
+    private long stamp_;
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
+    public boolean hasStamp() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
+    public long getStamp() {
+      return stamp_;
+    }
+
+    private void initFields() {
+      regionInfo_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+      state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+      stamp_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasRegionInfo()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getRegionInfo().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, regionInfo_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt64(3, stamp_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, regionInfo_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(3, stamp_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState) obj;
+
+      boolean result = true;
+      result = result && (hasRegionInfo() == other.hasRegionInfo());
+      if (hasRegionInfo()) {
+        result = result && getRegionInfo()
+            .equals(other.getRegionInfo());
+      }
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result && (hasStamp() == other.hasStamp());
+      if (hasStamp()) {
+        result = result && (getStamp()
+            == other.getStamp());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasRegionInfo()) {
+        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionInfo().hashCode();
+      }
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      if (hasStamp()) {
+        hash = (37 * hash) + STAMP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getStamp());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RegionState}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getRegionInfoFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        stamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (regionInfoBuilder_ == null) {
+          result.regionInfo_ = regionInfo_;
+        } else {
+          result.regionInfo_ = regionInfoBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.stamp_ = stamp_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) return this;
+        if (other.hasRegionInfo()) {
+          mergeRegionInfo(other.getRegionInfo());
+        }
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasStamp()) {
+          setStamp(other.getStamp());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasRegionInfo()) {
+          
+          return false;
+        }
+        if (!hasState()) {
+          
+          return false;
+        }
+        if (!getRegionInfo().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .hbase.pb.RegionInfo region_info = 1;
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public boolean hasRegionInfo() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_;
+        } else {
+          return regionInfoBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          regionInfo_ = value;
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder setRegionInfo(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = builderForValue.build();
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+            regionInfo_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
+          } else {
+            regionInfo_ = value;
+          }
+          onChanged();
+        } else {
+          regionInfoBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public Builder clearRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+          onChanged();
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getRegionInfoFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+        if (regionInfoBuilder_ != null) {
+          return regionInfoBuilder_.getMessageOrBuilder();
+        } else {
+          return regionInfo_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+          getRegionInfoFieldBuilder() {
+        if (regionInfoBuilder_ == null) {
+          regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+                  regionInfo_,
+                  getParentForChildren(),
+                  isClean());
+          regionInfo_ = null;
+        }
+        return regionInfoBuilder_;
+      }
+
+      // required .hbase.pb.RegionState.State state = 2;
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+      /**
+       * <code>required .hbase.pb.RegionState.State state = 2;</code>
+       */
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.RegionState.State state = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
+        return state_;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState.State state = 2;</code>
+       */
+      public Builder setState(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState.State state = 2;</code>
+       */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+        onChanged();
+        return this;
+      }
+
+      // optional uint64 stamp = 3;
+      private long stamp_ ;
+      /**
+       * <code>optional uint64 stamp = 3;</code>
+       */
+      public boolean hasStamp() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional uint64 stamp = 3;</code>
+       */
+      public long getStamp() {
+        return stamp_;
+      }
+      /**
+       * <code>optional uint64 stamp = 3;</code>
+       */
+      public Builder setStamp(long value) {
+        bitField0_ |= 0x00000004;
+        stamp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 stamp = 3;</code>
+       */
+      public Builder clearStamp() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        stamp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionState)
+    }
+
+    static {
+      defaultInstance = new RegionState(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionState)
+  }
+
+  public interface RegionInTransitionOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.RegionSpecifier spec = 1;
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    boolean hasSpec();
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getSpec();
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder();
+
+    // required .hbase.pb.RegionState region_state = 2;
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    boolean hasRegionState();
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState getRegionState();
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionInTransition}
+   */
+  public static final class RegionInTransition extends
+      com.google.protobuf.GeneratedMessage
+      implements RegionInTransitionOrBuilder {
+    // Use RegionInTransition.newBuilder() to construct.
+    private RegionInTransition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RegionInTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RegionInTransition defaultInstance;
+    public static RegionInTransition getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RegionInTransition getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionInTransition(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = spec_.toBuilder();
+              }
+              spec_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(spec_);
+                spec_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = regionState_.toBuilder();
+              }
+              regionState_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(regionState_);
+                regionState_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RegionInTransition> PARSER =
+        new com.google.protobuf.AbstractParser<RegionInTransition>() {
+      public RegionInTransition parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RegionInTransition(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RegionInTransition> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .hbase.pb.RegionSpecifier spec = 1;
+    public static final int SPEC_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier spec_;
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    public boolean hasSpec() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
+      return spec_;
+    }
+    /**
+     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
+      return spec_;
+    }
+
+    // required .hbase.pb.RegionState region_state = 2;
+    public static final int REGION_STATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState regionState_;
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    public boolean hasRegionState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
+      return regionState_;
+    }
+    /**
+     * <code>required .hbase.pb.RegionState region_state = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
+      return regionState_;
+    }
+
+    private void initFields() {
+      spec_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+      regionState_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasSpec()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasRegionState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getSpec().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getRegionState().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, spec_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, regionState_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, spec_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, regionState_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) obj;
+
+      boolean result = true;
+      result = result && (hasSpec() == other.hasSpec());
+      if (hasSpec()) {
+        result = result && getSpec()
+            .equals(other.getSpec());
+      }
+      result = result && (hasRegionState() == other.hasRegionState());
+      if (hasRegionState()) {
+        result = result && getRegionState()
+            .equals(other.getRegionState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSpec()) {
+        hash = (37 * hash) + SPEC_FIELD_NUMBER;
+        hash = (53 * hash) + getSpec().hashCode();
+      }
+      if (hasRegionState()) {
+        hash = (37 * hash) + REGION_STATE_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionState().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RegionInTransition}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getSpecFieldBuilder();
+          getRegionStateFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (specBuilder_ == null) {
+          spec_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+        } else {
+          specBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (regionStateBuilder_ == null) {
+          regionState_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
+        } else {
+          regionStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (specBuilder_ == null) {
+          result.spec_ = spec_;
+        } else {
+          result.spec_ = specBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (regionStateBuilder_ == null) {
+          result.regionState_ = regionState_;
+        } else {
+          result.regionState_ = regionStateBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance()) return this;
+        if (other.hasSpec()) {
+          mergeSpec(other.getSpec());
+        }
+        if (other.hasRegionState()) {
+          mergeRegionState(other.getRegionState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasSpec()) {
+          return false;
+        }
+        if (!hasRegionState()) {
+          return false;
+        }
+        if (!getSpec().isInitialized()) {
+          return false;
+        }
+        if (!getRegionState().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
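+
+      // Annotation (not generated output): note the finally block above.
+      // When a parse fails partway through the stream, whatever was read
+      // successfully is still merged into this builder before the exception
+      // propagates, and the caller can also recover the partial message via
+      // e.getUnfinishedMessage().
+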
+      private int bitField0_;
+
+      // required .hbase.pb.RegionSpecifier spec = 1;
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier spec_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> specBuilder_;
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public boolean hasSpec() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
+        if (specBuilder_ == null) {
+          return spec_;
+        } else {
+          return specBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public Builder setSpec(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+        if (specBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          spec_ = value;
+          onChanged();
+        } else {
+          specBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public Builder setSpec(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+        if (specBuilder_ == null) {
+          spec_ = builderForValue.build();
+          onChanged();
+        } else {
+          specBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public Builder mergeSpec(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+        if (specBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              spec_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+            spec_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(spec_).mergeFrom(value).buildPartial();
+          } else {
+            spec_ = value;
+          }
+          onChanged();
+        } else {
+          specBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public Builder clearSpec() {
+        if (specBuilder_ == null) {
+          spec_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+          onChanged();
+        } else {
+          specBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getSpecBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getSpecFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
+        if (specBuilder_ != null) {
+          return specBuilder_.getMessageOrBuilder();
+        } else {
+          return spec_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
+          getSpecFieldBuilder() {
+        if (specBuilder_ == null) {
+          specBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+                  spec_,
+                  getParentForChildren(),
+                  isClean());
+          spec_ = null;
+        }
+        return specBuilder_;
+      }
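+
+      // Illustrative set-vs-merge semantics (annotation, not generated
+      // output): setSpec(v) replaces any current value outright, while
+      // mergeSpec(v) overlays v onto an existing value field by field:
+      //
+      //   builder.setSpec(a).setSpec(b);    // result is exactly b
+      //   builder.setSpec(a).mergeSpec(b);  // fields set in b override a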
+
+      // required .hbase.pb.RegionState region_state = 2;
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState regionState_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> regionStateBuilder_;
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public boolean hasRegionState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
+        if (regionStateBuilder_ == null) {
+          return regionState_;
+        } else {
+          return regionStateBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public Builder setRegionState(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState value) {
+        if (regionStateBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          regionState_ = value;
+          onChanged();
+        } else {
+          regionStateBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public Builder setRegionState(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder builderForValue) {
+        if (regionStateBuilder_ == null) {
+          regionState_ = builderForValue.build();
+          onChanged();
+        } else {
+          regionStateBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public Builder mergeRegionState(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState value) {
+        if (regionStateBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              regionState_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) {
+            regionState_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder(regionState_).mergeFrom(value).buildPartial();
+          } else {
+            regionState_ = value;
+          }
+          onChanged();
+        } else {
+          regionStateBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public Builder clearRegionState() {
+        if (regionStateBuilder_ == null) {
+          regionState_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
+          onChanged();
+        } else {
+          regionStateBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder getRegionStateBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getRegionStateFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
+        if (regionStateBuilder_ != null) {
+          return regionStateBuilder_.getMessageOrBuilder();
+        } else {
+          return regionState_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.RegionState region_state = 2;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> 
+          getRegionStateFieldBuilder() {
+        if (regionStateBuilder_ == null) {
+          regionStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder>(
+                  regionState_,
+                  getParentForChildren(),
+                  isClean());
+          regionState_ = null;
+        }
+        return regionStateBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionInTransition)
+    }
+
+    static {
+      defaultInstance = new RegionInTransition(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionInTransition)
+  }
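+
+  // Illustrative builder usage (annotation, not part of the generated file).
+  // Both `spec` and `region_state` are required fields, so build() throws an
+  // UninitializedMessageException when either is unset, while buildPartial()
+  // skips that check. A minimal sketch, assuming `spec` and `state` are
+  // pre-built messages:
+  //
+  //   RegionInTransition rit = RegionInTransition.newBuilder()
+  //       .setSpec(spec)          // required .hbase.pb.RegionSpecifier
+  //       .setRegionState(state)  // required .hbase.pb.RegionState
+  //       .build();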
+
+  public interface StoreSequenceIdOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bytes family_name = 1;
+    /**
+     * <code>required bytes family_name = 1;</code>
+     */
+    boolean hasFamilyName();
+    /**
+     * <code>required bytes family_name = 1;</code>
+     */
+    com.google.protobuf.ByteString getFamilyName();
+
+    // required uint64 sequence_id = 2;
+    /**
+     * <code>required uint64 sequence_id = 2;</code>
+     */
+    boolean hasSequenceId();
+    /**
+     * <code>required uint64 sequence_id = 2;</code>
+     */
+    long getSequenceId();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.StoreSequenceId}
+   *
+   * <pre>
+   * Sequence id of a store.
+   * </pre>
+   */
+  public static final class StoreSequenceId extends
+      com.google.protobuf.GeneratedMessage
+      implements StoreSequenceIdOrBuilder {
+    // Use StoreSequenceId.newBuilder() to construct.
+    private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StoreSequenceId defaultInstance;
+    public static StoreSequenceId getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StoreSequenceId getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StoreSequenceId(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            case 10: {
+              bitField0_ |= 0x00000001;
+              familyName_ = input.readBytes();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              sequenceId_ = input.readUInt64();
+              break;
+            }
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
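+    // Annotation (not generated output): a protobuf tag is
+    // (field_number << 3) | wire_type, which explains the case labels above:
+    //   case 10 == (1 << 3) | 2  -> field 1, length-delimited (family_name)
+    //   case 16 == (2 << 3) | 0  -> field 2, varint (sequence_id)
+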
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StoreSequenceId> PARSER =
+        new com.google.protobuf.AbstractParser<StoreSequenceId>() {
+      public StoreSequenceId parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StoreSequenceId(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StoreSequenceId> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bytes family_name = 1;
+    public static final int FAMILY_NAME_FIELD_NUMBER = 1;
+    private com.google.protobuf.ByteString familyName_;
+    /**
+     * <code>required bytes family_name = 1;</code>
+     */
+    public boolean hasFamilyName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bytes family_name = 1;</code>
+     */
+    public com.google.protobuf.ByteString getFamilyName() {
+      return familyName_;
+    }
+
+    // required uint64 sequence_id = 2;
+    public static final int SEQUENCE_ID_FIELD_NUMBER = 2;
+    private long sequenceId_;
+    /**
+     * <code>required uint64 sequence_id = 2;</code>
+     */
+    public boolean hasSequenceId() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required uint64 sequence_id = 2;</code>
+     */
+    public long getSequenceId() {
+      return sequenceId_;
+    }
+
+    private void initFields() {
+      familyName_ = com.google.protobuf.ByteString.EMPTY;
+      sequenceId_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasFamilyName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSequenceId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, familyName_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, sequenceId_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, familyName_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, sequenceId_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
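+
+    // Worked size example (annotation, not generated output): for a 2-byte
+    // family_name and sequence_id = 300, getSerializedSize() returns
+    //   field 1: 1 (tag 0x0A) + 1 (length varint) + 2 (payload) = 4 bytes
+    //   field 2: 1 (tag 0x10) + 2 (varint, since 300 > 127)     = 3 bytes
+    // i.e. 7 bytes, plus the size of any unknown fields.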
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId) obj;
+
+      boolean result = true;
+      result = result && (hasFamilyName() == other.hasFamilyName());
+      if (hasFamilyName()) {
+        result = result && getFamilyName()
+            .equals(other.getFamilyName());
+      }
+      result = result && (hasSequenceId() == other.hasSequenceId());
+      if (hasSequenceId()) {
+        result = result && (getSequenceId()
+            == other.getSequenceId());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasFamilyName()) {
+        hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getFamilyName().hashCode();
+      }
+      if (hasSequenceId()) {
+        hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getSequenceId());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
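+
+    // Annotation (not generated output): hashLong is inherited from the
+    // protobuf runtime and folds the 64-bit value into an int, in this
+    // protobuf lineage as (int) (n ^ (n >>> 32)), so equal messages hash
+    // equally.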
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
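+
+    // Illustrative round trip (annotation, not part of the generated file);
+    // a minimal sketch assuming a column family named "cf":
+    //
+    //   StoreSequenceId id = StoreSequenceId.newBuilder()
+    //       .setFamilyName(com.google.protobuf.ByteString.copyFromUtf8("cf"))
+    //       .setSequenceId(42L)
+    //       .build();
+    //   StoreSequenceId copy = StoreSequenceId.parseFrom(id.toByteArray());
+    //   assert copy.equals(id) && copy.hashCode() == id.hashCode();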
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.StoreSequenceId}
+     *
+     * <pre>
+     * Sequence id of a store.
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        familyName_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        sequenceId_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.familyName_ = familyName_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 

<TRUNCATED>