Posted to commits@hbase.apache.org by sy...@apache.org on 2016/07/23 03:09:16 UTC

[1/3] hbase git commit: Revert HBASE-15406: Split / merge switch left disabled after early termination of hbck (HBASE-16008 will address this issue)

Repository: hbase
Updated Branches:
  refs/heads/master 744248c13 -> cd0b85e0a


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index a45c421..f64d0c1 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9733,540 +9733,6 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
-  public interface SplitAndMergeStateOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // optional bool split_enabled = 1;
-    /**
-     * <code>optional bool split_enabled = 1;</code>
-     */
-    boolean hasSplitEnabled();
-    /**
-     * <code>optional bool split_enabled = 1;</code>
-     */
-    boolean getSplitEnabled();
-
-    // optional bool merge_enabled = 2;
-    /**
-     * <code>optional bool merge_enabled = 2;</code>
-     */
-    boolean hasMergeEnabled();
-    /**
-     * <code>optional bool merge_enabled = 2;</code>
-     */
-    boolean getMergeEnabled();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.SplitAndMergeState}
-   *
-   * <pre>
-   **
-   * State for split and merge, used in hbck
-   * </pre>
-   */
-  public static final class SplitAndMergeState extends
-      com.google.protobuf.GeneratedMessage
-      implements SplitAndMergeStateOrBuilder {
-    // Use SplitAndMergeState.newBuilder() to construct.
-    private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private SplitAndMergeState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final SplitAndMergeState defaultInstance;
-    public static SplitAndMergeState getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public SplitAndMergeState getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private SplitAndMergeState(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 8: {
-              bitField0_ |= 0x00000001;
-              splitEnabled_ = input.readBool();
-              break;
-            }
-            case 16: {
-              bitField0_ |= 0x00000002;
-              mergeEnabled_ = input.readBool();
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<SplitAndMergeState> PARSER =
-        new com.google.protobuf.AbstractParser<SplitAndMergeState>() {
-      public SplitAndMergeState parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new SplitAndMergeState(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<SplitAndMergeState> getParserForType() {
-      return PARSER;
-    }
-
-    private int bitField0_;
-    // optional bool split_enabled = 1;
-    public static final int SPLIT_ENABLED_FIELD_NUMBER = 1;
-    private boolean splitEnabled_;
-    /**
-     * <code>optional bool split_enabled = 1;</code>
-     */
-    public boolean hasSplitEnabled() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>optional bool split_enabled = 1;</code>
-     */
-    public boolean getSplitEnabled() {
-      return splitEnabled_;
-    }
-
-    // optional bool merge_enabled = 2;
-    public static final int MERGE_ENABLED_FIELD_NUMBER = 2;
-    private boolean mergeEnabled_;
-    /**
-     * <code>optional bool merge_enabled = 2;</code>
-     */
-    public boolean hasMergeEnabled() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>optional bool merge_enabled = 2;</code>
-     */
-    public boolean getMergeEnabled() {
-      return mergeEnabled_;
-    }
-
-    private void initFields() {
-      splitEnabled_ = false;
-      mergeEnabled_ = false;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBool(1, splitEnabled_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeBool(2, mergeEnabled_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(1, splitEnabled_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(2, mergeEnabled_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) obj;
-
-      boolean result = true;
-      result = result && (hasSplitEnabled() == other.hasSplitEnabled());
-      if (hasSplitEnabled()) {
-        result = result && (getSplitEnabled()
-            == other.getSplitEnabled());
-      }
-      result = result && (hasMergeEnabled() == other.hasMergeEnabled());
-      if (hasMergeEnabled()) {
-        result = result && (getMergeEnabled()
-            == other.getMergeEnabled());
-      }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasSplitEnabled()) {
-        hash = (37 * hash) + SPLIT_ENABLED_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getSplitEnabled());
-      }
-      if (hasMergeEnabled()) {
-        hash = (37 * hash) + MERGE_ENABLED_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getMergeEnabled());
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.SplitAndMergeState}
-     *
-     * <pre>
-     **
-     * State for split and merge, used in hbck
-     * </pre>
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeStateOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        splitEnabled_ = false;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        mergeEnabled_ = false;
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState build() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        result.splitEnabled_ = splitEnabled_;
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        result.mergeEnabled_ = mergeEnabled_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance()) return this;
-        if (other.hasSplitEnabled()) {
-          setSplitEnabled(other.getSplitEnabled());
-        }
-        if (other.hasMergeEnabled()) {
-          setMergeEnabled(other.getMergeEnabled());
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // optional bool split_enabled = 1;
-      private boolean splitEnabled_ ;
-      /**
-       * <code>optional bool split_enabled = 1;</code>
-       */
-      public boolean hasSplitEnabled() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>optional bool split_enabled = 1;</code>
-       */
-      public boolean getSplitEnabled() {
-        return splitEnabled_;
-      }
-      /**
-       * <code>optional bool split_enabled = 1;</code>
-       */
-      public Builder setSplitEnabled(boolean value) {
-        bitField0_ |= 0x00000001;
-        splitEnabled_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool split_enabled = 1;</code>
-       */
-      public Builder clearSplitEnabled() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        splitEnabled_ = false;
-        onChanged();
-        return this;
-      }
-
-      // optional bool merge_enabled = 2;
-      private boolean mergeEnabled_ ;
-      /**
-       * <code>optional bool merge_enabled = 2;</code>
-       */
-      public boolean hasMergeEnabled() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>optional bool merge_enabled = 2;</code>
-       */
-      public boolean getMergeEnabled() {
-        return mergeEnabled_;
-      }
-      /**
-       * <code>optional bool merge_enabled = 2;</code>
-       */
-      public Builder setMergeEnabled(boolean value) {
-        bitField0_ |= 0x00000002;
-        mergeEnabled_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool merge_enabled = 2;</code>
-       */
-      public Builder clearMergeEnabled() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        mergeEnabled_ = false;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.SplitAndMergeState)
-    }
-
-    static {
-      defaultInstance = new SplitAndMergeState(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.SplitAndMergeState)
-  }
-
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_MetaRegionServer_descriptor;
   private static
@@ -10327,11 +9793,6 @@ public final class ZooKeeperProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SwitchState_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_SplitAndMergeState_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -10376,10 +9837,9 @@ public final class ZooKeeperProtos {
       "ner\030\002 \001(\0132\024.hbase.pb.ServerName\022\021\n\tthrea" +
       "d_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose" +
       "\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"\036\n\013SwitchSta" +
-      "te\022\017\n\007enabled\030\001 \001(\010\"B\n\022SplitAndMergeStat" +
-      "e\022\025\n\rsplit_enabled\030\001 \001(\010\022\025\n\rmerge_enable" +
-      "d\030\002 \001(\010BE\n*org.apache.hadoop.hbase.proto" +
-      "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "te\022\017\n\007enabled\030\001 \001(\010BE\n*org.apache.hadoop" +
+      ".hbase.protobuf.generatedB\017ZooKeeperProt" +
+      "osH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10458,12 +9918,6 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SwitchState_descriptor,
               new java.lang.String[] { "Enabled", });
-          internal_static_hbase_pb_SplitAndMergeState_descriptor =
-            getDescriptor().getMessageTypes().get(12);
-          internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_SplitAndMergeState_descriptor,
-              new java.lang.String[] { "SplitEnabled", "MergeEnabled", });
           return null;
         }
       };
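
For context, the deleted SplitAndMergeState was an ordinary protoc-generated message. A minimal sketch of how the removed lock code populated and serialized it, using the same calls as the deleted saveOriginalState() further down in this commit (the boolean values here are illustrative):

    ZooKeeperProtos.SplitAndMergeState.Builder builder =
        ZooKeeperProtos.SplitAndMergeState.newBuilder();
    builder.setSplitEnabled(true);
    builder.setMergeEnabled(false);
    // PB-magic-prefixed bytes, as written to the splitOrMergeState znode
    byte[] wire = ProtobufUtil.prependPBMagic(builder.build().toByteArray());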

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 23bbbf8..2bf36b4 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -291,7 +291,6 @@ message SetSplitOrMergeEnabledRequest {
   required bool enabled = 1;
   optional bool synchronous = 2;
   repeated MasterSwitchType switch_types = 3;
-  optional bool skip_lock = 4;
 }
 
 message SetSplitOrMergeEnabledResponse {
@@ -306,12 +305,6 @@ message IsSplitOrMergeEnabledResponse {
   required bool enabled = 1;
 }
 
-message ReleaseSplitOrMergeLockAndRollbackRequest {
-}
-
-message ReleaseSplitOrMergeLockAndRollbackResponse {
-}
-
 message NormalizeRequest {
 }
 
@@ -682,12 +675,6 @@ service MasterService {
     returns(IsSplitOrMergeEnabledResponse);
 
   /**
-   * Release lock and rollback state.
-   */
-  rpc ReleaseSplitOrMergeLockAndRollback(ReleaseSplitOrMergeLockAndRollbackRequest)
-    returns(ReleaseSplitOrMergeLockAndRollbackResponse);
-
-  /**
    * Run region normalizer. Can NOT run for various reasons. Check logs.
    */
   rpc Normalize(NormalizeRequest)
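
After the revert, SetSplitOrMergeEnabledRequest carries only the three remaining fields. A minimal sketch of how the client populates it, mirroring RequestConverter.buildSetSplitOrMergeEnabledRequest later in this commit (assuming the generated MasterSwitchType protobuf enum with SPLIT/MERGE values):

    SetSplitOrMergeEnabledRequest.Builder builder =
        SetSplitOrMergeEnabledRequest.newBuilder();
    builder.setEnabled(false);                       // required bool enabled = 1
    builder.setSynchronous(false);                   // optional bool synchronous = 2
    builder.addSwitchTypes(MasterSwitchType.SPLIT);  // repeated field 3
    SetSplitOrMergeEnabledRequest request = builder.build();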

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-protocol/src/main/protobuf/ZooKeeper.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 41c0e0e..8713cbd 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -166,11 +166,3 @@ message TableLock {
 message SwitchState {
   optional bool enabled = 1;
 }
-
-/**
- * State for split and merge, used in hbck
- */
-message SplitAndMergeState {
-  optional bool split_enabled = 1;
-  optional bool merge_enabled = 2;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 986ff6e..90af7c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.QosPriority;
@@ -1474,10 +1473,6 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkInitialized();
       boolean newValue = request.getEnabled();
-      boolean skipLock = request.getSkipLock();
-      if (!master.getSplitOrMergeTracker().lock(skipLock)) {
-        throw new DoNotRetryIOException("can't set splitOrMerge switch due to lock");
-      }
       for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
         MasterSwitchType switchType = convert(masterSwitchType);
         boolean oldValue = master.isSplitOrMergeEnabled(switchType);
@@ -1510,24 +1505,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public ReleaseSplitOrMergeLockAndRollbackResponse
-  releaseSplitOrMergeLockAndRollback(RpcController controller,
-    ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException {
-    try {
-      master.getSplitOrMergeTracker().releaseLockAndRollback();
-    } catch (KeeperException e) {
-      throw new ServiceException(e);
-    } catch (DeserializationException e) {
-      throw new ServiceException(e);
-    } catch (InterruptedException e) {
-      throw new ServiceException(e);
-    }
-    ReleaseSplitOrMergeLockAndRollbackResponse.Builder builder =
-      ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder();
-    return builder.build();
-  }
-
-  @Override
   public NormalizeResponse normalize(RpcController controller,
       NormalizeRequest request) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 82f39e3..c05973b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -311,6 +310,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private Map<TableName, Set<String>> skippedRegions = new HashMap<TableName, Set<String>>();
 
   ZooKeeperWatcher zkw = null;
+
   /**
    * Constructor
    *
@@ -691,8 +691,7 @@ public class HBaseFsck extends Configured implements Closeable {
     }
     boolean[] oldSplitAndMerge = null;
     if (shouldDisableSplitAndMerge()) {
-      admin.releaseSplitOrMergeLockAndRollback();
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
+      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
         MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
     }
 
@@ -709,7 +708,14 @@ public class HBaseFsck extends Configured implements Closeable {
 
       if (shouldDisableSplitAndMerge()) {
         if (oldSplitAndMerge != null) {
-          admin.releaseSplitOrMergeLockAndRollback();
+          if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
+            admin.setSplitOrMergeEnabled(true, false,
+              MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
+          } else if (oldSplitAndMerge[0]) {
+            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
+          } else if (oldSplitAndMerge[1]) {
+            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
+          }
         }
       }
     }
@@ -4226,12 +4232,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Disable the split and merge
    */
   public static void setDisableSplitAndMerge() {
-    setDisableSplitAndMerge(true);
-  }
-
-  @VisibleForTesting
-  public static void setDisableSplitAndMerge(boolean flag) {
-    disableSplitAndMerge = flag;
+    disableSplitAndMerge = true;
   }
 
   /**
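
The HBaseFsck change above swaps the lock-and-rollback call for a plain save-and-restore of the switch values. Pulled out of the diff, the resulting pattern looks like this (a sketch only; the actual restore sits in the finally path of onlineHbck(), and the diff folds the both-enabled case into a single call):

    boolean[] oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
    try {
      // ... repair work runs with splits and merges disabled ...
    } finally {
      if (oldSplitAndMerge != null) {
        if (oldSplitAndMerge[0]) {
          admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
        }
        if (oldSplitAndMerge[1]) {
          admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
        }
      }
    }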

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index b975c43..2dad987 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -27,7 +25,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.zookeeper.KeeperException;
@@ -40,13 +37,8 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class SplitOrMergeTracker {
 
-  public static final String LOCK = "splitOrMergeLock";
-  public static final String STATE = "splitOrMergeState";
-
   private String splitZnode;
   private String mergeZnode;
-  private String splitOrMergeLock;
-  private ZooKeeperWatcher watcher;
 
   private SwitchStateTracker splitStateTracker;
   private SwitchStateTracker mergeStateTracker;
@@ -57,9 +49,6 @@ public class SplitOrMergeTracker {
       if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) {
         ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode());
       }
-      if (ZKUtil.checkExists(watcher, watcher.getSwitchLockZNode()) < 0) {
-        ZKUtil.createAndFailSilent(watcher, watcher.getSwitchLockZNode());
-      }
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     }
@@ -67,12 +56,8 @@ public class SplitOrMergeTracker {
       conf.get("zookeeper.znode.switch.split", "split"));
     mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
       conf.get("zookeeper.znode.switch.merge", "merge"));
-
-    splitOrMergeLock = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), LOCK);
-
     splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
     mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
-    this.watcher = watcher;
   }
 
   public void start() {
@@ -106,76 +91,6 @@ public class SplitOrMergeTracker {
     }
   }
 
-  /**
-   *  rollback the original state and delete lock node.
-   * */
-  public void releaseLockAndRollback()
-    throws KeeperException, DeserializationException, InterruptedException {
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      List<ZKUtil.ZKUtilOp> ops = new ArrayList<>();
-      rollback(ops);
-      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeLock));
-      ZKUtil.multiOrSequential(watcher, ops, false);
-    }
-  }
-
-  // If there is old states of switch on zk, do rollback
-  private void rollback(List<ZKUtil.ZKUtilOp> ops) throws KeeperException, InterruptedException, DeserializationException {
-    String splitOrMergeState = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
-      SplitOrMergeTracker.STATE);
-    if (ZKUtil.checkExists(watcher, splitOrMergeState) != -1) {
-      byte[] bytes = ZKUtil.getData(watcher, splitOrMergeState);
-      ProtobufUtil.expectPBMagicPrefix(bytes);
-      ZooKeeperProtos.SplitAndMergeState.Builder builder =
-        ZooKeeperProtos.SplitAndMergeState.newBuilder();
-      try {
-        int magicLen = ProtobufUtil.lengthOfPBMagic();
-        ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
-      } catch (IOException e) {
-        throw new DeserializationException(e);
-      }
-      ZooKeeperProtos.SplitAndMergeState splitAndMergeState =  builder.build();
-      splitStateTracker.setSwitchEnabled(splitAndMergeState.hasSplitEnabled());
-      mergeStateTracker.setSwitchEnabled(splitAndMergeState.hasMergeEnabled());
-      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeState));
-    }
-  }
-
-  /**
-   *  If there is no lock, you could acquire the lock.
-   *  After we create lock on zk, we save original splitOrMerge switches on zk.
-   *  @param skipLock if true, it means we will skip the lock action
-   *                  but we still need to check whether the lock exists or not.
-   *  @return true, lock successfully.  otherwise, false
-   * */
-  public boolean lock(boolean skipLock) throws KeeperException {
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      return false;
-    }
-    if (skipLock) {
-      return true;
-    }
-    ZKUtil.createAndFailSilent(watcher,  splitOrMergeLock);
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      saveOriginalState();
-      return true;
-    }
-    return false;
-  }
-
-  private void saveOriginalState() throws KeeperException {
-    boolean splitEnabled = isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean mergeEnabled = isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-    String splitOrMergeStates = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
-      SplitOrMergeTracker.STATE);
-    ZooKeeperProtos.SplitAndMergeState.Builder builder
-      = ZooKeeperProtos.SplitAndMergeState.newBuilder();
-    builder.setSplitEnabled(splitEnabled);
-    builder.setMergeEnabled(mergeEnabled);
-    ZKUtil.createSetData(watcher,  splitOrMergeStates,
-      ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
-  }
-
   private static class SwitchStateTracker extends ZooKeeperNodeTracker {
 
     public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
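
The deleted rollback() above used HBase's standard znode serialization idiom: the payload is prefixed with a PB magic marker that a reader strips before parsing. A standalone sketch of that read path (znode path and exception handling elided; all calls appear verbatim in the removed code):

    byte[] bytes = ZKUtil.getData(watcher, znode);
    ProtobufUtil.expectPBMagicPrefix(bytes);
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.SplitAndMergeState.Builder builder =
        ZooKeeperProtos.SplitAndMergeState.newBuilder();
    ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
    ZooKeeperProtos.SplitAndMergeState state = builder.build();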

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
index e10cde5..f6b6361 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
@@ -41,7 +41,6 @@ import java.util.List;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 @Category({MediumTests.class, ClientTests.class})
 public class TestSplitOrMergeStatus {
@@ -77,15 +76,13 @@ public class TestSplitOrMergeStatus {
 
     Admin admin = TEST_UTIL.getAdmin();
     initSwitchStatus(admin);
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
-      true, MasterSwitchType.SPLIT);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     admin.split(t.getName());
     int count = waitOnSplitOrMerge(t).size();
     assertTrue(orignalCount == count);
-
-    results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
+    results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.split(t.getName());
@@ -110,8 +107,7 @@ public class TestSplitOrMergeStatus {
 
     waitForMergable(admin, name);
     int orignalCount = locator.getAllRegionLocations().size();
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
-      true, MasterSwitchType.MERGE);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     List<HRegionInfo> regions = admin.getTableRegions(t.getName());
@@ -122,7 +118,7 @@ public class TestSplitOrMergeStatus {
     assertTrue(orignalCount == count);
 
     waitForMergable(admin, name);
-    results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
+    results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
@@ -135,7 +131,7 @@ public class TestSplitOrMergeStatus {
   @Test
   public void testMultiSwitches() throws IOException {
     Admin admin = TEST_UTIL.getAdmin();
-    boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true,
+    boolean[] switches = admin.setSplitOrMergeEnabled(false, false,
       MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
     for (boolean s : switches){
       assertTrue(s);
@@ -145,34 +141,12 @@ public class TestSplitOrMergeStatus {
     admin.close();
   }
 
-  @Test
-  public void testSwitchLock() throws IOException {
-    Admin admin = TEST_UTIL.getAdmin();
-    admin.setSplitOrMergeEnabled(false, false, false,
-      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    try {
-      admin.setSplitOrMergeEnabled(false, false, true,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-      fail();
-    } catch (IOException e) {
-      LOG.info("", e);
-    }
-    admin.releaseSplitOrMergeLockAndRollback();
-    try {
-      admin.setSplitOrMergeEnabled(true, false, true,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    } catch (IOException e) {
-      fail();
-    }
-    admin.close();
-  }
-  
   private void initSwitchStatus(Admin admin) throws IOException {
     if (!admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
-      admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
+      admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
     }
     if (!admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
-      admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
+      admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
     }
     assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT));
     assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index ecfe521..7f0f6db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -72,8 +71,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -95,8 +92,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
 import static org.junit.Assert.*;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 @Category({MiscTests.class, LargeTests.class})
 public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@@ -1851,53 +1846,4 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
     hbck.close();
   }
-
-  /**
-   *  See HBASE-15406
-   * */
-  @Test
-  public void testSplitOrMergeStatWhenHBCKAbort() throws Exception {
-    admin.setSplitOrMergeEnabled(true, false, true,
-      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(oldSplit);
-    assertTrue(oldMerge);
-
-    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
-    HBaseFsck hbck = new HBaseFsck(conf, exec);
-    HBaseFsck.setDisplayFullReport(); // i.e. -details
-    final HBaseFsck spiedHbck = spy(hbck);
-    doAnswer(new Answer() {
-      @Override
-      public Object answer(InvocationOnMock invocation) throws Throwable {
-        // we close splitOrMerge flag in hbck, so in finally hbck will not set splitOrMerge back.
-        spiedHbck.setDisableSplitAndMerge(false);
-        return null;
-      }
-    }).when(spiedHbck).onlineConsistencyRepair();
-    spiedHbck.setDisableSplitAndMerge();
-    spiedHbck.connect();
-    spiedHbck.onlineHbck();
-    spiedHbck.close();
-
-    boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-    assertFalse(split);
-    assertFalse(merge);
-
-    // rerun hbck to repair the switches state
-    hbck = new HBaseFsck(conf, exec);
-    hbck.setDisableSplitAndMerge();
-    hbck.connect();
-    hbck.onlineHbck();
-    hbck.close();
-
-    split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(split);
-    assertTrue(merge);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index c6e6f48..e4f52e9 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -151,7 +151,7 @@ module Hbase
       end
       @admin.setSplitOrMergeEnabled(
         java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false),
-        java.lang.Boolean.valueOf(true), switch_type)[0]
+        switch_type)[0]
     end
 
     #----------------------------------------------------------------------------------------------


[3/3] hbase git commit: Revert HBASE-15406: Split / merge switch left disabled after early termination of hbck (HBASE-16008 will address this issue)

Posted by sy...@apache.org.
Revert HBASE-15406: Split / merge switch left disabled after early termination of hbck (HBASE-16008 will address this issue)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd0b85e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd0b85e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd0b85e0

Branch: refs/heads/master
Commit: cd0b85e0a814821489ddbe9053a8fc222da55609
Parents: 744248c
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Fri Jul 22 20:09:03 2016 -0700
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Fri Jul 22 20:09:03 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |   13 -
 .../hbase/client/ConnectionImplementation.java  |    7 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   18 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   11 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java       |   10 -
 .../hbase/protobuf/generated/MasterProtos.java  | 1903 +++++-------------
 .../protobuf/generated/ZooKeeperProtos.java     |  552 +----
 hbase-protocol/src/main/protobuf/Master.proto   |   13 -
 .../src/main/protobuf/ZooKeeper.proto           |    8 -
 .../hadoop/hbase/master/MasterRpcServices.java  |   23 -
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   21 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java    |   85 -
 .../hbase/client/TestSplitOrMergeStatus.java    |   40 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   54 -
 hbase-shell/src/main/ruby/hbase/admin.rb        |    2 +-
 15 files changed, 540 insertions(+), 2220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 34e0a89..df79dcf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1729,15 +1729,10 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param enabled enabled or not
    * @param synchronous If true, it waits until current split() call, if outstanding, to return.
-   * @param skipLock if false, we will do lock before change switch.
-   *                 with the lock, other requests to change the switch will be rejected!
-   *                 And when you set it to be false,
-   *                 you should call {@link #releaseSplitOrMergeLockAndRollback()} by yourself
    * @param switchTypes switchType list {@link MasterSwitchType}
    * @return Previous switch value array
    */
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
-                                   final boolean skipLock,
                                    final MasterSwitchType... switchTypes) throws IOException;
 
   /**
@@ -1746,12 +1741,4 @@ public interface Admin extends Abortable, Closeable {
    * @return true if the switch is enabled, false otherwise.
    */
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
-
-  /**
-   *  You should call this method after you call
-   *  {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, MasterSwitchType...)}
-   *  with skipLock be false, this method will release the lock created by above method
-   *  and rollback the switch state to be original state before you change switch
-   * */
-  void releaseSplitOrMergeLockAndRollback() throws IOException;
 }
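
With the skipLock parameter gone, the Admin interface is back to a plain toggle whose return array lines up with the switchTypes argument order. A minimal caller-side sketch (admin is any connected Admin instance):

    boolean[] prev = admin.setSplitOrMergeEnabled(false, false,
        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
    // prev[0] and prev[1] are the previous split and merge values
    boolean splitEnabled = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);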

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 1fe29c8..9b913c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1664,13 +1664,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }
 
       @Override
-      public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse
-        releaseSplitOrMergeLockAndRollback(RpcController controller,
-        MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException {
-        return stub.releaseSplitOrMergeLockAndRollback(controller, request);
-      }
-
-      @Override
       public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
           IsNormalizerEnabledRequest request) throws ServiceException {
         return stub.isNormalizerEnabled(controller, request);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ac98bdb..074fe7f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3704,13 +3704,13 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
-    final boolean skipLock, final MasterSwitchType... switchTypes) throws IOException {
+                                          final MasterSwitchType... switchTypes)
+    throws IOException {
     return executeCallable(new MasterCallable<boolean[]>(getConnection()) {
       @Override
       public boolean[] call(int callTimeout) throws ServiceException {
         MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null,
-          RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous,
-            skipLock, switchTypes));
+          RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes));
         boolean[] result = new boolean[switchTypes.length];
         int i = 0;
         for (Boolean prevValue : response.getPrevValueList()) {
@@ -3732,18 +3732,6 @@ public class HBaseAdmin implements Admin {
     });
   }
 
-  @Override
-  public void releaseSplitOrMergeLockAndRollback() throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection()) {
-      @Override
-      public Void call(int callTimeout) throws ServiceException {
-        master.releaseSplitOrMergeLockAndRollback(null,
-          RequestConverter.buildReleaseSplitOrMergeLockAndRollbackRequest());
-        return null;
-      }
-    });
-  }
-
   private HRegionInfo getMobRegionInfo(TableName tableName) {
     return new HRegionInfo(tableName, Bytes.toBytes(".mob"),
             HConstants.EMPTY_END_ROW, false, 0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 5fe2016..b75d2b8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -103,7 +103,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -1739,13 +1738,6 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  public static ReleaseSplitOrMergeLockAndRollbackRequest
-    buildReleaseSplitOrMergeLockAndRollbackRequest() {
-    ReleaseSplitOrMergeLockAndRollbackRequest.Builder builder =
-      ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder();
-    return builder.build();
-  }
-
   /**
    * Creates a protocol buffer SetSplitOrMergeEnabledRequest
    *
@@ -1756,11 +1748,10 @@ public final class RequestConverter {
    * @return a SetSplitOrMergeEnabledRequest
    */
   public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled,
-    boolean synchronous, boolean skipLock, MasterSwitchType... switchTypes) {
+    boolean synchronous, MasterSwitchType... switchTypes) {
     SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
     builder.setEnabled(enabled);
     builder.setSynchronous(synchronous);
-    builder.setSkipLock(skipLock);
     for (MasterSwitchType switchType : switchTypes) {
       builder.addSwitchTypes(convert(switchType));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index ff3d1c7..7cbfc98 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -117,8 +117,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   private String regionNormalizerZNode;
   // znode containing the state of all switches, currently there are split and merge child node.
   private String switchZNode;
-  // znode containing the lock for the switches
-  private String switchLockZNode;
   // znode containing the lock for the tables
   public String tableLockZNode;
   // znode containing the state of recovering regions
@@ -438,7 +436,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     regionNormalizerZNode = ZKUtil.joinZNode(baseZNode,
       conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
     switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
-    switchLockZNode = ZKUtil.joinZNode(switchZNode, "locks");
     tableLockZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.tableLock", "table-lock"));
     recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
@@ -782,11 +779,4 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   public String getSwitchZNode() {
     return switchZNode;
   }
-
-  /**
-   *  @return ZK node for switchLock node.
-   * */
-  public String getSwitchLockZNode() {
-    return switchLockZNode;
-  }
 }

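With the locks child gone, switch state hangs directly off the switch znode. A sketch of how that path resolves under default configuration (assumptions: imports of Configuration, HBaseConfiguration, and ZKUtil; zookeeper.znode.parent left at its "/hbase" default):

    // Assuming no overrides, the switch state lives at "/hbase/switch";
    // the lock znode removed by this revert would have been
    // "/hbase/switch/locks".
    Configuration conf = HBaseConfiguration.create();
    String baseZNode = conf.get("zookeeper.znode.parent", "/hbase");
    String switchZNode =
        ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
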

[2/3] hbase git commit: Revert HBASE-15406: Split / merge switch left disabled after early termination of hbck (HBASE-16008 will address this issue)

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/cd0b85e0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 4a92e14..6daf889 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -29158,16 +29158,6 @@ public final class MasterProtos {
      * <code>repeated .hbase.pb.MasterSwitchType switch_types = 3;</code>
      */
     org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType getSwitchTypes(int index);
-
-    // optional bool skip_lock = 4;
-    /**
-     * <code>optional bool skip_lock = 4;</code>
-     */
-    boolean hasSkipLock();
-    /**
-     * <code>optional bool skip_lock = 4;</code>
-     */
-    boolean getSkipLock();
   }
   /**
    * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
@@ -29263,11 +29253,6 @@ public final class MasterProtos {
               input.popLimit(oldLimit);
               break;
             }
-            case 32: {
-              bitField0_ |= 0x00000004;
-              skipLock_ = input.readBool();
-              break;
-            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -29365,27 +29350,10 @@ public final class MasterProtos {
       return switchTypes_.get(index);
     }
 
-    // optional bool skip_lock = 4;
-    public static final int SKIP_LOCK_FIELD_NUMBER = 4;
-    private boolean skipLock_;
-    /**
-     * <code>optional bool skip_lock = 4;</code>
-     */
-    public boolean hasSkipLock() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional bool skip_lock = 4;</code>
-     */
-    public boolean getSkipLock() {
-      return skipLock_;
-    }
-
     private void initFields() {
       enabled_ = false;
       synchronous_ = false;
       switchTypes_ = java.util.Collections.emptyList();
-      skipLock_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -29412,9 +29380,6 @@ public final class MasterProtos {
       for (int i = 0; i < switchTypes_.size(); i++) {
         output.writeEnum(3, switchTypes_.get(i).getNumber());
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeBool(4, skipLock_);
-      }
       getUnknownFields().writeTo(output);
     }
 
@@ -29441,10 +29406,6 @@ public final class MasterProtos {
         size += dataSize;
         size += 1 * switchTypes_.size();
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(4, skipLock_);
-      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -29480,11 +29441,6 @@ public final class MasterProtos {
       }
       result = result && getSwitchTypesList()
           .equals(other.getSwitchTypesList());
-      result = result && (hasSkipLock() == other.hasSkipLock());
-      if (hasSkipLock()) {
-        result = result && (getSkipLock()
-            == other.getSkipLock());
-      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -29510,10 +29466,6 @@ public final class MasterProtos {
         hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER;
         hash = (53 * hash) + hashEnumList(getSwitchTypesList());
       }
-      if (hasSkipLock()) {
-        hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getSkipLock());
-      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -29629,8 +29581,6 @@ public final class MasterProtos {
         bitField0_ = (bitField0_ & ~0x00000002);
         switchTypes_ = java.util.Collections.emptyList();
         bitField0_ = (bitField0_ & ~0x00000004);
-        skipLock_ = false;
-        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
@@ -29672,10 +29622,6 @@ public final class MasterProtos {
           bitField0_ = (bitField0_ & ~0x00000004);
         }
         result.switchTypes_ = switchTypes_;
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.skipLock_ = skipLock_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -29708,9 +29654,6 @@ public final class MasterProtos {
           }
           onChanged();
         }
-        if (other.hasSkipLock()) {
-          setSkipLock(other.getSkipLock());
-        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -29880,39 +29823,6 @@ public final class MasterProtos {
         return this;
       }
 
-      // optional bool skip_lock = 4;
-      private boolean skipLock_ ;
-      /**
-       * <code>optional bool skip_lock = 4;</code>
-       */
-      public boolean hasSkipLock() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <code>optional bool skip_lock = 4;</code>
-       */
-      public boolean getSkipLock() {
-        return skipLock_;
-      }
-      /**
-       * <code>optional bool skip_lock = 4;</code>
-       */
-      public Builder setSkipLock(boolean value) {
-        bitField0_ |= 0x00000008;
-        skipLock_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool skip_lock = 4;</code>
-       */
-      public Builder clearSkipLock() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        skipLock_ = false;
-        onChanged();
-        return this;
-      }
-
       // @@protoc_insertion_point(builder_scope:hbase.pb.SetSplitOrMergeEnabledRequest)
     }
 
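For readers tracing the bitField0_ arithmetic above: in protobuf-java 2.x each optional field claims one presence bit, which is why dropping skip_lock also removes the 0x00000004/0x00000008 mask bookkeeping. A standalone illustration of the idiom (hypothetical code, not part of HBase):

    // Minimal sketch of the generated presence-bit pattern for an
    // optional bool field.
    final class PresenceBitExample {
      private int bitField0_;
      private boolean skipLock_;

      boolean hasSkipLock() { return (bitField0_ & 0x00000004) == 0x00000004; }
      boolean getSkipLock() { return skipLock_; }

      void setSkipLock(boolean value) {
        bitField0_ |= 0x00000004;  // record that the field was explicitly set
        skipLock_ = value;
      }
    }
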
@@ -31318,682 +31228,6 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.IsSplitOrMergeEnabledResponse)
   }
 
-  public interface ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest}
-   */
-  public static final class ReleaseSplitOrMergeLockAndRollbackRequest extends
-      com.google.protobuf.GeneratedMessage
-      implements ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder {
-    // Use ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder() to construct.
-    private ReleaseSplitOrMergeLockAndRollbackRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ReleaseSplitOrMergeLockAndRollbackRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ReleaseSplitOrMergeLockAndRollbackRequest defaultInstance;
-    public static ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ReleaseSplitOrMergeLockAndRollbackRequest(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ReleaseSplitOrMergeLockAndRollbackRequest> PARSER =
-        new com.google.protobuf.AbstractParser<ReleaseSplitOrMergeLockAndRollbackRequest>() {
-      public ReleaseSplitOrMergeLockAndRollbackRequest parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ReleaseSplitOrMergeLockAndRollbackRequest(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ReleaseSplitOrMergeLockAndRollbackRequest> getParserForType() {
-      return PARSER;
-    }
-
-    private void initFields() {
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) obj;
-
-      boolean result = true;
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest(this);
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest)
-    }
-
-    static {
-      defaultInstance = new ReleaseSplitOrMergeLockAndRollbackRequest(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest)
-  }
-
-  public interface ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse}
-   */
-  public static final class ReleaseSplitOrMergeLockAndRollbackResponse extends
-      com.google.protobuf.GeneratedMessage
-      implements ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder {
-    // Use ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder() to construct.
-    private ReleaseSplitOrMergeLockAndRollbackResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ReleaseSplitOrMergeLockAndRollbackResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ReleaseSplitOrMergeLockAndRollbackResponse defaultInstance;
-    public static ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ReleaseSplitOrMergeLockAndRollbackResponse(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ReleaseSplitOrMergeLockAndRollbackResponse> PARSER =
-        new com.google.protobuf.AbstractParser<ReleaseSplitOrMergeLockAndRollbackResponse>() {
-      public ReleaseSplitOrMergeLockAndRollbackResponse parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ReleaseSplitOrMergeLockAndRollbackResponse(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ReleaseSplitOrMergeLockAndRollbackResponse> getParserForType() {
-      return PARSER;
-    }
-
-    private void initFields() {
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) obj;
-
-      boolean result = true;
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse(this);
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse)
-    }
-
-    static {
-      defaultInstance = new ReleaseSplitOrMergeLockAndRollbackResponse(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse)
-  }
-
   public interface NormalizeRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
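Both removed ReleaseSplitOrMergeLockAndRollback messages were empty markers with no fields, like the surviving NormalizeRequest above; protobuf-java still generates a few hundred lines of parser, builder, and equality code for each, which is most of what this hunk deletes. A hypothetical usage sketch for such an empty message:

    // Empty request messages carry no data; callers just build (or reuse)
    // the default instance.
    NormalizeRequest req = NormalizeRequest.newBuilder().build();
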
@@ -60879,19 +60113,6 @@ public final class MasterProtos {
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse> done);
 
       /**
-       * <code>rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse);</code>
-       *
-       * <pre>
-       **
-       * Release lock and rollback state.
-       * </pre>
-       */
-      public abstract void releaseSplitOrMergeLockAndRollback(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse> done);
-
-      /**
        * <code>rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse);</code>
        *
        * <pre>
@@ -61476,14 +60697,6 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
-        public  void releaseSplitOrMergeLockAndRollback(
-            com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
-            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse> done) {
-          impl.releaseSplitOrMergeLockAndRollback(controller, request, done);
-        }
-
-        @java.lang.Override
         public  void normalize(
             com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request,
@@ -61798,66 +61011,64 @@ public final class MasterProtos {
             case 25:
               return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
             case 26:
-              return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request);
-            case 27:
               return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
-            case 28:
+            case 27:
               return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
-            case 29:
+            case 28:
               return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
-            case 30:
+            case 29:
               return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
-            case 31:
+            case 30:
               return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
-            case 32:
+            case 31:
               return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
-            case 33:
+            case 32:
               return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
-            case 34:
+            case 33:
               return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
-            case 35:
+            case 34:
               return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
-            case 36:
+            case 35:
               return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
-            case 37:
+            case 36:
               return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
-            case 38:
+            case 37:
               return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
-            case 39:
+            case 38:
               return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
-            case 40:
+            case 39:
               return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
-            case 41:
+            case 40:
               return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
-            case 42:
+            case 41:
               return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
-            case 43:
+            case 42:
               return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
-            case 44:
+            case 43:
               return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
-            case 45:
+            case 44:
               return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
-            case 46:
+            case 45:
               return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
-            case 47:
+            case 46:
               return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
-            case 48:
+            case 47:
               return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
-            case 49:
+            case 48:
               return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
-            case 50:
+            case 49:
               return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
-            case 51:
+            case 50:
               return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
-            case 52:
+            case 51:
               return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
-            case 53:
+            case 52:
               return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
-            case 54:
+            case 53:
               return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
-            case 55:
+            case 54:
               return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
-            case 56:
+            case 55:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
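All of the case renumbering above follows from one fact: generated service dispatch switches on a method's ordinal position in the .proto service definition, so deleting the RPC at index 26 shifts every later method down by one. A hypothetical helper capturing the rule (not HBase code):

    // New dispatch index after removing the method at removedIndex
    // from the service definition.
    final class MethodIndexShift {
      static int indexAfterRemoval(int oldIndex, int removedIndex) {
        if (oldIndex == removedIndex) {
          throw new IllegalArgumentException("method was removed");
        }
        return oldIndex < removedIndex ? oldIndex : oldIndex - 1;
      }
    }
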
@@ -61926,66 +61137,64 @@ public final class MasterProtos {
             case 25:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
             case 26:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
-            case 27:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
-            case 28:
+            case 27:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
-            case 29:
+            case 28:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
-            case 30:
+            case 29:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
-            case 31:
+            case 30:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
-            case 32:
+            case 31:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
-            case 33:
+            case 32:
               return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
-            case 34:
+            case 33:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
-            case 35:
+            case 34:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
-            case 36:
+            case 35:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
-            case 37:
+            case 36:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
-            case 38:
+            case 37:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+            case 38:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 39:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 40:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
-            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
-            case 42:
+            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
-            case 43:
+            case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
-            case 44:
+            case 43:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
-            case 45:
+            case 44:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
-            case 46:
+            case 45:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
-            case 47:
+            case 46:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
-            case 48:
+            case 47:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
-            case 49:
+            case 48:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
-            case 50:
+            case 49:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
-            case 51:
+            case 50:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
-            case 52:
+            case 51:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
-            case 53:
+            case 52:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
-            case 54:
+            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
-            case 55:
+            case 54:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
-            case 56:
+            case 55:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -62054,66 +61263,64 @@ public final class MasterProtos {
             case 25:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
             case 26:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
-            case 27:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
-            case 28:
+            case 27:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
-            case 29:
+            case 28:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
-            case 30:
+            case 29:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
-            case 31:
+            case 30:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
-            case 32:
+            case 31:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
-            case 33:
+            case 32:
               return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
-            case 34:
+            case 33:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
-            case 35:
+            case 34:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
-            case 36:
+            case 35:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
-            case 37:
+            case 36:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
-            case 38:
+            case 37:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+            case 38:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 39:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 40:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
-            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
-            case 42:
+            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
-            case 43:
+            case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
-            case 44:
+            case 43:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
-            case 45:
+            case 44:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
-            case 46:
+            case 45:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
-            case 47:
+            case 46:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
-            case 48:
+            case 47:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
-            case 49:
+            case 48:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
-            case 50:
+            case 49:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+            case 50:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 51:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 52:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
-            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
-            case 54:
+            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
-            case 55:
+            case 54:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
-            case 56:
+            case 55:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
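
Worth spelling out why this revert touches so many unrelated-looking case labels: protobuf 2.5 generated services address methods purely by their zero-based position in the service descriptor, so deleting the ReleaseSplitOrMergeLockAndRollback RPC (index 26) shifts every subsequent method down by one, and every index-keyed switch has to be regenerated in lockstep. A minimal standalone Java sketch of that index contract (toy names, deliberately simplified; not HBase or protobuf code):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    // Toy model of protobuf's index-addressed service dispatch.
    public class IndexDispatchDemo {
      static final List<Function<String, String>> METHODS = new ArrayList<>();

      static {
        METHODS.add(req -> "releaseSplitOrMergeLockAndRollback(" + req + ")");
        METHODS.add(req -> "normalize(" + req + ")");
        METHODS.add(req -> "setNormalizerRunning(" + req + ")");
      }

      // Mirrors the generated callMethod(): only an integer index
      // identifies the target method; no name travels with the request.
      static String callMethod(int methodIndex, String request) {
        return METHODS.get(methodIndex).apply(request);
      }

      public static void main(String[] args) {
        System.out.println(callMethod(1, "req")); // normalize(req)
        METHODS.remove(0); // drop a method, as this revert drops index 26
        System.out.println(callMethod(1, "req")); // setNormalizerRunning(req)
        // Every later method shifted down by one, which is why each
        // index-keyed switch in this file is regenerated together.
      }
    }
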
@@ -62453,19 +61660,6 @@ public final class MasterProtos {
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse> done);
 
     /**
-     * <code>rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse);</code>
-     *
-     * <pre>
-     **
-     * Release lock and rollback state.
-     * </pre>
-     */
-    public abstract void releaseSplitOrMergeLockAndRollback(
-        com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
-        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse> done);
-
-    /**
      * <code>rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse);</code>
      *
      * <pre>
@@ -62989,156 +62183,151 @@ public final class MasterProtos {
               done));
           return;
         case 26:
-          this.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse>specializeCallback(
-              done));
-          return;
-        case 27:
           this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse>specializeCallback(
               done));
           return;
-        case 28:
+        case 27:
           this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse>specializeCallback(
               done));
           return;
-        case 29:
+        case 28:
           this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse>specializeCallback(
               done));
           return;
-        case 30:
+        case 29:
           this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse>specializeCallback(
               done));
           return;
-        case 31:
+        case 30:
           this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse>specializeCallback(
               done));
           return;
-        case 32:
+        case 31:
           this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse>specializeCallback(
               done));
           return;
-        case 33:
+        case 32:
           this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse>specializeCallback(
               done));
           return;
-        case 34:
+        case 33:
           this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse>specializeCallback(
               done));
           return;
-        case 35:
+        case 34:
           this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse>specializeCallback(
               done));
           return;
-        case 36:
+        case 35:
           this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse>specializeCallback(
               done));
           return;
-        case 37:
+        case 36:
           this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse>specializeCallback(
               done));
           return;
-        case 38:
+        case 37:
           this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse>specializeCallback(
               done));
           return;
-        case 39:
+        case 38:
           this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse>specializeCallback(
               done));
           return;
-        case 40:
+        case 39:
           this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse>specializeCallback(
               done));
           return;
-        case 41:
+        case 40:
           this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse>specializeCallback(
               done));
           return;
-        case 42:
+        case 41:
           this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse>specializeCallback(
               done));
           return;
-        case 43:
+        case 42:
           this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse>specializeCallback(
               done));
           return;
-        case 44:
+        case 43:
           this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse>specializeCallback(
               done));
           return;
-        case 45:
+        case 44:
           this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse>specializeCallback(
               done));
           return;
-        case 46:
+        case 45:
           this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse>specializeCallback(
               done));
           return;
-        case 47:
+        case 46:
           this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse>specializeCallback(
               done));
           return;
-        case 48:
+        case 47:
           this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse>specializeCallback(
               done));
           return;
-        case 49:
+        case 48:
           this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse>specializeCallback(
               done));
           return;
-        case 50:
+        case 49:
           this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse>specializeCallback(
               done));
           return;
-        case 51:
+        case 50:
           this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
           return;
-        case 52:
+        case 51:
           this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
           return;
-        case 53:
+        case 52:
           this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse>specializeCallback(
               done));
           return;
-        case 54:
+        case 53:
           this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse>specializeCallback(
               done));
           return;
-        case 55:
+        case 54:
           this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse>specializeCallback(
               done));
           return;
-        case 56:
+        case 55:
           this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback(
               done));
@@ -63210,66 +62399,64 @@ public final class MasterProtos {
         case 25:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
         case 26:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
-        case 27:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
-        case 28:
+        case 27:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
-        case 29:
+        case 28:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
-        case 30:
+        case 29:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
-        case 31:
+        case 30:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
-        case 32:
+        case 31:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
-        case 33:
+        case 32:
           return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
-        case 34:
+        case 33:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
-        case 35:
+        case 34:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
-        case 36:
+        case 35:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
-        case 37:
+        case 36:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
-        case 38:
+        case 37:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+        case 38:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 39:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 40:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
-        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
-        case 42:
+        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
-        case 43:
+        case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
-        case 44:
+        case 43:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
-        case 45:
+        case 44:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
-        case 46:
+        case 45:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
-        case 47:
+        case 46:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
-        case 48:
+        case 47:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
-        case 49:
+        case 48:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
-        case 50:
+        case 49:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
-        case 51:
+        case 50:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
-        case 52:
+        case 51:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
-        case 53:
+        case 52:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
-        case 54:
+        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
-        case 55:
+        case 54:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
-        case 56:
+        case 55:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
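
The getRequestPrototype table above (and the getResponsePrototype table that follows) serves the RPC layer as a set of parse templates: the framework selects a prototype by the same method index used for dispatch, then decodes the incoming bytes into that concrete type, so these tables must stay aligned with the dispatch switch. A hedged sketch of that generic decode step, assuming protobuf-java 2.5.x on the classpath (the decode helper is illustrative, not HBase code):

    import com.google.protobuf.InvalidProtocolBufferException;
    import com.google.protobuf.Message;

    public final class PrototypeDecode {
      private PrototypeDecode() {}

      // The RPC layer obtains `prototype` from getRequestPrototype(method)
      // and uses it to parse the raw bytes into the matching message type.
      public static Message decode(Message prototype, byte[] wireBytes)
          throws InvalidProtocolBufferException {
        return prototype.newBuilderForType().mergeFrom(wireBytes).build();
      }
    }
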
@@ -63338,66 +62525,64 @@ public final class MasterProtos {
         case 25:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
         case 26:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
-        case 27:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
-        case 28:
+        case 27:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
-        case 29:
+        case 28:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
-        case 30:
+        case 29:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
-        case 31:
+        case 30:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
-        case 32:
+        case 31:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
-        case 33:
+        case 32:
           return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
-        case 34:
+        case 33:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
-        case 35:
+        case 34:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
-        case 36:
+        case 35:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
-        case 37:
+        case 36:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
-        case 38:
+        case 37:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+        case 38:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 39:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 40:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
-        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
-        case 42:
+        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
-        case 43:
+        case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
-        case 44:
+        case 43:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
-        case 45:
+        case 44:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
-        case 46:
+        case 45:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
-        case 47:
+        case 46:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
-        case 48:
+        case 47:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
-        case 49:
+        case 48:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
-        case 50:
+        case 49:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+        case 50:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 51:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 52:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
-        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
-        case 54:
+        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
-        case 55:
+        case 54:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
-        case 56:
+        case 55:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
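
The same contract binds the client-side stubs in the hunk below: each generated stub method hard-codes getDescriptor().getMethods().get(N), and N must equal the server's case label for that method, which is why normalize() moves from index 27 to 26 once the rollback RPC is gone. A toy sketch of how a stub pins an index (illustrative names only, not the HBase API):

    // Toy client-side counterpart of index-addressed dispatch.
    public class StubIndexDemo {
      interface Channel {
        String call(int methodIndex, String request);
      }

      static final class MasterStub {
        private final Channel channel;

        MasterStub(Channel channel) {
          this.channel = channel;
        }

        // Like the generated stubs, each method hard-codes its
        // descriptor index; after this revert it must say 26, not 27.
        String normalize(String request) {
          return channel.call(26, request);
        }
      }

      public static void main(String[] args) {
        MasterStub stub = new MasterStub((idx, req) -> "dispatched method " + idx);
        System.out.println(stub.normalize("req")); // dispatched method 26
      }
    }
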
@@ -63810,27 +62995,12 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()));
       }
 
-      public  void releaseSplitOrMergeLockAndRollback(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse> done) {
-        channel.callMethod(
-          getDescriptor().getMethods().get(26),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(),
-          com.google.protobuf.RpcUtil.generalizeCallback(
-            done,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()));
-      }
-
       public  void normalize(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(27),
+          getDescriptor().getMethods().get(26),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(),
@@ -63845,7 +63015,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(28),
+          getDescriptor().getMethods().get(27),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(),
@@ -63860,7 +63030,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(29),
+          getDescriptor().getMethods().get(28),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(),
@@ -63875,7 +63045,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(30),
+          getDescriptor().getMethods().get(29),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(),
@@ -63890,7 +63060,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(31),
+          getDescriptor().getMethods().get(30),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(),
@@ -63905,7 +63075,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(32),
+          getDescriptor().getMethods().get(31),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(),
@@ -63920,7 +63090,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(33),
+          getDescriptor().getMethods().get(32),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(),
@@ -63935,7 +63105,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(34),
+          getDescriptor().getMethods().get(33),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(),
@@ -63950,7 +63120,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(35),
+          getDescriptor().getMethods().get(34),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(),
@@ -63965,7 +63135,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(36),
+          getDescriptor().getMethods().get(35),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(),
@@ -63980,7 +63150,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(37),
+          getDescriptor().getMethods().get(36),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(),
@@ -63995,7 +63165,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(38),
+          getDescriptor().getMethods().get(37),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(),
@@ -64010,7 +63180,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(39),
+          getDescriptor().getMethods().get(38),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -64025,7 +63195,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(40),
+          getDescriptor().getMethods().get(39),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -64040,7 +63210,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(41),
+          getDescriptor().getMethods().get(40),
           controller,
           request,
          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(),

<TRUNCATED>