You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by sy...@apache.org on 2016/01/06 02:15:46 UTC
[2/2] hbase git commit: HBASE-14108 Procedure V2 - Administrative
Task: provide an API to abort a procedure (Stephen Yuan Jiang)
HBASE-14108 Procedure V2 - Administrative Task: provide an API to abort a procedure (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cbd4e5a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cbd4e5a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cbd4e5a1
Branch: refs/heads/branch-1.2
Commit: cbd4e5a1f305a28ad7961d8a41df4b1075047c91
Parents: 883e3cd
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Thu Sep 3 07:01:30 2015 -0700
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Tue Jan 5 17:15:11 2016 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/client/Admin.java | 28 +
.../hadoop/hbase/client/ConnectionManager.java | 7 +
.../apache/hadoop/hbase/client/HBaseAdmin.java | 115 +-
.../hbase/procedure2/ProcedureExecutor.java | 17 +-
.../hbase/procedure2/TestProcedureRecovery.java | 3 -
.../hbase/protobuf/generated/MasterProtos.java | 1324 ++++++++++++++++--
hbase-protocol/src/main/protobuf/Master.proto | 25 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 7 +-
.../hadoop/hbase/master/MasterRpcServices.java | 13 +
.../hadoop/hbase/master/MasterServices.java | 8 +
.../apache/hadoop/hbase/client/TestAdmin2.java | 11 +-
.../hadoop/hbase/master/TestCatalogJanitor.java | 5 +
.../master/procedure/TestProcedureAdmin.java | 186 +++
13 files changed, 1612 insertions(+), 137 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f6605c7..70fdd53 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.Future;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
@@ -946,6 +947,33 @@ public interface Admin extends Abortable, Closeable {
throws IOException;
/**
+ * Abort a procedure.
+ * @param procId ID of the procedure to abort
+ * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if aborted, false if procedure already completed or does not exist
+ * @throws IOException
+ */
+ boolean abortProcedure(
+ final long procId,
+ final boolean mayInterruptIfRunning) throws IOException;
+
+ /**
+ * Abort a procedure but does not block and wait for it to be completely removed.
+ * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+ * It may throw ExecutionException if there was an error while executing the operation
+ * or TimeoutException in case the wait timeout was not long enough to allow the
+ * operation to complete.
+ *
+ * @param procId ID of the procedure to abort
+ * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if aborted, false if procedure already completed or does not exist
+ * @throws IOException
+ */
+ Future<Boolean> abortProcedureAsync(
+ final long procId,
+ final boolean mayInterruptIfRunning) throws IOException;
+
+ /**
* Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
*
* Note that the actual rolling of the log writer is asynchronous and may not be complete when
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 8d24874..f6f15ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -82,6 +82,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.*;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
@@ -1751,6 +1752,12 @@ class ConnectionManager {
return new MasterKeepAliveConnection() {
MasterServiceState mss = masterServiceState;
@Override
+ public MasterProtos.AbortProcedureResponse abortProcedure(
+ RpcController controller,
+ MasterProtos.AbortProcedureRequest request) throws ServiceException {
+ return stub.abortProcedure(controller, request);
+ }
+ @Override
public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request)
throws ServiceException {
return stub.addColumn(controller, request);
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 47ad323..86af68e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -92,6 +92,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
@@ -283,6 +285,86 @@ public class HBaseAdmin implements Admin {
return this.aborted;
}
+ /**
+ * Abort a procedure
+ * @param procId ID of the procedure to abort
+ * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if aborted, false if procedure already completed or does not exist
+ * @throws IOException
+ */
+ @Override
+ public boolean abortProcedure(
+ final long procId,
+ final boolean mayInterruptIfRunning) throws IOException {
+ Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
+ try {
+ return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
+ } catch (TimeoutException e) {
+ throw new TimeoutIOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e.getCause());
+ }
+ }
+ }
+
+ /**
+ * Abort a procedure but does not block and wait for it to be completely removed.
+ * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+ * It may throw ExecutionException if there was an error while executing the operation
+ * or TimeoutException in case the wait timeout was not long enough to allow the
+ * operation to complete.
+ *
+ * @param procId ID of the procedure to abort
+ * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if aborted, false if procedure already completed or does not exist
+ * @throws IOException
+ */
+ @Override
+ public Future<Boolean> abortProcedureAsync(
+ final long procId,
+ final boolean mayInterruptIfRunning) throws IOException {
+ Boolean abortProcResponse = executeCallable(
+ new MasterCallable<AbortProcedureResponse>(getConnection()) {
+ @Override
+ public AbortProcedureResponse call(int callTimeout) throws ServiceException {
+ AbortProcedureRequest abortProcRequest =
+ AbortProcedureRequest.newBuilder().setProcId(procId).build();
+ return master.abortProcedure(null,abortProcRequest);
+ }
+ }).getIsProcedureAborted();
+
+ AbortProcedureFuture abortProcFuture =
+ new AbortProcedureFuture(this, procId, abortProcResponse);
+ return abortProcFuture;
+ }
+
+ private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
+ private boolean isAbortInProgress;
+
+ public AbortProcedureFuture(
+ final HBaseAdmin admin,
+ final Long procId,
+ final Boolean abortProcResponse) {
+ super(admin, procId);
+ this.isAbortInProgress = abortProcResponse;
+ }
+
+ @Override
+ public Boolean get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (!this.isAbortInProgress) {
+ return false;
+ }
+ super.get(timeout, unit);
+ return true;
+ }
+ }
+
/** @return HConnection used by this object. */
@Override
public HConnection getConnection() {
@@ -4152,6 +4234,7 @@ public class HBaseAdmin implements Admin {
private ExecutionException exception = null;
private boolean procResultFound = false;
private boolean done = false;
+ private boolean cancelled = false;
private V result = null;
private final HBaseAdmin admin;
@@ -4164,13 +4247,39 @@ public class HBaseAdmin implements Admin {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
- throw new UnsupportedOperationException();
+ AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
+ .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
+ try {
+ cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
+ if (cancelled) {
+ done = true;
+ }
+ } catch (IOException e) {
+ // Cancel threw an exception for some reason. At this time, we are not sure whether
+ // the cancel succeeded or failed. We assume that it failed, but print out a warning
+ // for debugging purposes.
+ LOG.warn(
+ "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(),
+ e);
+ cancelled = false;
+ }
+ return cancelled;
}
@Override
public boolean isCancelled() {
- // TODO: Abort not implemented yet
- return false;
+ return cancelled;
+ }
+
+ protected AbortProcedureResponse abortProcedureResult(
+ final AbortProcedureRequest request) throws IOException {
+ return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
+ admin.getConnection()) {
+ @Override
+ public AbortProcedureResponse call(int callTimeout) throws ServiceException {
+ return master.abortProcedure(null, request);
+ }
+ });
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index b7403ec..1a098e5 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -696,9 +696,24 @@ public class ProcedureExecutor<TEnvironment> {
* @return true if the procedure exist and has received the abort, otherwise false.
*/
public boolean abort(final long procId) {
+ return abort(procId, true);
+ }
+
+ /**
* Send an abort notification to the specified procedure.
+ * Depending on the procedure implementation the abort can be considered or ignored.
+ * @param procId the procedure to abort
+ * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if the procedure exist and has received the abort, otherwise false.
+ */
+ public boolean abort(final long procId, final boolean mayInterruptIfRunning) {
Procedure proc = procedures.get(procId);
if (proc != null) {
- return proc.abort(getEnvironment());
+ if (!mayInterruptIfRunning && proc.wasExecuted()) {
+ return false;
+ } else {
+ return proc.abort(getEnvironment());
+ }
}
return false;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
index ac57c15..9b3e097 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
@@ -37,21 +37,18 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;
-import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
@Category(SmallTests.class)
public class TestProcedureRecovery {
private static final Log LOG = LogFactory.getLog(TestProcedureRecovery.class);
private static final int PROCEDURE_EXECUTOR_SLOTS = 1;
- private static final Procedure NULL_PROC = null;
private static TestProcEnv procEnv;
private static ProcedureExecutor<TestProcEnv> procExecutor;
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index bfe236c..7daf168 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -48153,6 +48153,980 @@ public final class MasterProtos {
// @@protoc_insertion_point(class_scope:GetProcedureResultResponse)
}
+ public interface AbortProcedureRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 proc_id = 1;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ long getProcId();
+
+ // optional bool mayInterruptIfRunning = 2 [default = true];
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ boolean hasMayInterruptIfRunning();
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ boolean getMayInterruptIfRunning();
+ }
+ /**
+ * Protobuf type {@code AbortProcedureRequest}
+ */
+ public static final class AbortProcedureRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements AbortProcedureRequestOrBuilder {
+ // Use AbortProcedureRequest.newBuilder() to construct.
+ private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final AbortProcedureRequest defaultInstance;
+ public static AbortProcedureRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public AbortProcedureRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AbortProcedureRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ mayInterruptIfRunning_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<AbortProcedureRequest> PARSER =
+ new com.google.protobuf.AbstractParser<AbortProcedureRequest>() {
+ public AbortProcedureRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new AbortProcedureRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<AbortProcedureRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
+ // optional bool mayInterruptIfRunning = 2 [default = true];
+ public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2;
+ private boolean mayInterruptIfRunning_;
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public boolean hasMayInterruptIfRunning() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public boolean getMayInterruptIfRunning() {
+ return mayInterruptIfRunning_;
+ }
+
+ private void initFields() {
+ procId_ = 0L;
+ mayInterruptIfRunning_ = true;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasProcId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(2, mayInterruptIfRunning_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(2, mayInterruptIfRunning_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj;
+
+ boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
+ result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning());
+ if (hasMayInterruptIfRunning()) {
+ result = result && (getMayInterruptIfRunning()
+ == other.getMayInterruptIfRunning());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
+ if (hasMayInterruptIfRunning()) {
+ hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code AbortProcedureRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ mayInterruptIfRunning_ = true;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.mayInterruptIfRunning_ = mayInterruptIfRunning_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
+ if (other.hasMayInterruptIfRunning()) {
+ setMayInterruptIfRunning(other.getMayInterruptIfRunning());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasProcId()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional bool mayInterruptIfRunning = 2 [default = true];
+ private boolean mayInterruptIfRunning_ = true;
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public boolean hasMayInterruptIfRunning() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public boolean getMayInterruptIfRunning() {
+ return mayInterruptIfRunning_;
+ }
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public Builder setMayInterruptIfRunning(boolean value) {
+ bitField0_ |= 0x00000002;
+ mayInterruptIfRunning_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool mayInterruptIfRunning = 2 [default = true];</code>
+ */
+ public Builder clearMayInterruptIfRunning() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ mayInterruptIfRunning_ = true;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:AbortProcedureRequest)
+ }
+
+ static {
+ defaultInstance = new AbortProcedureRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:AbortProcedureRequest)
+ }
+
+ public interface AbortProcedureResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool is_procedure_aborted = 1;
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ boolean hasIsProcedureAborted();
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ boolean getIsProcedureAborted();
+ }
+ /**
+ * Protobuf type {@code AbortProcedureResponse}
+ */
+ public static final class AbortProcedureResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements AbortProcedureResponseOrBuilder {
+ // Use AbortProcedureResponse.newBuilder() to construct.
+ private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final AbortProcedureResponse defaultInstance;
+ public static AbortProcedureResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public AbortProcedureResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AbortProcedureResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ isProcedureAborted_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<AbortProcedureResponse> PARSER =
+ new com.google.protobuf.AbstractParser<AbortProcedureResponse>() {
+ public AbortProcedureResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new AbortProcedureResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<AbortProcedureResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool is_procedure_aborted = 1;
+ public static final int IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1;
+ private boolean isProcedureAborted_;
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public boolean hasIsProcedureAborted() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public boolean getIsProcedureAborted() {
+ return isProcedureAborted_;
+ }
+
+ private void initFields() {
+ isProcedureAborted_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasIsProcedureAborted()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, isProcedureAborted_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, isProcedureAborted_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj;
+
+ boolean result = true;
+ result = result && (hasIsProcedureAborted() == other.hasIsProcedureAborted());
+ if (hasIsProcedureAborted()) {
+ result = result && (getIsProcedureAborted()
+ == other.getIsProcedureAborted());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasIsProcedureAborted()) {
+ hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getIsProcedureAborted());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code AbortProcedureResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ isProcedureAborted_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AbortProcedureResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.isProcedureAborted_ = isProcedureAborted_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this;
+ if (other.hasIsProcedureAborted()) {
+ setIsProcedureAborted(other.getIsProcedureAborted());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasIsProcedureAborted()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bool is_procedure_aborted = 1;
+ private boolean isProcedureAborted_ ;
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public boolean hasIsProcedureAborted() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public boolean getIsProcedureAborted() {
+ return isProcedureAborted_;
+ }
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public Builder setIsProcedureAborted(boolean value) {
+ bitField0_ |= 0x00000001;
+ isProcedureAborted_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool is_procedure_aborted = 1;</code>
+ */
+ public Builder clearIsProcedureAborted() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ isProcedureAborted_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:AbortProcedureResponse)
+ }
+
+ static {
+ defaultInstance = new AbortProcedureResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:AbortProcedureResponse)
+ }
+
public interface SetQuotaRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -53126,6 +54100,18 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse> done);
+ /**
+ * <code>rpc AbortProcedure(.AbortProcedureRequest) returns (.AbortProcedureResponse);</code>
+ *
+ * <pre>
+ ** Abort a procedure
+ * </pre>
+ */
+ public abstract void abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -53547,6 +54533,14 @@ public final class MasterProtos {
impl.getSecurityCapabilities(controller, request, done);
}
+ @java.lang.Override
+ public void abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse> done) {
+ impl.abortProcedure(controller, request, done);
+ }
+
};
}
@@ -53673,6 +54667,8 @@ public final class MasterProtos {
return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
case 51:
return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
+ case 52:
+ return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -53791,6 +54787,8 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
case 51:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+ case 52:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -53909,6 +54907,8 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
case 51:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+ case 52:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -54567,6 +55567,18 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse> done);
+ /**
+ * <code>rpc AbortProcedure(.AbortProcedureRequest) returns (.AbortProcedureResponse);</code>
+ *
+ * <pre>
+ ** Abort a procedure
+ * </pre>
+ */
+ public abstract void abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -54849,6 +55861,11 @@ public final class MasterProtos {
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse>specializeCallback(
done));
return;
+ case 52:
+ this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -54967,6 +55984,8 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
case 51:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+ case 52:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -55085,6 +56104,8 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
case 51:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+ case 52:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -55885,6 +56906,21 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()));
}
+
+ public void abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(52),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -56152,6 +57188,11 @@ public final class MasterProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -56784,6 +57825,18 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(52),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:MasterService)
@@ -57250,6 +58303,16 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_GetProcedureResultResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_AbortProcedureRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_AbortProcedureRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_AbortProcedureResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_AbortProcedureResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_SetQuotaRequest_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -57432,121 +58495,126 @@ public final class MasterProtos {
"\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001" +
"(\014\022+\n\texception\030\005 \001(\0132\030.ForeignException" +
"Message\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNI" +
- "NG\020\001\022\014\n\010FINISHED\020\002\"\273\001\n\017SetQuotaRequest\022\021",
- "\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n" +
- "\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n.T" +
- "ableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_g" +
- "lobals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Throttl" +
- "eRequest\"\022\n\020SetQuotaResponse\"A\n\037MajorCom" +
- "pactionTimestampRequest\022\036\n\ntable_name\030\001 " +
- "\002(\0132\n.TableName\"L\n(MajorCompactionTimest" +
- "ampForRegionRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
- "gionSpecifier\"@\n MajorCompactionTimestam" +
- "pResponse\022\034\n\024compaction_timestamp\030\001 \002(\003\"",
- "\035\n\033SecurityCapabilitiesRequest\"\343\001\n\034Secur" +
- "ityCapabilitiesResponse\022>\n\014capabilities\030" +
- "\001 \003(\0162(.SecurityCapabilitiesResponse.Cap" +
- "ability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENT" +
- "ICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\r" +
- "AUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022" +
- "\023\n\017CELL_VISIBILITY\020\0042\326\035\n\rMasterService\022S" +
- "\n\024GetSchemaAlterStatus\022\034.GetSchemaAlterS" +
- "tatusRequest\032\035.GetSchemaAlterStatusRespo" +
- "nse\022P\n\023GetTableDescriptors\022\033.GetTableDes",
- "criptorsRequest\032\034.GetTableDescriptorsRes" +
- "ponse\022>\n\rGetTableNames\022\025.GetTableNamesRe" +
- "quest\032\026.GetTableNamesResponse\022G\n\020GetClus" +
- "terStatus\022\030.GetClusterStatusRequest\032\031.Ge" +
- "tClusterStatusResponse\022D\n\017IsMasterRunnin" +
- "g\022\027.IsMasterRunningRequest\032\030.IsMasterRun" +
- "ningResponse\0222\n\tAddColumn\022\021.AddColumnReq" +
- "uest\032\022.AddColumnResponse\022;\n\014DeleteColumn" +
- "\022\024.DeleteColumnRequest\032\025.DeleteColumnRes" +
- "ponse\022;\n\014ModifyColumn\022\024.ModifyColumnRequ",
- "est\032\025.ModifyColumnResponse\0225\n\nMoveRegion" +
- "\022\022.MoveRegionRequest\032\023.MoveRegionRespons" +
- "e\022Y\n\026DispatchMergingRegions\022\036.DispatchMe" +
- "rgingRegionsRequest\032\037.DispatchMergingReg" +
- "ionsResponse\022;\n\014AssignRegion\022\024.AssignReg" +
- "ionRequest\032\025.AssignRegionResponse\022A\n\016Una" +
- "ssignRegion\022\026.UnassignRegionRequest\032\027.Un" +
- "assignRegionResponse\022>\n\rOfflineRegion\022\025." +
- "OfflineRegionRequest\032\026.OfflineRegionResp" +
- "onse\0228\n\013DeleteTable\022\023.DeleteTableRequest",
- "\032\024.DeleteTableResponse\022>\n\rtruncateTable\022" +
- "\025.TruncateTableRequest\032\026.TruncateTableRe" +
- "sponse\0228\n\013EnableTable\022\023.EnableTableReque" +
- "st\032\024.EnableTableResponse\022;\n\014DisableTable" +
- "\022\024.DisableTableRequest\032\025.DisableTableRes" +
- "ponse\0228\n\013ModifyTable\022\023.ModifyTableReques" +
- "t\032\024.ModifyTableResponse\0228\n\013CreateTable\022\023" +
- ".CreateTableRequest\032\024.CreateTableRespons" +
- "e\022/\n\010Shutdown\022\020.ShutdownRequest\032\021.Shutdo" +
- "wnResponse\0225\n\nStopMaster\022\022.StopMasterReq",
- "uest\032\023.StopMasterResponse\022,\n\007Balance\022\017.B" +
- "alanceRequest\032\020.BalanceResponse\022M\n\022SetBa" +
- "lancerRunning\022\032.SetBalancerRunningReques" +
- "t\032\033.SetBalancerRunningResponse\022J\n\021IsBala" +
- "ncerEnabled\022\031.IsBalancerEnabledRequest\032\032" +
- ".IsBalancerEnabledResponse\0222\n\tNormalize\022" +
- "\021.NormalizeRequest\032\022.NormalizeResponse\022S" +
- "\n\024SetNormalizerRunning\022\034.SetNormalizerRu" +
- "nningRequest\032\035.SetNormalizerRunningRespo" +
- "nse\022P\n\023IsNormalizerEnabled\022\033.IsNormalize",
- "rEnabledRequest\032\034.IsNormalizerEnabledRes" +
- "ponse\022A\n\016RunCatalogScan\022\026.RunCatalogScan" +
- "Request\032\027.RunCatalogScanResponse\022S\n\024Enab" +
- "leCatalogJanitor\022\034.EnableCatalogJanitorR" +
- "equest\032\035.EnableCatalogJanitorResponse\022\\\n" +
- "\027IsCatalogJanitorEnabled\022\037.IsCatalogJani" +
- "torEnabledRequest\032 .IsCatalogJanitorEnab" +
- "ledResponse\022L\n\021ExecMasterService\022\032.Copro" +
- "cessorServiceRequest\032\033.CoprocessorServic" +
- "eResponse\022/\n\010Snapshot\022\020.SnapshotRequest\032",
- "\021.SnapshotResponse\022V\n\025GetCompletedSnapsh" +
- "ots\022\035.GetCompletedSnapshotsRequest\032\036.Get" +
- "CompletedSnapshotsResponse\022A\n\016DeleteSnap" +
- "shot\022\026.DeleteSnapshotRequest\032\027.DeleteSna" +
- "pshotResponse\022A\n\016IsSnapshotDone\022\026.IsSnap" +
- "shotDoneRequest\032\027.IsSnapshotDoneResponse" +
- "\022D\n\017RestoreSnapshot\022\027.RestoreSnapshotReq" +
- "uest\032\030.RestoreSnapshotResponse\022V\n\025IsRest" +
- "oreSnapshotDone\022\035.IsRestoreSnapshotDoneR" +
- "equest\032\036.IsRestoreSnapshotDoneResponse\022>",
- "\n\rExecProcedure\022\025.ExecProcedureRequest\032\026" +
- ".ExecProcedureResponse\022E\n\024ExecProcedureW" +
- "ithRet\022\025.ExecProcedureRequest\032\026.ExecProc" +
- "edureResponse\022D\n\017IsProcedureDone\022\027.IsPro" +
- "cedureDoneRequest\032\030.IsProcedureDoneRespo" +
- "nse\022D\n\017ModifyNamespace\022\027.ModifyNamespace" +
- "Request\032\030.ModifyNamespaceResponse\022D\n\017Cre" +
- "ateNamespace\022\027.CreateNamespaceRequest\032\030." +
- "CreateNamespaceResponse\022D\n\017DeleteNamespa" +
- "ce\022\027.DeleteNamespaceRequest\032\030.DeleteName",
- "spaceResponse\022Y\n\026GetNamespaceDescriptor\022" +
- "\036.GetNamespaceDescriptorRequest\032\037.GetNam" +
- "espaceDescriptorResponse\022_\n\030ListNamespac" +
- "eDescriptors\022 .ListNamespaceDescriptorsR" +
- "equest\032!.ListNamespaceDescriptorsRespons" +
- "e\022t\n\037ListTableDescriptorsByNamespace\022\'.L" +
- "istTableDescriptorsByNamespaceRequest\032(." +
- "ListTableDescriptorsByNamespaceResponse\022" +
- "b\n\031ListTableNamesByNamespace\022!.ListTable" +
- "NamesByNamespaceRequest\032\".ListTableNames",
- "ByNamespaceResponse\022/\n\010SetQuota\022\020.SetQuo" +
- "taRequest\032\021.SetQuotaResponse\022f\n\037getLastM" +
- "ajorCompactionTimestamp\022 .MajorCompactio" +
- "nTimestampRequest\032!.MajorCompactionTimes" +
- "tampResponse\022x\n(getLastMajorCompactionTi" +
- "mestampForRegion\022).MajorCompactionTimest" +
- "ampForRegionRequest\032!.MajorCompactionTim" +
- "estampResponse\022M\n\022getProcedureResult\022\032.G" +
- "etProcedureResultRequest\032\033.GetProcedureR" +
- "esultResponse\022V\n\027getSecurityCapabilities",
- "\022\034.SecurityCapabilitiesRequest\032\035.Securit" +
- "yCapabilitiesResponseBB\n*org.apache.hado" +
- "op.hbase.protobuf.generatedB\014MasterProto" +
- "sH\001\210\001\001\240\001\001"
+ "NG\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRequ",
+ "est\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRu" +
+ "nning\030\002 \001(\010:\004true\"6\n\026AbortProcedureRespo" +
+ "nse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\273\001\n\017Se" +
+ "tQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser" +
+ "_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable" +
+ "_name\030\004 \001(\0132\n.TableName\022\022\n\nremove_all\030\005 " +
+ "\001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022\"\n\010throttle\030" +
+ "\007 \001(\0132\020.ThrottleRequest\"\022\n\020SetQuotaRespo" +
+ "nse\"A\n\037MajorCompactionTimestampRequest\022\036" +
+ "\n\ntable_name\030\001 \002(\0132\n.TableName\"L\n(MajorC",
+ "ompactionTimestampForRegionRequest\022 \n\006re" +
+ "gion\030\001 \002(\0132\020.RegionSpecifier\"@\n MajorCom" +
+ "pactionTimestampResponse\022\034\n\024compaction_t" +
+ "imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" +
+ "quest\"\343\001\n\034SecurityCapabilitiesResponse\022>" +
+ "\n\014capabilities\030\001 \003(\0162(.SecurityCapabilit" +
+ "iesResponse.Capability\"\202\001\n\nCapability\022\031\n" +
+ "\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHE" +
+ "NTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_A" +
+ "UTHORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\0042\231\036\n\r",
+ "MasterService\022S\n\024GetSchemaAlterStatus\022\034." +
+ "GetSchemaAlterStatusRequest\032\035.GetSchemaA" +
+ "lterStatusResponse\022P\n\023GetTableDescriptor" +
+ "s\022\033.GetTableDescriptorsRequest\032\034.GetTabl" +
+ "eDescriptorsResponse\022>\n\rGetTableNames\022\025." +
+ "GetTableNamesRequest\032\026.GetTableNamesResp" +
+ "onse\022G\n\020GetClusterStatus\022\030.GetClusterSta" +
+ "tusRequest\032\031.GetClusterStatusResponse\022D\n" +
+ "\017IsMasterRunning\022\027.IsMasterRunningReques" +
+ "t\032\030.IsMasterRunningResponse\0222\n\tAddColumn",
+ "\022\021.AddColumnRequest\032\022.AddColumnResponse\022" +
+ ";\n\014DeleteColumn\022\024.DeleteColumnRequest\032\025." +
+ "DeleteColumnResponse\022;\n\014ModifyColumn\022\024.M" +
+ "odifyColumnRequest\032\025.ModifyColumnRespons" +
+ "e\0225\n\nMoveRegion\022\022.MoveRegionRequest\032\023.Mo" +
+ "veRegionResponse\022Y\n\026DispatchMergingRegio" +
+ "ns\022\036.DispatchMergingRegionsRequest\032\037.Dis" +
+ "patchMergingRegionsResponse\022;\n\014AssignReg" +
+ "ion\022\024.AssignRegionRequest\032\025.AssignRegion" +
+ "Response\022A\n\016UnassignRegion\022\026.UnassignReg",
+ "ionRequest\032\027.UnassignRegionResponse\022>\n\rO" +
+ "fflineRegion\022\025.OfflineRegionRequest\032\026.Of" +
+ "flineRegionResponse\0228\n\013DeleteTable\022\023.Del" +
+ "eteTableRequest\032\024.DeleteTableResponse\022>\n" +
+ "\rtruncateTable\022\025.TruncateTableRequest\032\026." +
+ "TruncateTableResponse\0228\n\013EnableTable\022\023.E" +
+ "nableTableRequest\032\024.EnableTableResponse\022" +
+ ";\n\014DisableTable\022\024.DisableTableRequest\032\025." +
+ "DisableTableResponse\0228\n\013ModifyTable\022\023.Mo" +
+ "difyTableRequest\032\024.ModifyTableResponse\0228",
+ "\n\013CreateTable\022\023.CreateTableRequest\032\024.Cre" +
+ "ateTableResponse\022/\n\010Shutdown\022\020.ShutdownR" +
+ "equest\032\021.ShutdownResponse\0225\n\nStopMaster\022" +
+ "\022.StopMasterRequest\032\023.StopMasterResponse" +
+ "\022,\n\007Balance\022\017.BalanceRequest\032\020.BalanceRe" +
+ "sponse\022M\n\022SetBalancerRunning\022\032.SetBalanc" +
+ "erRunningRequest\032\033.SetBalancerRunningRes" +
+ "ponse\022J\n\021IsBalancerEnabled\022\031.IsBalancerE" +
+ "nabledRequest\032\032.IsBalancerEnabledRespons" +
+ "e\0222\n\tNormalize\022\021.NormalizeRequest\032\022.Norm",
+ "alizeResponse\022S\n\024SetNormalizerRunning\022\034." +
+ "SetNormalizerRunningRequest\032\035.SetNormali" +
+ "zerRunningResponse\022P\n\023IsNormalizerEnable" +
+ "d\022\033.IsNormalizerEnabledRequest\032\034.IsNorma" +
+ "lizerEnabledResponse\022A\n\016RunCatalogScan\022\026" +
+ ".RunCatalogScanRequest\032\027.RunCatalogScanR" +
+ "esponse\022S\n\024EnableCatalogJanitor\022\034.Enable" +
+ "CatalogJanitorRequest\032\035.EnableCatalogJan" +
+ "itorResponse\022\\\n\027IsCatalogJanitorEnabled\022" +
+ "\037.IsCatalogJanitorEnabledRequest\032 .IsCat",
+ "alogJanitorEnabledResponse\022L\n\021ExecMaster" +
+ "Service\022\032.CoprocessorServiceRequest\032\033.Co" +
+ "processorServiceResponse\022/\n\010Snapshot\022\020.S" +
+ "napshotRequest\032\021.SnapshotResponse\022V\n\025Get" +
+ "CompletedSnapshots\022\035.GetCompletedSnapsho" +
+ "tsRequest\032\036.GetCompletedSnapshotsRespons" +
+ "e\022A\n\016DeleteSnapshot\022\026.DeleteSnapshotRequ" +
+ "est\032\027.DeleteSnapshotResponse\022A\n\016IsSnapsh" +
+ "otDone\022\026.IsSnapshotDoneRequest\032\027.IsSnaps" +
+ "hotDoneResponse\022D\n\017RestoreSnapshot\022\027.Res",
+ "toreSnapshotRequest\032\030.RestoreSnapshotRes" +
+ "ponse\022V\n\025IsRestoreSnapshotDone\022\035.IsResto" +
+ "reSnapshotDoneRequest\032\036.IsRestoreSnapsho" +
+ "tDoneResponse\022>\n\rExecProcedure\022\025.ExecPro" +
+ "cedureRequest\032\026.ExecProcedureResponse\022E\n" +
+ "\024ExecProcedureWithRet\022\025.ExecProcedureReq" +
+ "uest\032\026.ExecProcedureResponse\022D\n\017IsProced" +
+ "ureDone\022\027.IsProcedureDoneRequest\032\030.IsPro" +
+ "cedureDoneResponse\022D\n\017ModifyNamespace\022\027." +
+ "ModifyNamespaceRequest\032\030.ModifyNamespace",
+ "Response\022D\n\017CreateNamespace\022\027.CreateName" +
+ "spaceRequest\032\030.CreateNamespaceResponse\022D" +
+ "\n\017DeleteNamespace\022\027.DeleteNamespaceReque" +
+ "st\032\030.DeleteNamespaceResponse\022Y\n\026GetNames" +
+ "paceDescriptor\022\036.GetNamespaceDescriptorR" +
+ "equest\032\037.GetNamespaceDescriptorResponse\022" +
+ "_\n\030ListNamespaceDescriptors\022 .ListNamesp" +
+ "aceDescriptorsRequest\032!.ListNamespaceDes" +
+ "criptorsResponse\022t\n\037ListTableDescriptors" +
+ "ByNamespace\022\'.ListTableDescriptorsByName",
+ "spaceRequest\032(.ListTableDescriptorsByNam" +
+ "espaceResponse\022b\n\031ListTableNamesByNamesp" +
+ "ace\022!.ListTableNamesByNamespaceRequest\032\"" +
+ ".ListTableNamesByNamespaceResponse\022/\n\010Se" +
+ "tQuota\022\020.SetQuotaRequest\032\021.SetQuotaRespo" +
+ "nse\022f\n\037getLastMajorCompactionTimestamp\022 " +
+ ".MajorCompactionTimestampRequest\032!.Major" +
+ "CompactionTimestampResponse\022x\n(getLastMa" +
+ "jorCompactionTimestampForRegion\022).MajorC" +
+ "ompactionTimestampForRegionRequest\032!.Maj",
+ "orCompactionTimestampResponse\022M\n\022getProc" +
+ "edureResult\022\032.GetProcedureResultRequest\032" +
+ "\033.GetProcedureResultResponse\022V\n\027getSecur" +
+ "ityCapabilities\022\034.SecurityCapabilitiesRe" +
+ "quest\032\035.SecurityCapabilitiesResponse\022A\n\016" +
+ "AbortProcedure\022\026.AbortProcedureRequest\032\027" +
+ ".AbortProcedureResponseBB\n*org.apache.ha" +
+ "doop.hbase.protobuf.generatedB\014MasterPro" +
+ "tosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -58105,44 +59173,56 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetProcedureResultResponse_descriptor,
new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
- internal_static_SetQuotaRequest_descriptor =
+ internal_static_AbortProcedureRequest_descriptor =
getDescriptor().getMessageTypes().get(92);
+ internal_static_AbortProcedureRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_AbortProcedureRequest_descriptor,
+ new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
+ internal_static_AbortProcedureResponse_descriptor =
+ getDescriptor().getMessageTypes().get(93);
+ internal_static_AbortProcedureResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_AbortProcedureResponse_descriptor,
+ new java.lang.String[] { "IsProcedureAborted", });
+ internal_static_SetQuotaRequest_descriptor =
+ getDescriptor().getMessageTypes().get(94);
internal_static_SetQuotaRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SetQuotaRequest_descriptor,
new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
internal_static_SetQuotaResponse_descriptor =
- getDescriptor().getMessageTypes().get(93);
+ getDescriptor().getMessageTypes().get(95);
internal_static_SetQuotaResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SetQuotaResponse_descriptor,
new java.lang.String[] { });
internal_static_MajorCompactionTimestampRequest_descriptor =
- getDescriptor().getMessageTypes().get(94);
+ getDescriptor().getMessageTypes().get(96);
internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_MajorCompactionTimestampRequest_descriptor,
new java.lang.String[] { "TableName", });
internal_static_MajorCompactionTimestampForRegionRequest_descriptor =
- getDescriptor().getMessageTypes().get(95);
+ getDescriptor().getMessageTypes().get(97);
internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_MajorCompactionTimestampForRegionRequest_descriptor,
new java.lang.String[] { "Region", });
internal_static_MajorCompactionTimestampResponse_descriptor =
- getDescriptor().getMessageTypes().get(96);
+ getDescriptor().getMessageTypes().get(98);
internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_MajorCompactionTimestampResponse_descriptor,
new java.lang.String[] { "CompactionTimestamp", });
internal_static_SecurityCapabilitiesRequest_descriptor =
- getDescriptor().getMessageTypes().get(97);
+ getDescriptor().getMessageTypes().get(99);
internal_static_SecurityCapabilitiesRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SecurityCapabilitiesRequest_descriptor,
new java.lang.String[] { });
internal_static_SecurityCapabilitiesResponse_descriptor =
- getDescriptor().getMessageTypes().get(98);
+ getDescriptor().getMessageTypes().get(100);
internal_static_SecurityCapabilitiesResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SecurityCapabilitiesResponse_descriptor,
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index bb9077a..9606cb0 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -400,21 +400,21 @@ message IsMasterRunningResponse {
}
message ExecProcedureRequest {
- required ProcedureDescription procedure = 1;
+ required ProcedureDescription procedure = 1;
}
message ExecProcedureResponse {
- optional int64 expected_timeout = 1;
- optional bytes return_data = 2;
+ optional int64 expected_timeout = 1;
+ optional bytes return_data = 2;
}
message IsProcedureDoneRequest {
- optional ProcedureDescription procedure = 1;
+ optional ProcedureDescription procedure = 1;
}
message IsProcedureDoneResponse {
- optional bool done = 1 [default = false];
- optional ProcedureDescription snapshot = 2;
+ optional bool done = 1 [default = false];
+ optional ProcedureDescription snapshot = 2;
}
message GetProcedureResultRequest {
@@ -435,6 +435,15 @@ message GetProcedureResultResponse {
optional ForeignExceptionMessage exception = 5;
}
+message AbortProcedureRequest {
+ required uint64 proc_id = 1;
+ optional bool mayInterruptIfRunning = 2 [default = true];
+}
+
+message AbortProcedureResponse {
+ required bool is_procedure_aborted = 1;
+}
+
message SetQuotaRequest {
optional string user_name = 1;
optional string user_group = 2;
@@ -725,4 +734,8 @@ service MasterService {
/** Returns the security capabilities in effect on the cluster */
rpc getSecurityCapabilities(SecurityCapabilitiesRequest)
returns(SecurityCapabilitiesResponse);
+
+ /** Abort a procedure */
+ rpc AbortProcedure(AbortProcedureRequest)
+ returns(AbortProcedureResponse);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f219794..ef6669b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -381,7 +381,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// should we check encryption settings at master side, default true
this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);
- this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
+ this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
// preload table descriptor at startup
this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
@@ -2505,6 +2505,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
+ public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) {
+ return this.procedureExecutor.abort(procId, mayInterruptIfRunning);
+ }
+
+ @Override
public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
ensureNamespaceExists(name);
return listTableDescriptors(name, null, null, true);
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbd4e5a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8b0d587..613bb9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
@@ -1056,6 +1058,17 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public AbortProcedureResponse abortProcedure(
+ RpcController rpcController,
+ AbortProcedureRequest request) {
+ AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder();
+ boolean abortResult =
+ master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning());
+ response.setIsProcedureAborted(abortResult);
+ return response.build();
+ }
+
+ @Override
public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
ListNamespaceDescriptorsRequest request) throws ServiceException {
try {