Posted to commits@hbase.apache.org by te...@apache.org on 2012/10/23 17:59:58 UTC
svn commit: r1401330 [1/2] - in /hbase/trunk/hbase-server/src:
main/java/org/apache/hadoop/hbase/coprocessor/example/
main/java/org/apache/hadoop/hbase/coprocessor/example/generated/
main/protobuf/ test/java/org/apache/hadoop/hbase/coprocessor/example/
Author: tedyu
Date: Tue Oct 23 15:59:57 2012
New Revision: 1401330
URL: http://svn.apache.org/viewvc?rev=1401330&view=rev
Log:
HBASE-6942 Endpoint implementation for bulk delete rows (Anoop)
Added:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
hbase/trunk/hbase-server/src/main/protobuf/BulkDelete.proto
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
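Before the new endpoint can be called, it has to be loaded on a table (or on every region server). A minimal setup sketch follows, assuming table-level loading through HTableDescriptor.addCoprocessor(); the table name "myTable" and the choice of client-side loading are illustrative assumptions, not part of this commit. Loading could equally be done through the hbase.coprocessor.region.classes property in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadBulkDeleteEndpointSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        byte[] tableName = Bytes.toBytes("myTable"); // assumed table name
        admin.disableTable(tableName);
        HTableDescriptor htd = admin.getTableDescriptor(tableName);
        // Register the example endpoint class added by this commit on the table.
        htd.addCoprocessor(
            "org.apache.hadoop.hbase.coprocessor.example.BulkDeleteEndpoint");
        admin.modifyTable(tableName, htd);
        admin.enableTable(tableName);
        admin.close();
      }
    }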
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java?rev=1401330&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java Tue Oct 23 15:59:57 2012
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+/**
+ * Defines a protocol to delete data in bulk based on a scan. The scan can be a range scan or
+ * one with conditions (filters) etc. This can be used to delete rows, column family(s), column
+ * qualifier(s) or version(s) of columns. When the delete type is FAMILY or COLUMN, the Scan
+ * determines which families or columns get deleted, so the Scan needs to select all the
+ * families/qualifiers which are to be deleted. When the delete type is VERSION, the Scan
+ * determines which column(s) and version(s) get deleted, so the Scan needs to select all the
+ * qualifiers and the versions which are to be deleted. When a timestamp is passed, only the one
+ * version at that timestamp will be deleted (even if the Scan fetches many versions). When the
+ * timestamp is passed as null, all the versions which the Scan selects will get deleted.
+ *
+ * <br/> Example: <code><pre>
+ * Scan scan = new Scan();
+ * // set scan properties(rowkey range, filters, timerange etc).
+ * HTable ht = ...;
+ * long noOfDeletedRows = 0L;
+ * Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
+ * new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
+ * ServerRpcController controller = new ServerRpcController();
+ * BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
+ * new BlockingRpcCallback<BulkDeleteResponse>();
+ *
+ * public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
+ * Builder builder = BulkDeleteRequest.newBuilder();
+ * builder.setScan(ProtobufUtil.toScan(scan));
+ * builder.setDeleteType(DeleteType.VERSION);
+ * builder.setRowBatchSize(rowBatchSize);
+ * // Set optional timestamp if needed
+ * builder.setTimestamp(timeStamp);
+ * service.delete(controller, builder.build(), rpcCallback);
+ * return rpcCallback.get();
+ * }
+ * };
+ * Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan
+ * .getStartRow(), scan.getStopRow(), callable);
+ * for (BulkDeleteResponse response : result.values()) {
+ * noOfDeletedRows += response.getRowsDeleted();
+ * }
+ * </pre></code>
+ */
+public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
+ Coprocessor {
+ private static final String NO_OF_VERSIONS_TO_DELETE = "noOfVersionsToDelete";
+ private static final Log LOG = LogFactory.getLog(BulkDeleteEndpoint.class);
+
+ private RegionCoprocessorEnvironment env;
+
+ @Override
+ public Service getService() {
+ return this;
+ }
+
+ @Override
+ public void delete(RpcController controller, BulkDeleteRequest request,
+ RpcCallback<BulkDeleteResponse> done) {
+ long totalRowsDeleted = 0L;
+ long totalVersionsDeleted = 0L;
+ HRegion region = env.getRegion();
+ int rowBatchSize = request.getRowBatchSize();
+ Long timestamp = null;
+ if (request.hasTimestamp()) {
+ timestamp = request.getTimestamp();
+ }
+ DeleteType deleteType = request.getDeleteType();
+ boolean hasMore = true;
+ RegionScanner scanner = null;
+ try {
+ Scan scan = ProtobufUtil.toScan(request.getScan());
+ if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
+ // What we need is just the rowkeys. So only 1st KV from any row is enough.
+ // Only when it is a row delete, we can apply this filter.
+        // For other delete types we rely on the scan to know which columns are to be deleted.
+ scan.setFilter(new FirstKeyOnlyFilter());
+ }
+      // Here we assume that the scan is set up correctly, with the appropriate
+      // filter and the necessary column(s) selected.
+ scanner = region.getScanner(scan);
+ while (hasMore) {
+ List<List<KeyValue>> deleteRows = new ArrayList<List<KeyValue>>(rowBatchSize);
+ for (int i = 0; i < rowBatchSize; i++) {
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ hasMore = scanner.next(results);
+ if (results.size() > 0) {
+ deleteRows.add(results);
+ }
+ if (!hasMore) {
+ // There are no more rows.
+ break;
+ }
+ }
+ if (deleteRows.size() > 0) {
+ Pair<Mutation, Integer>[] deleteWithLockArr = new Pair[deleteRows.size()];
+ int i = 0;
+ for (List<KeyValue> deleteRow : deleteRows) {
+ Delete delete = createDeleteMutation(deleteRow, deleteType, timestamp);
+ deleteWithLockArr[i++] = new Pair<Mutation, Integer>(delete, null);
+ }
+ OperationStatus[] opStatus = region.batchMutate(deleteWithLockArr);
+ for (i = 0; i < opStatus.length; i++) {
+ if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
+ break;
+ }
+ totalRowsDeleted++;
+ if (deleteType == DeleteType.VERSION) {
+ byte[] versionsDeleted = deleteWithLockArr[i].getFirst().getAttribute(
+ NO_OF_VERSIONS_TO_DELETE);
+ if (versionsDeleted != null) {
+ totalVersionsDeleted += Bytes.toInt(versionsDeleted);
+ }
+ }
+ }
+ }
+ }
+ } catch (IOException ioe) {
+ LOG.error(ioe);
+ // Call ServerRpcController#getFailedOn() to retrieve this IOException at client side.
+ ResponseConverter.setControllerException(controller, ioe);
+ } finally {
+ if (scanner != null) {
+ try {
+ scanner.close();
+ } catch (IOException ioe) {
+ LOG.error(ioe);
+ }
+ }
+ }
+ Builder responseBuilder = BulkDeleteResponse.newBuilder();
+ responseBuilder.setRowsDeleted(totalRowsDeleted);
+ if (deleteType == DeleteType.VERSION) {
+ responseBuilder.setVersionsDeleted(totalVersionsDeleted);
+ }
+ BulkDeleteResponse result = responseBuilder.build();
+ done.run(result);
+ }
+
+ private Delete createDeleteMutation(List<KeyValue> deleteRow, DeleteType deleteType,
+ Long timestamp) {
+ long ts;
+ if (timestamp == null) {
+ ts = HConstants.LATEST_TIMESTAMP;
+ } else {
+ ts = timestamp;
+ }
+ // We just need the rowkey. Get it from 1st KV.
+ byte[] row = deleteRow.get(0).getRow();
+ Delete delete = new Delete(row, ts, null);
+ if (deleteType == DeleteType.FAMILY) {
+ Set<byte[]> families = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+ for (KeyValue kv : deleteRow) {
+ if (families.add(kv.getFamily())) {
+ delete.deleteFamily(kv.getFamily(), ts);
+ }
+ }
+ } else if (deleteType == DeleteType.COLUMN) {
+ Set<Column> columns = new HashSet<Column>();
+ for (KeyValue kv : deleteRow) {
+ Column column = new Column(kv.getFamily(), kv.getQualifier());
+ if (columns.add(column)) {
+          // Calling deleteColumns() more than once for the same cf:qualifier is not correct.
+ // Every call to deleteColumns() will add a new KV to the familymap which will finally
+ // get written to the memstore as part of delete().
+ delete.deleteColumns(column.family, column.qualifier, ts);
+ }
+ }
+ } else if (deleteType == DeleteType.VERSION) {
+      // When a timestamp is passed to the delete() call, only the one version of the column (with
+      // that timestamp) will be deleted. If no timestamp is passed, it will delete N versions.
+ // How many versions will get deleted depends on the Scan being passed. All the KVs that
+ // the scan fetched will get deleted.
+ int noOfVersionsToDelete = 0;
+ if (timestamp == null) {
+ for (KeyValue kv : deleteRow) {
+ delete.deleteColumn(kv.getFamily(), kv.getQualifier(), kv.getTimestamp());
+ noOfVersionsToDelete++;
+ }
+ } else {
+ Set<Column> columns = new HashSet<Column>();
+ for (KeyValue kv : deleteRow) {
+ Column column = new Column(kv.getFamily(), kv.getQualifier());
+          // Only one version of a particular column gets deleted.
+ if (columns.add(column)) {
+ delete.deleteColumn(column.family, column.qualifier, ts);
+ noOfVersionsToDelete++;
+ }
+ }
+ }
+ delete.setAttribute(NO_OF_VERSIONS_TO_DELETE, Bytes.toBytes(noOfVersionsToDelete));
+ }
+ return delete;
+ }
+
+ private static class Column {
+ private byte[] family;
+ private byte[] qualifier;
+
+ public Column(byte[] family, byte[] qualifier) {
+ this.family = family;
+ this.qualifier = qualifier;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof Column)) {
+ return false;
+ }
+ Column column = (Column) other;
+ return Bytes.equals(this.family, column.family)
+ && Bytes.equals(this.qualifier, column.qualifier);
+ }
+
+ @Override
+ public int hashCode() {
+ int h = 31;
+ h = h + 13 * Bytes.hashCode(this.family);
+ h = h + 13 * Bytes.hashCode(this.qualifier);
+ return h;
+ }
+ }
+
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ if (env instanceof RegionCoprocessorEnvironment) {
+ this.env = (RegionCoprocessorEnvironment) env;
+ } else {
+ throw new CoprocessorException("Must be loaded on a table region!");
+ }
+ }
+
+ @Override
+ public void stop(CoprocessorEnvironment env) throws IOException {
+ // nothing to do
+ }
+}
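For reference, here is a minimal client-side sketch assembled from the Javadoc example above, showing how the endpoint might be invoked once it is loaded on a table. The table name "myTable", the batch size, the ROW delete type, and the import locations of ServerRpcController/BlockingRpcCallback are illustrative assumptions that may differ between HBase versions; this is not part of the commit itself.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.coprocessor.Batch;
    import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
    import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
    import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse;
    import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService;
    import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
    import org.apache.hadoop.hbase.ipc.ServerRpcController;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    public class BulkDeleteClientSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        HTable ht = new HTable(conf, "myTable");   // assumed table name
        final Scan scan = new Scan();              // set rowkey range, filters, timerange etc.
        final int rowBatchSize = 500;              // assumed batch size
        Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
            new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
              new BlockingRpcCallback<BulkDeleteResponse>();

          public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
            BulkDeleteRequest.Builder builder = BulkDeleteRequest.newBuilder();
            builder.setScan(ProtobufUtil.toScan(scan));
            builder.setDeleteType(DeleteType.ROW); // delete whole rows selected by the scan
            builder.setRowBatchSize(rowBatchSize);
            // No timestamp set here; all versions selected by the scan are affected.
            service.delete(controller, builder.build(), rpcCallback);
            return rpcCallback.get();
          }
        };
        // Invoke the endpoint on every region covered by the scan's row range.
        Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class,
            scan.getStartRow(), scan.getStopRow(), callable);
        long noOfDeletedRows = 0L;
        for (BulkDeleteResponse response : result.values()) {
          noOfDeletedRows += response.getRowsDeleted();
        }
        System.out.println("Rows deleted: " + noOfDeletedRows);
        ht.close();
      }
    }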
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java?rev=1401330&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java Tue Oct 23 15:59:57 2012
@@ -0,0 +1,1512 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: BulkDelete.proto
+
+package org.apache.hadoop.hbase.coprocessor.example.generated;
+
+public final class BulkDeleteProtos {
+ private BulkDeleteProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface BulkDeleteRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .Scan scan = 1;
+ boolean hasScan();
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan();
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder();
+
+ // required .BulkDeleteRequest.DeleteType deleteType = 2;
+ boolean hasDeleteType();
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType getDeleteType();
+
+ // optional uint64 timestamp = 3;
+ boolean hasTimestamp();
+ long getTimestamp();
+
+ // required uint32 rowBatchSize = 4;
+ boolean hasRowBatchSize();
+ int getRowBatchSize();
+ }
+ public static final class BulkDeleteRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements BulkDeleteRequestOrBuilder {
+ // Use BulkDeleteRequest.newBuilder() to construct.
+ private BulkDeleteRequest(Builder builder) {
+ super(builder);
+ }
+ private BulkDeleteRequest(boolean noInit) {}
+
+ private static final BulkDeleteRequest defaultInstance;
+ public static BulkDeleteRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BulkDeleteRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteRequest_fieldAccessorTable;
+ }
+
+ public enum DeleteType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ ROW(0, 0),
+ FAMILY(1, 1),
+ COLUMN(2, 2),
+ VERSION(3, 3),
+ ;
+
+ public static final int ROW_VALUE = 0;
+ public static final int FAMILY_VALUE = 1;
+ public static final int COLUMN_VALUE = 2;
+ public static final int VERSION_VALUE = 3;
+
+
+ public final int getNumber() { return value; }
+
+ public static DeleteType valueOf(int value) {
+ switch (value) {
+ case 0: return ROW;
+ case 1: return FAMILY;
+ case 2: return COLUMN;
+ case 3: return VERSION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<DeleteType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<DeleteType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<DeleteType>() {
+ public DeleteType findValueByNumber(int number) {
+ return DeleteType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final DeleteType[] VALUES = {
+ ROW, FAMILY, COLUMN, VERSION,
+ };
+
+ public static DeleteType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DeleteType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:BulkDeleteRequest.DeleteType)
+ }
+
+ private int bitField0_;
+ // required .Scan scan = 1;
+ public static final int SCAN_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan_;
+ public boolean hasScan() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan() {
+ return scan_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder() {
+ return scan_;
+ }
+
+ // required .BulkDeleteRequest.DeleteType deleteType = 2;
+ public static final int DELETETYPE_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType deleteType_;
+ public boolean hasDeleteType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType getDeleteType() {
+ return deleteType_;
+ }
+
+ // optional uint64 timestamp = 3;
+ public static final int TIMESTAMP_FIELD_NUMBER = 3;
+ private long timestamp_;
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getTimestamp() {
+ return timestamp_;
+ }
+
+ // required uint32 rowBatchSize = 4;
+ public static final int ROWBATCHSIZE_FIELD_NUMBER = 4;
+ private int rowBatchSize_;
+ public boolean hasRowBatchSize() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getRowBatchSize() {
+ return rowBatchSize_;
+ }
+
+ private void initFields() {
+ scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
+ deleteType_ = org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType.ROW;
+ timestamp_ = 0L;
+ rowBatchSize_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasScan()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasDeleteType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRowBatchSize()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getScan().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, scan_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeEnum(2, deleteType_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, timestamp_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt32(4, rowBatchSize_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, scan_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(2, deleteType_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, timestamp_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(4, rowBatchSize_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest other = (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest) obj;
+
+ boolean result = true;
+ result = result && (hasScan() == other.hasScan());
+ if (hasScan()) {
+ result = result && getScan()
+ .equals(other.getScan());
+ }
+ result = result && (hasDeleteType() == other.hasDeleteType());
+ if (hasDeleteType()) {
+ result = result &&
+ (getDeleteType() == other.getDeleteType());
+ }
+ result = result && (hasTimestamp() == other.hasTimestamp());
+ if (hasTimestamp()) {
+ result = result && (getTimestamp()
+ == other.getTimestamp());
+ }
+ result = result && (hasRowBatchSize() == other.hasRowBatchSize());
+ if (hasRowBatchSize()) {
+ result = result && (getRowBatchSize()
+ == other.getRowBatchSize());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasScan()) {
+ hash = (37 * hash) + SCAN_FIELD_NUMBER;
+ hash = (53 * hash) + getScan().hashCode();
+ }
+ if (hasDeleteType()) {
+ hash = (37 * hash) + DELETETYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getDeleteType());
+ }
+ if (hasTimestamp()) {
+ hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTimestamp());
+ }
+ if (hasRowBatchSize()) {
+ hash = (37 * hash) + ROWBATCHSIZE_FIELD_NUMBER;
+ hash = (53 * hash) + getRowBatchSize();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getScanFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (scanBuilder_ == null) {
+ scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
+ } else {
+ scanBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ deleteType_ = org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType.ROW;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ timestamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ rowBatchSize_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest build() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest result = new org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (scanBuilder_ == null) {
+ result.scan_ = scan_;
+ } else {
+ result.scan_ = scanBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.deleteType_ = deleteType_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.timestamp_ = timestamp_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.rowBatchSize_ = rowBatchSize_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDefaultInstance()) return this;
+ if (other.hasScan()) {
+ mergeScan(other.getScan());
+ }
+ if (other.hasDeleteType()) {
+ setDeleteType(other.getDeleteType());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ if (other.hasRowBatchSize()) {
+ setRowBatchSize(other.getRowBatchSize());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasScan()) {
+
+ return false;
+ }
+ if (!hasDeleteType()) {
+
+ return false;
+ }
+ if (!hasRowBatchSize()) {
+
+ return false;
+ }
+ if (!getScan().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.newBuilder();
+ if (hasScan()) {
+ subBuilder.mergeFrom(getScan());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setScan(subBuilder.buildPartial());
+ break;
+ }
+ case 16: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType value = org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(2, rawValue);
+ } else {
+ bitField0_ |= 0x00000002;
+ deleteType_ = value;
+ }
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ timestamp_ = input.readUInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ rowBatchSize_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .Scan scan = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder> scanBuilder_;
+ public boolean hasScan() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan() {
+ if (scanBuilder_ == null) {
+ return scan_;
+ } else {
+ return scanBuilder_.getMessage();
+ }
+ }
+ public Builder setScan(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan value) {
+ if (scanBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ scan_ = value;
+ onChanged();
+ } else {
+ scanBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setScan(
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder builderForValue) {
+ if (scanBuilder_ == null) {
+ scan_ = builderForValue.build();
+ onChanged();
+ } else {
+ scanBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeScan(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan value) {
+ if (scanBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ scan_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance()) {
+ scan_ =
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.newBuilder(scan_).mergeFrom(value).buildPartial();
+ } else {
+ scan_ = value;
+ }
+ onChanged();
+ } else {
+ scanBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearScan() {
+ if (scanBuilder_ == null) {
+ scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
+ onChanged();
+ } else {
+ scanBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder getScanBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getScanFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder() {
+ if (scanBuilder_ != null) {
+ return scanBuilder_.getMessageOrBuilder();
+ } else {
+ return scan_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder>
+ getScanFieldBuilder() {
+ if (scanBuilder_ == null) {
+ scanBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder>(
+ scan_,
+ getParentForChildren(),
+ isClean());
+ scan_ = null;
+ }
+ return scanBuilder_;
+ }
+
+ // required .BulkDeleteRequest.DeleteType deleteType = 2;
+ private org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType deleteType_ = org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType.ROW;
+ public boolean hasDeleteType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType getDeleteType() {
+ return deleteType_;
+ }
+ public Builder setDeleteType(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ deleteType_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearDeleteType() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ deleteType_ = org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType.ROW;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 timestamp = 3;
+ private long timestamp_ ;
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getTimestamp() {
+ return timestamp_;
+ }
+ public Builder setTimestamp(long value) {
+ bitField0_ |= 0x00000004;
+ timestamp_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ timestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint32 rowBatchSize = 4;
+ private int rowBatchSize_ ;
+ public boolean hasRowBatchSize() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getRowBatchSize() {
+ return rowBatchSize_;
+ }
+ public Builder setRowBatchSize(int value) {
+ bitField0_ |= 0x00000008;
+ rowBatchSize_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearRowBatchSize() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ rowBatchSize_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:BulkDeleteRequest)
+ }
+
+ static {
+ defaultInstance = new BulkDeleteRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:BulkDeleteRequest)
+ }
+
+ public interface BulkDeleteResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 rowsDeleted = 1;
+ boolean hasRowsDeleted();
+ long getRowsDeleted();
+
+ // optional uint64 versionsDeleted = 2;
+ boolean hasVersionsDeleted();
+ long getVersionsDeleted();
+ }
+ public static final class BulkDeleteResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements BulkDeleteResponseOrBuilder {
+ // Use BulkDeleteResponse.newBuilder() to construct.
+ private BulkDeleteResponse(Builder builder) {
+ super(builder);
+ }
+ private BulkDeleteResponse(boolean noInit) {}
+
+ private static final BulkDeleteResponse defaultInstance;
+ public static BulkDeleteResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BulkDeleteResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required uint64 rowsDeleted = 1;
+ public static final int ROWSDELETED_FIELD_NUMBER = 1;
+ private long rowsDeleted_;
+ public boolean hasRowsDeleted() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getRowsDeleted() {
+ return rowsDeleted_;
+ }
+
+ // optional uint64 versionsDeleted = 2;
+ public static final int VERSIONSDELETED_FIELD_NUMBER = 2;
+ private long versionsDeleted_;
+ public boolean hasVersionsDeleted() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getVersionsDeleted() {
+ return versionsDeleted_;
+ }
+
+ private void initFields() {
+ rowsDeleted_ = 0L;
+ versionsDeleted_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRowsDeleted()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, rowsDeleted_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, versionsDeleted_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, rowsDeleted_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, versionsDeleted_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse other = (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse) obj;
+
+ boolean result = true;
+ result = result && (hasRowsDeleted() == other.hasRowsDeleted());
+ if (hasRowsDeleted()) {
+ result = result && (getRowsDeleted()
+ == other.getRowsDeleted());
+ }
+ result = result && (hasVersionsDeleted() == other.hasVersionsDeleted());
+ if (hasVersionsDeleted()) {
+ result = result && (getVersionsDeleted()
+ == other.getVersionsDeleted());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRowsDeleted()) {
+ hash = (37 * hash) + ROWSDELETED_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getRowsDeleted());
+ }
+ if (hasVersionsDeleted()) {
+ hash = (37 * hash) + VERSIONSDELETED_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getVersionsDeleted());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.internal_static_BulkDeleteResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ rowsDeleted_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ versionsDeleted_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse build() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse result = new org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.rowsDeleted_ = rowsDeleted_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.versionsDeleted_ = versionsDeleted_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance()) return this;
+ if (other.hasRowsDeleted()) {
+ setRowsDeleted(other.getRowsDeleted());
+ }
+ if (other.hasVersionsDeleted()) {
+ setVersionsDeleted(other.getVersionsDeleted());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasRowsDeleted()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ rowsDeleted_ = input.readUInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ versionsDeleted_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required uint64 rowsDeleted = 1;
+ private long rowsDeleted_ ;
+ public boolean hasRowsDeleted() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getRowsDeleted() {
+ return rowsDeleted_;
+ }
+ public Builder setRowsDeleted(long value) {
+ bitField0_ |= 0x00000001;
+ rowsDeleted_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearRowsDeleted() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ rowsDeleted_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 versionsDeleted = 2;
+ private long versionsDeleted_ ;
+ public boolean hasVersionsDeleted() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getVersionsDeleted() {
+ return versionsDeleted_;
+ }
+ public Builder setVersionsDeleted(long value) {
+ bitField0_ |= 0x00000002;
+ versionsDeleted_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearVersionsDeleted() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ versionsDeleted_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:BulkDeleteResponse)
+ }
+
+ static {
+ defaultInstance = new BulkDeleteResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:BulkDeleteResponse)
+ }
+
+ public static abstract class BulkDeleteService
+ implements com.google.protobuf.Service {
+ protected BulkDeleteService() {}
+
+ public interface Interface {
+ public abstract void delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new BulkDeleteService() {
+ @java.lang.Override
+ public void delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse> done) {
+ impl.delete(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.delete(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ public abstract void delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.delete(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse>specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse delete(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.getDefaultInstance());
+ }
+
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BulkDeleteRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BulkDeleteRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BulkDeleteResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BulkDeleteResponse_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\020BulkDelete.proto\032\014Client.proto\"\300\001\n\021Bul" +
+ "kDeleteRequest\022\023\n\004scan\030\001 \002(\0132\005.Scan\0221\n\nd" +
+ "eleteType\030\002 \002(\0162\035.BulkDeleteRequest.Dele" +
+ "teType\022\021\n\ttimestamp\030\003 \001(\004\022\024\n\014rowBatchSiz" +
+ "e\030\004 \002(\r\":\n\nDeleteType\022\007\n\003ROW\020\000\022\n\n\006FAMILY" +
+ "\020\001\022\n\n\006COLUMN\020\002\022\013\n\007VERSION\020\003\"B\n\022BulkDelet" +
+ "eResponse\022\023\n\013rowsDeleted\030\001 \002(\004\022\027\n\017versio" +
+ "nsDeleted\030\002 \001(\0042F\n\021BulkDeleteService\0221\n\006" +
+ "delete\022\022.BulkDeleteRequest\032\023.BulkDeleteR" +
+ "esponseBQ\n5org.apache.hadoop.hbase.copro",
+ "cessor.example.generatedB\020BulkDeleteProt" +
+ "osH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_BulkDeleteRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_BulkDeleteRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BulkDeleteRequest_descriptor,
+ new java.lang.String[] { "Scan", "DeleteType", "Timestamp", "RowBatchSize", },
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder.class);
+ internal_static_BulkDeleteResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_BulkDeleteResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BulkDeleteResponse_descriptor,
+ new java.lang.String[] { "RowsDeleted", "VersionsDeleted", },
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
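
For context on how the generated stubs above get exercised: a client builds a BulkDeleteRequest and fans it out to every region covered by a Scan via HTable.coprocessorService(), which hands each region's BulkDeleteService proxy to a Batch.Call. The sketch below is a minimal illustration of that flow, assuming the trunk-era client API (HTable#coprocessorService, ServerRpcController, BlockingRpcCallback, ProtobufUtil#toScan) and a placeholder table name "myTable" with BulkDeleteEndpoint loaded as a coprocessor; it is not taken from the TestBulkDeleteProtocol file added in this commit.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

public class BulkDeleteClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // "myTable" is a placeholder; the table must have BulkDeleteEndpoint deployed on it.
    HTable table = new HTable(conf, "myTable");
    // The Scan selects which rows the endpoint deletes; narrow it with a row range or filter.
    final Scan scan = new Scan();

    // Invoke the endpoint on every region that intersects the scan's row range.
    Map<byte[], BulkDeleteResponse> results = table.coprocessorService(
        BulkDeleteService.class, scan.getStartRow(), scan.getStopRow(),
        new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
          public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<BulkDeleteResponse> callback =
                new BlockingRpcCallback<BulkDeleteResponse>();
            BulkDeleteRequest request = BulkDeleteRequest.newBuilder()
                .setScan(ProtobufUtil.toScan(scan)) // required Scan field
                .setDeleteType(DeleteType.ROW)      // delete whole rows
                .setRowBatchSize(500)               // rows deleted per internal mutate batch
                .build();
            service.delete(controller, request, callback);
            return callback.get();
          }
        });

    // Each region answers with its own BulkDeleteResponse, so sum the counts client-side.
    long totalRowsDeleted = 0;
    for (BulkDeleteResponse response : results.values()) {
      totalRowsDeleted += response.getRowsDeleted();
    }
    System.out.println("Rows deleted: " + totalRowsDeleted);
    table.close();
  }
}
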
Added: hbase/trunk/hbase-server/src/main/protobuf/BulkDelete.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/protobuf/BulkDelete.proto?rev=1401330&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/protobuf/BulkDelete.proto (added)
+++ hbase/trunk/hbase-server/src/main/protobuf/BulkDelete.proto Tue Oct 23 15:59:57 2012
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hbase.coprocessor.example.generated";
+option java_outer_classname = "BulkDeleteProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "Client.proto";
+
+message BulkDeleteRequest {
+ required Scan scan = 1;
+ required DeleteType deleteType = 2;
+ optional uint64 timestamp = 3;
+ required uint32 rowBatchSize = 4;
+
+ enum DeleteType {
+ ROW = 0;
+ FAMILY = 1;
+ COLUMN = 2;
+ VERSION = 3;
+ }
+}
+
+message BulkDeleteResponse {
+ required uint64 rowsDeleted = 1;
+ optional uint64 versionsDeleted = 2;
+}
+
+service BulkDeleteService {
+ rpc delete(BulkDeleteRequest)
+ returns (BulkDeleteResponse);
+}
\ No newline at end of file
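
The BulkDeleteProtos.java file added above is the protoc output for this descriptor. Assuming paths relative to the repository root (the project's actual build invocation may differ), regenerating it would look roughly like:

  protoc -Ihbase-server/src/main/protobuf \
      --java_out=hbase-server/src/main/java \
      hbase-server/src/main/protobuf/BulkDelete.proto

The -I directory must also contain Client.proto, since BulkDelete.proto imports the Scan message from it.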