Posted to commits@hbase.apache.org by ga...@apache.org on 2012/09/18 08:32:58 UTC
svn commit: r1387001 [2/5] - in /hbase/trunk/hbase-server/src:
main/java/org/apache/hadoop/hbase/client/
main/java/org/apache/hadoop/hbase/client/coprocessor/
main/java/org/apache/hadoop/hbase/coprocessor/
main/java/org/apache/hadoop/hbase/coprocessor/...
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java?rev=1387001&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java Tue Sep 18 06:32:57 2012
@@ -0,0 +1,1034 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Examples.proto
+
+package org.apache.hadoop.hbase.coprocessor.example.generated;
+
+public final class ExampleProtos {
+ private ExampleProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface CountRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ public static final class CountRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements CountRequestOrBuilder {
+ // Use CountRequest.newBuilder() to construct.
+ private CountRequest(Builder builder) {
+ super(builder);
+ }
+ private CountRequest(boolean noInit) {}
+
+ private static final CountRequest defaultInstance;
+ public static CountRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CountRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountRequest_fieldAccessorTable;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest other = (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest build() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest result = new org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:CountRequest)
+ }
+
+ static {
+ defaultInstance = new CountRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CountRequest)
+ }
+
+ public interface CountResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int64 count = 1 [default = 0];
+ boolean hasCount();
+ long getCount();
+ }
+ public static final class CountResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements CountResponseOrBuilder {
+ // Use CountResponse.newBuilder() to construct.
+ private CountResponse(Builder builder) {
+ super(builder);
+ }
+ private CountResponse(boolean noInit) {}
+
+ private static final CountResponse defaultInstance;
+ public static CountResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CountResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required int64 count = 1 [default = 0];
+ public static final int COUNT_FIELD_NUMBER = 1;
+ private long count_;
+ public boolean hasCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getCount() {
+ return count_;
+ }
+
+ private void initFields() {
+ count_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasCount()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(1, count_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(1, count_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse other = (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse) obj;
+
+ boolean result = true;
+ result = result && (hasCount() == other.hasCount());
+ if (hasCount()) {
+ result = result && (getCount()
+ == other.getCount());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasCount()) {
+ hash = (37 * hash) + COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCount());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.internal_static_CountResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ count_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse build() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse result = new org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.count_ = count_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance()) return this;
+ if (other.hasCount()) {
+ setCount(other.getCount());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasCount()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ count_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required int64 count = 1 [default = 0];
+ private long count_ ;
+ public boolean hasCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getCount() {
+ return count_;
+ }
+ public Builder setCount(long value) {
+ bitField0_ |= 0x00000001;
+ count_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearCount() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ count_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CountResponse)
+ }
+
+ static {
+ defaultInstance = new CountResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CountResponse)
+ }
+
+ public static abstract class RowCountService
+ implements com.google.protobuf.Service {
+ protected RowCountService() {}
+
+ public interface Interface {
+ public abstract void getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
+
+ public abstract void getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new RowCountService() {
+ @java.lang.Override
+ public void getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done) {
+ impl.getRowCount(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done) {
+ impl.getKeyValueCount(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.getRowCount(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)request);
+ case 1:
+ return impl.getKeyValueCount(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ public abstract void getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
+
+ public abstract void getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.getRowCount(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse>specializeCallback(
+ done));
+ return;
+ case 1:
+ this.getKeyValueCount(controller, (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse>specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.RowCountService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance()));
+ }
+
+ public void getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse getRowCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse getKeyValueCount(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.getDefaultInstance());
+ }
+
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CountRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CountRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CountResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CountResponse_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\016Examples.proto\"\016\n\014CountRequest\"!\n\rCoun" +
+ "tResponse\022\020\n\005count\030\001 \002(\003:\00102r\n\017RowCountS" +
+ "ervice\022,\n\013getRowCount\022\r.CountRequest\032\016.C" +
+ "ountResponse\0221\n\020getKeyValueCount\022\r.Count" +
+ "Request\032\016.CountResponseBN\n5org.apache.ha" +
+ "doop.hbase.coprocessor.example.generated" +
+ "B\rExampleProtosH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_CountRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_CountRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CountRequest_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest.Builder.class);
+ internal_static_CountResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_CountResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CountResponse_descriptor,
+ new java.lang.String[] { "Count", },
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.class,
+ org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
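
The generated RowCountService above is invoked through the dynamic RPC channel introduced later in this commit (CoprocessorRpcChannel). A minimal client-side sketch, assuming an existing HTable named "table" (variable names here are illustrative, not part of the commit):

    // Bind a channel to the region containing the given row key.
    CoprocessorRpcChannel channel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    // Create a blocking stub from the generated service and call it.
    ExampleProtos.RowCountService.BlockingInterface service =
        ExampleProtos.RowCountService.newBlockingStub(channel);
    ExampleProtos.CountResponse response =
        service.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance());
    long rows = response.getCount();  // getRowCount() throws ServiceException on RPC failure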
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java?rev=1387001&r1=1387000&r2=1387001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java Tue Sep 18 06:32:57 2012
@@ -171,117 +171,37 @@ public class AccessControlCoprocessor ex
<h2><a name="commandtarget">Endpoint</a></h2>
<code>Coprocessor</code> and <code>RegionObserver</code> provide certain hooks
-for injecting user code running at each region. The user code will be triggerd
+for injecting user code running at each region. The user code will be triggered
by existing <code>HTable</code> and <code>HBaseAdmin</code> operations at
certain hook points.
<p>
-Through Endpoint and dynamic RPC protocol, you can define your own
-interface communicated between client and region server,
-i.e., you can create a new method, specify passed parameters and return types
-for this new method.
-And the new Endpoint methods can be triggered by
-calling client side dynamic RPC functions -- <code>HTable.coprocessorExec(...)
+Coprocessor Endpoints allow you to define your own dynamic RPC protocol to communicate
+between clients and region servers, i.e., you can create a new method, specifying custom
+request parameters and return types. RPC methods exposed by coprocessor Endpoints can be
+triggered by calling client side dynamic RPC functions -- <code>HTable.coprocessorService(...)
</code>.
<p>
-To implement a Endpoint, you need to:
+To implement an Endpoint, you need to:
<ul>
-<li>Extend <code>CoprocessorProtocol</code>: the interface defines
-communication protocol for the new Endpoint, and will be
-served as the RPC protocol between client and region server.</li>
-<li>Extend both <code>BaseEndpointCoprocessor</code> abstract class,
-and the above extended <code>CoprocessorProtocol</code> interface:
-the actually implemented class running at region server.</li>
+ <li>Define a protocol buffer Service and supporting Message types for the RPC methods.
+ See the
+ <a href="https://developers.google.com/protocol-buffers/docs/proto#services">protocol buffer guide</a>
+ for more details on defining services.</li>
+ <li>Generate the Service and Message code using the protoc compiler</li>
+ <li>Implement the generated Service interface in your coprocessor class and implement the
+ <code>CoprocessorService</code> interface. The <code>CoprocessorService.getService()</code>
+ method should return a reference to the Endpoint's protocol buffer Service instance.</li>
</ul>
<p>
-Here's an example of performing column aggregation at region server:
-<div style="background-color: #cccccc; padding: 2px">
-<blockquote><pre>
-// A sample protocol for performing aggregation at regions.
-public static interface ColumnAggregationProtocol
-extends CoprocessorProtocol {
- // Perform aggregation for a given column at the region. The aggregation
- // will include all the rows inside the region. It can be extended to
- // allow passing start and end rows for a fine-grained aggregation.
- public int sum(byte[] family, byte[] qualifier) throws IOException;
-}
-// Aggregation implementation at a region.
-public static class ColumnAggregationEndpoint extends BaseEndpointCoprocessor
-implements ColumnAggregationProtocol {
- // @Override
- // Scan the region by the given family and qualifier. Return the aggregation
- // result.
- public int sum(byte[] family, byte[] qualifier)
- throws IOException {
- // aggregate at each region
- Scan scan = new Scan();
- scan.addColumn(family, qualifier);
- int sumResult = 0;
- // use an internal scanner to perform scanning.
- InternalScanner scanner = getEnvironment().getRegion().getScanner(scan);
- try {
- List<KeyValue> curVals = new ArrayList<KeyValue>();
- boolean done = false;
- do {
- curVals.clear();
- done = scanner.next(curVals);
- KeyValue kv = curVals.get(0);
- sumResult += Bytes.toInt(kv.getValue());
- } while (done);
- } finally {
- scanner.close();
- }
- return sumResult;
- }
-}
-</pre></blockquote>
-</div>
-<p>
-Client invocations are performed through <code>HTable</code>,
-which has the following methods added by dynamic RPC protocol:
-
-<div style="background-color: #cccccc; padding: 2px">
-<blockquote><pre>
-public <T extends CoprocessorProtocol> T coprocessorProxy(Class<T> protocol, Row row)
+For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample
+code, see the {@link org.apache.hadoop.hbase.client.coprocessor} package documentation.
+</p>
-public <T extends CoprocessorProtocol, R> void coprocessorExec(
- Class<T> protocol, List<? extends Row> rows,
- BatchCall<T,R> callable, BatchCallback<R> callback)
-
-public <T extends CoprocessorProtocol, R> void coprocessorExec(
- Class<T> protocol, RowRange range,
- BatchCall<T,R> callable, BatchCallback<R> callback)
-</pre></blockquote>
-</div>
-
-<p>
-Here is a client side example of invoking
-<code>ColumnAggregationEndpoint</code>:
-<div style="background-color: #cccccc; padding: 2px">
-<blockquote><pre>
-HTable table = new HTable(util.getConfiguration(), TEST_TABLE);
-Scan scan;
-Map<byte[], Integer> results;
-
-// scan: for all regions
-scan = new Scan();
-results = table.coprocessorExec(ColumnAggregationProtocol.class, scan,
- new BatchCall<ColumnAggregationProtocol,Integer>() {
- public Integer call(ColumnAggregationProtocol instance) throws IOException{
- return instance.sum(TEST_FAMILY, TEST_QUALIFIER);
- }
- });
-int sumResult = 0;
-int expectedResult = 0;
-for (Map.Entry<byte[], Integer> e : results.entrySet()) {
- sumResult += e.getValue();
-}
-</pre></blockquote>
-</div>
-<h2><a name="load">Coprocess loading</a></h2>
+<h2><a name="load">Coprocessor loading</a></h2>
A customized coprocessor can be loaded in two different ways: by configuration,
or by <code>HTableDescriptor</code> for a newly created table.
<p>
-(Currently we don't really have an on demand coprocessor loading machanism for
+(Currently we don't really have an on demand coprocessor loading mechanism for
opened regions.)
<h3>Load from configuration</h3>
Whenever a region is opened, it will read coprocessor class names from
@@ -294,7 +214,7 @@ default coprocessors. The classes must b
<blockquote><pre>
<property>
<name>hbase.coprocessor.region.classes</name>
- <value>org.apache.hadoop.hbase.coprocessor.AccessControllCoprocessor, org.apache.hadoop.hbase.coprocessor.ColumnAggregationProtocol</value>
+ <value>org.apache.hadoop.hbase.coprocessor.AccessControlCoprocessor, org.apache.hadoop.hbase.coprocessor.ColumnAggregationProtocol</value>
<description>A comma-separated list of Coprocessors that are loaded by
default. For any override coprocessor method from RegionObserver or
Coprocessor, these classes' implementation will be called
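
To make the Endpoint steps described in the updated package-info concrete, here is a hedged sketch of a region coprocessor exposing the generated RowCountService; the class name and scan logic are illustrative, while ExampleProtos, CoprocessorService, and getService() come from this commit:

    public class RowCountEndpoint extends ExampleProtos.RowCountService
        implements Coprocessor, CoprocessorService {
      private RegionCoprocessorEnvironment env;

      @Override
      public Service getService() {
        return this;  // the protobuf Service instance served by this Endpoint
      }

      @Override
      public void start(CoprocessorEnvironment env) throws IOException {
        if (env instanceof RegionCoprocessorEnvironment) {
          this.env = (RegionCoprocessorEnvironment) env;
        } else {
          throw new CoprocessorException("Must be loaded on a table region!");
        }
      }

      @Override
      public void stop(CoprocessorEnvironment env) {
        // nothing to clean up
      }

      @Override
      public void getRowCount(RpcController controller,
          ExampleProtos.CountRequest request,
          RpcCallback<ExampleProtos.CountResponse> done) {
        long count = 0;
        // ... scan the region via env.getRegion().getScanner(...) and count rows ...
        done.run(ExampleProtos.CountResponse.newBuilder().setCount(count).build());
      }

      @Override
      public void getKeyValueCount(RpcController controller,
          ExampleProtos.CountRequest request,
          RpcCallback<ExampleProtos.CountResponse> done) {
        // analogous to getRowCount(), counting KeyValues instead of rows
        done.run(ExampleProtos.CountResponse.newBuilder().setCount(0L).build());
      }
    }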
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java?rev=1387001&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java Tue Sep 18 06:32:57 2012
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import com.google.protobuf.RpcCallback;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+
+/**
+ * Simple {@link RpcCallback} implementation providing a
+ * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which
+ * will block until the instance's {@link BlockingRpcCallback#run(Object)} method has been called.
+ * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class BlockingRpcCallback<R> implements RpcCallback<R> {
+ private R result;
+ private boolean resultSet = false;
+
+ /**
+ * Called on completion of the RPC call with the response object, or {@code null} in the case of
+ * an error.
+ * @param parameter the response object or {@code null} if an error occurred
+ */
+ @Override
+ public void run(R parameter) {
+ synchronized (this) {
+ result = parameter;
+ resultSet = true;
+ this.notify();
+ }
+ }
+
+ /**
+ * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
+ * passed. When used asynchronously, this method will block until the {@link #run(Object)}
+ * method has been called.
+ * @return the response object or {@code null} if no response was passed
+ */
+ public synchronized R get() throws IOException {
+ while (!resultSet) {
+ try {
+ this.wait();
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
+ exception.initCause(ie);
+ throw exception;
+ }
+ }
+ return result;
+ }
+}
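
The callback pairs with the non-blocking stub flavor: get() parks the caller until the channel delivers a response through run(). A short usage sketch (the table and row variables are assumed, not part of this file):

    CoprocessorRpcChannel channel = table.coprocessorService(row);
    ExampleProtos.RowCountService.Stub service =
        ExampleProtos.RowCountService.newStub(channel);
    BlockingRpcCallback<ExampleProtos.CountResponse> callback =
        new BlockingRpcCallback<ExampleProtos.CountResponse>();
    service.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance(), callback);
    // Blocks until run() fires; throws InterruptedIOException if interrupted.
    ExampleProtos.CountResponse response = callback.get();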
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorProtocol.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorProtocol.java?rev=1387001&r1=1387000&r2=1387001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorProtocol.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorProtocol.java Tue Sep 18 06:32:57 2012
@@ -35,9 +35,12 @@ import org.apache.hadoop.classification.
* <li>an array or {@code java.util.List} of one of the above</li>
* </ul>
* </p>
+ * @deprecated since 0.96. Use {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService}
+ * instead.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
+@Deprecated
public interface CoprocessorProtocol extends VersionedProtocol {
public static final long VERSION = 1L;
}
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java?rev=1387001&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java Tue Sep 18 06:32:57 2012
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import com.google.protobuf.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ServerCallable;
+import org.apache.hadoop.hbase.client.coprocessor.Exec;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+
+/**
+ * Provides clients with an RPC connection to call coprocessor endpoint {@link Service}s
+ * against a given table region. An instance of this class may be obtained
+ * by calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])},
+ * but should normally only be used in creating a new {@link Service} stub to call the endpoint
+ * methods.
+ * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])
+ */
+@InterfaceAudience.Private
+public class CoprocessorRpcChannel implements RpcChannel, BlockingRpcChannel {
+ private static Log LOG = LogFactory.getLog(CoprocessorRpcChannel.class);
+
+ private final HConnection connection;
+ private final byte[] table;
+ private final byte[] row;
+ private byte[] lastRegion;
+
+ public CoprocessorRpcChannel(HConnection conn, byte[] table, byte[] row) {
+ this.connection = conn;
+ this.table = table;
+ this.row = row;
+ }
+
+ @Override
+ public void callMethod(Descriptors.MethodDescriptor method,
+ RpcController controller,
+ Message request, Message responsePrototype,
+ RpcCallback<Message> callback) {
+ Message response = null;
+ try {
+ response = callExecService(method, request, responsePrototype);
+ } catch (IOException ioe) {
+ LOG.warn("Call failed on IOException", ioe);
+ ResponseConverter.setControllerException(controller, ioe);
+ }
+ if (callback != null) {
+ callback.run(response);
+ }
+ }
+
+ @Override
+ public Message callBlockingMethod(Descriptors.MethodDescriptor method,
+ RpcController controller,
+ Message request, Message responsePrototype)
+ throws ServiceException {
+ try {
+ return callExecService(method, request, responsePrototype);
+ } catch (IOException ioe) {
+ throw new ServiceException("Error calling method "+method.getFullName(), ioe);
+ }
+ }
+
+ private Message callExecService(Descriptors.MethodDescriptor method,
+ Message request, Message responsePrototype)
+ throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Call: "+method.getName()+", "+request.toString());
+ }
+
+ if (row == null) {
+ throw new IllegalArgumentException("Missing row property for remote region location");
+ }
+
+ final ClientProtos.CoprocessorServiceCall call =
+ ClientProtos.CoprocessorServiceCall.newBuilder()
+ .setRow(ByteString.copyFrom(row))
+ .setServiceName(method.getService().getFullName())
+ .setMethodName(method.getName())
+ .setRequest(request.toByteString()).build();
+ ServerCallable<ClientProtos.CoprocessorServiceResponse> callable =
+ new ServerCallable<ClientProtos.CoprocessorServiceResponse>(connection, table, row) {
+ public CoprocessorServiceResponse call() throws Exception {
+ byte[] regionName = location.getRegionInfo().getRegionName();
+ return ProtobufUtil.execService(server, call, regionName);
+ }
+ };
+ CoprocessorServiceResponse result = callable.withRetries();
+ Message response = null;
+ if (result.getValue().hasValue()) {
+ response = responsePrototype.newBuilderForType()
+ .mergeFrom(result.getValue().getValue()).build();
+ } else {
+ response = responsePrototype.getDefaultInstanceForType();
+ }
+ lastRegion = result.getRegion().getValue().toByteArray();
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Result is region=" + Bytes.toStringBinary(lastRegion) + ", value=" + response);
+ }
+ return response;
+ }
+
+ public byte[] getLastRegion() {
+ return lastRegion;
+ }
+}
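
Because the channel implements both RpcChannel and BlockingRpcChannel, a single instance backs either stub flavor, and getLastRegion() reports which region served the most recent call. A brief sketch, again with assumed table and row variables:

    CoprocessorRpcChannel channel = table.coprocessorService(row);
    ExampleProtos.RowCountService.BlockingInterface stub =
        ExampleProtos.RowCountService.newBlockingStub(channel);
    ExampleProtos.CountResponse response =
        stub.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance());
    // The region that handled the call, useful for debugging request routing.
    byte[] regionName = channel.getLastRegion();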
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java?rev=1387001&r1=1387000&r2=1387001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java Tue Sep 18 06:32:57 2012
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.util.Byte
* {@link org.apache.hadoop.hbase.client.ServerCallable} instance).
*/
@InterfaceAudience.Private
+@Deprecated
public class ExecRPCInvoker implements InvocationHandler {
// LOG is NOT in hbase subpackage intentionally so that the default HBase
// DEBUG log level does NOT emit RPC-level logging.
@@ -84,6 +85,8 @@ public class ExecRPCInvoker implements I
LOG.debug("Result is region="+ Bytes.toStringBinary(regionName) +
", value="+result.getValue());
return result.getValue();
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("Null row passed for call");
}
return null;
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java?rev=1387001&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java Tue Sep 18 06:32:57 2012
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+import org.apache.hadoop.util.StringUtils;
+
+import java.io.IOException;
+
+/**
+ * Used for server-side protobuf RPC service invocations. This handler allows
+ * invocation exceptions to easily be passed through to the RPC server from coprocessor
+ * {@link Service} implementations.
+ *
+ * <p>
+ * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
+ * pattern to pass exceptions back to the RPC client:
+ * <code>
+ * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
+ * MyResponse response = null;
+ * try {
+ * // do processing
+ * response = MyResponse.getDefaultInstance(); // or use a new builder to populate the response
+ * } catch (IOException ioe) {
+ * // pass exception back up
+ * ResponseConverter.setControllerException(controller, ioe);
+ * }
+ * done.run(response);
+ * }
+ * </code>
+ * </p>
+ */
+public class ServerRpcController implements RpcController {
+ /**
+ * The exception thrown within
+ * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
+ * if any.
+ */
+ // TODO: it would be good to widen this to just Throwable, but IOException is what we allow now
+ private IOException serviceException;
+ private String errorMessage;
+
+ @Override
+ public void reset() {
+ serviceException = null;
+ errorMessage = null;
+ }
+
+ @Override
+ public boolean failed() {
+ return (failedOnException() || errorMessage != null);
+ }
+
+ @Override
+ public String errorText() {
+ return errorMessage;
+ }
+
+ @Override
+ public void startCancel() {
+ // not implemented
+ }
+
+ @Override
+ public void setFailed(String message) {
+ errorMessage = message;
+ }
+
+ @Override
+ public boolean isCanceled() {
+ return false;
+ }
+
+ @Override
+ public void notifyOnCancel(RpcCallback<Object> objectRpcCallback) {
+ // not implemented
+ }
+
+ /**
+ * Sets an exception to be communicated back to the {@link Service} client.
+ * @param ioe the exception encountered during execution of the service method
+ */
+ public void setFailedOn(IOException ioe) {
+ serviceException = ioe;
+ setFailed(StringUtils.stringifyException(ioe));
+ }
+
+ /**
+ * Returns any exception thrown during service method invocation, or {@code null} if no exception
+ * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even
+ * when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is
+ * declared.
+ */
+ public IOException getFailedOn() {
+ return serviceException;
+ }
+
+ /**
+ * Returns whether or not a server exception was generated in the prior RPC invocation.
+ */
+ public boolean failedOnException() {
+ return serviceException != null;
+ }
+}
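
The controller added here is also useful from the dispatching side: after
invoking a Service method in-process, the caller can test failedOnException()
and rethrow the typed IOException. A minimal sketch under the same
hypothetical PingProtos assumption:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicReference;

    import com.google.protobuf.RpcCallback;

    import org.apache.hadoop.hbase.ipc.ServerRpcController;

    // Sketch only: PingProtos is a hypothetical protoc-generated class.
    public final class ControllerUsageSketch {
      private ControllerUsageSketch() {}

      public static PingProtos.PingResponse call(PingProtos.PingService service)
          throws IOException {
        ServerRpcController controller = new ServerRpcController();
        // Endpoints invoke the callback synchronously, so a one-slot holder
        // is enough to capture the response.
        final AtomicReference<PingProtos.PingResponse> holder =
            new AtomicReference<PingProtos.PingResponse>();
        service.ping(controller, PingProtos.PingRequest.getDefaultInstance(),
            new RpcCallback<PingProtos.PingResponse>() {
              public void run(PingProtos.PingResponse response) {
                holder.set(response);
              }
            });
        if (controller.failedOnException()) {
          throw controller.getFailedOn();  // rethrow what the endpoint recorded
        }
        return holder.get();
      }
    }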
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java?rev=1387001&r1=1387000&r2=1387001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java Tue Sep 18 06:32:57 2012
@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.filter.By
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -93,6 +94,9 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
@@ -120,6 +124,7 @@ import org.apache.hadoop.hbase.security.
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Methods;
import org.apache.hadoop.hbase.util.Pair;
import com.google.common.collect.ArrayListMultimap;
@@ -127,8 +132,12 @@ import com.google.common.collect.ListMul
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
+import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.*;
+
/**
* Protobufs utility.
*/
@@ -1306,6 +1315,26 @@ public final class ProtobufUtil {
}
}
+ public static CoprocessorServiceResponse execService(final ClientProtocol client,
+ final CoprocessorServiceCall call, final byte[] regionName) throws IOException {
+ CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
+ .setCall(call).setRegion(
+ RequestConverter.buildRegionSpecifier(REGION_NAME, regionName)).build();
+ try {
+ CoprocessorServiceResponse response =
+ client.execService(null, request);
+ return response;
+ } catch (ServiceException se) {
+ throw getRemoteException(se);
+ }
+ }
+
+ public static <T extends Service> T newServiceStub(Class<T> service, RpcChannel channel)
+ throws Exception {
+ return (T)Methods.call(service, null, "newStub",
+ new Class[]{ RpcChannel.class }, new Object[]{ channel });
+ }
+
// End helpers for Client
// Start helpers for Admin
@@ -1609,7 +1638,7 @@ public final class ProtobufUtil {
/**
* Convert a client Permission to a Permission proto
*
- * @param action the client Permission
+ * @param perm the client Permission
* @return the protobuf Permission
*/
public static AccessControlProtos.Permission toPermission(Permission perm) {
@@ -1650,7 +1679,7 @@ public final class ProtobufUtil {
/**
* Converts a Permission.Action proto to a client Permission.Action object.
*
- * @param proto the protobuf Action
+ * @param action the protobuf Action
* @return the converted Action
*/
public static Permission.Action toPermissionAction(
@@ -1789,4 +1818,21 @@ public final class ProtobufUtil {
return perms;
}
+
+ /**
+ * Unwraps an exception from a protobuf service into the underlying (expected) IOException.
+ * This method will <strong>always</strong> throw an exception.
+ * @param se the {@code ServiceException} instance to convert into an {@code IOException}
+ */
+ public static void toIOException(ServiceException se) throws IOException {
+ if (se == null) {
+ throw new NullPointerException("Null service exception passed!");
+ }
+
+ Throwable cause = se.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException)cause;
+ }
+ throw new IOException(se);
+ }
}
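
Two usage notes on the helpers added above. newServiceStub() reflects on the
static newStub(RpcChannel) factory that protoc generates for every service,
and toIOException() is meant for the boundary where a ServiceException must
become the IOException that client-facing APIs declare. A hedged sketch,
again with hypothetical PingProtos types:

    import java.io.IOException;

    import com.google.protobuf.RpcChannel;
    import com.google.protobuf.ServiceException;

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    // Sketch only: PingProtos is a hypothetical protoc-generated class.
    public final class ProtobufUtilSketch {
      private ProtobufUtilSketch() {}

      // Obtains an async stub without naming the generated newStub() method
      // directly, which lets generic client code stay service-agnostic.
      public static PingProtos.PingService stub(RpcChannel channel)
          throws Exception {
        return ProtobufUtil.newServiceStub(PingProtos.PingService.class, channel);
      }

      // toIOException() always throws; the trailing throw exists only to
      // satisfy the compiler's definite-return analysis.
      public static PingProtos.PingResponse pingChecked(
          PingProtos.PingService.BlockingInterface stub) throws IOException {
        try {
          return stub.ping(null, PingProtos.PingRequest.getDefaultInstance());
        } catch (ServiceException se) {
          ProtobufUtil.toIOException(se);
          throw new AssertionError("unreachable: toIOException always throws");
        }
      }
    }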
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java?rev=1387001&r1=1387000&r2=1387001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java Tue Sep 18 06:32:57 2012
@@ -21,10 +21,13 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import com.google.protobuf.RpcController;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
@@ -40,10 +43,13 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
+import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.ByteString;
+import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsResponse;
+
/**
* Helper utility to build protocol buffer responses,
* or retrieve data from protocol buffer responses.
@@ -117,6 +123,18 @@ public final class ResponseConverter {
return builder.build();
}
+ /**
+ * Converts the permissions list into a protocol buffer UserPermissionsResponse
+ */
+ public static UserPermissionsResponse buildUserPermissionsResponse(
+ final List<UserPermission> permissions) {
+ UserPermissionsResponse.Builder builder = UserPermissionsResponse.newBuilder();
+ for (UserPermission perm : permissions) {
+ builder.addPermission(ProtobufUtil.toUserPermission(perm));
+ }
+ return builder.build();
+ }
+
// End utilities for Client
// Start utilities for Admin
@@ -249,4 +267,19 @@ public final class ResponseConverter {
return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build();
}
+ /**
+ * Stores an exception encountered during RPC invocation so it can be passed back
+ * through to the client.
+ * @param controller the controller instance provided by the client when calling the service
+ * @param ioe the exception encountered
+ */
+ public static void setControllerException(RpcController controller, IOException ioe) {
+ if (controller != null) {
+ if (controller instanceof ServerRpcController) {
+ ((ServerRpcController)controller).setFailedOn(ioe);
+ } else {
+ controller.setFailed(StringUtils.stringifyException(ioe));
+ }
+ }
+ }
}
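
One behavioral note on setControllerException(): a null controller is
ignored, a ServerRpcController keeps the typed IOException, and any other
RpcController only receives the stringified form through setFailed(). The
endpoint-side pattern is shown in the ServerRpcController javadoc above; the
sketch below exercises the fallback branch with a plain RpcController and is
self-contained, so it needs no running HBase server:

    import java.io.IOException;

    import com.google.protobuf.RpcCallback;
    import com.google.protobuf.RpcController;

    import org.apache.hadoop.hbase.protobuf.ResponseConverter;

    // Sketch only: demonstrates the non-ServerRpcController branch.
    public final class FallbackControllerSketch {
      private FallbackControllerSketch() {}

      public static void main(String[] args) {
        RpcController controller = new RpcController() {
          private String error;
          public void reset() { error = null; }
          public boolean failed() { return error != null; }
          public String errorText() { return error; }
          public void startCancel() { /* cancellation not modeled */ }
          public void setFailed(String reason) { error = reason; }
          public boolean isCanceled() { return false; }
          public void notifyOnCancel(RpcCallback<Object> callback) {}
        };
        ResponseConverter.setControllerException(controller, new IOException("boom"));
        // Prints "true" plus the stringified stack trace set via setFailed().
        System.out.println(controller.failed() + ": " + controller.errorText());
      }
    }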