Posted to commits@hbase.apache.org by ap...@apache.org on 2017/08/10 01:20:49 UTC
[02/11] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4
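
In the hunks below, the hbase.pb.TableName message is generated into a new standalone outer class, TableProtos, and the generated WALProtos and ZooKeeperProtos classes switch their references from HBaseProtos.TableName to TableProtos.TableName. The message name and field numbers (namespace = 1, qualifier = 2) are unchanged, so the wire format stays the same; what changes is the compile-time Java reference. A minimal sketch of that reference change (the surrounding class is hypothetical; the generated type and its getDefaultInstance() come from the diff):

import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

// Hypothetical holder class; only the generated type is taken from this commit.
// branch-1.3 generated code would have declared the same field as
//   org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName
public class TableNameFieldSketch {
  private TableProtos.TableName tableName =
      TableProtos.TableName.getDefaultInstance();

  public TableProtos.TableName getTableName() {
    return tableName;
  }
}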
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
new file mode 100644
index 0000000..9507d01
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
@@ -0,0 +1,607 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Table.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class TableProtos {
+ private TableProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface TableNameOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes namespace = 1;
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ boolean hasNamespace();
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ com.google.protobuf.ByteString getNamespace();
+
+ // required bytes qualifier = 2;
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ boolean hasQualifier();
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ com.google.protobuf.ByteString getQualifier();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableName}
+ *
+ * <pre>
+ **
+ * Table Name
+ * </pre>
+ */
+ public static final class TableName extends
+ com.google.protobuf.GeneratedMessage
+ implements TableNameOrBuilder {
+ // Use TableName.newBuilder() to construct.
+ private TableName(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableName(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableName defaultInstance;
+ public static TableName getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableName getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableName(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ namespace_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ qualifier_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.class, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableName> PARSER =
+ new com.google.protobuf.AbstractParser<TableName>() {
+ public TableName parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableName(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableName> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bytes namespace = 1;
+ public static final int NAMESPACE_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString namespace_;
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public boolean hasNamespace() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public com.google.protobuf.ByteString getNamespace() {
+ return namespace_;
+ }
+
+ // required bytes qualifier = 2;
+ public static final int QUALIFIER_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString qualifier_;
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public boolean hasQualifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public com.google.protobuf.ByteString getQualifier() {
+ return qualifier_;
+ }
+
+ private void initFields() {
+ namespace_ = com.google.protobuf.ByteString.EMPTY;
+ qualifier_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNamespace()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasQualifier()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, namespace_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, qualifier_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, namespace_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, qualifier_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName other = (org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName) obj;
+
+ boolean result = true;
+ result = result && (hasNamespace() == other.hasNamespace());
+ if (hasNamespace()) {
+ result = result && getNamespace()
+ .equals(other.getNamespace());
+ }
+ result = result && (hasQualifier() == other.hasQualifier());
+ if (hasQualifier()) {
+ result = result && getQualifier()
+ .equals(other.getQualifier());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasNamespace()) {
+ hash = (37 * hash) + NAMESPACE_FIELD_NUMBER;
+ hash = (53 * hash) + getNamespace().hashCode();
+ }
+ if (hasQualifier()) {
+ hash = (37 * hash) + QUALIFIER_FIELD_NUMBER;
+ hash = (53 * hash) + getQualifier().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableName}
+ *
+ * <pre>
+ **
+ * Table Name
+ * </pre>
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.class, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ namespace_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ qualifier_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName build() {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName result = new org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.namespace_ = namespace_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.qualifier_ = qualifier_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) return this;
+ if (other.hasNamespace()) {
+ setNamespace(other.getNamespace());
+ }
+ if (other.hasQualifier()) {
+ setQualifier(other.getQualifier());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNamespace()) {
+
+ return false;
+ }
+ if (!hasQualifier()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bytes namespace = 1;
+ private com.google.protobuf.ByteString namespace_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public boolean hasNamespace() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public com.google.protobuf.ByteString getNamespace() {
+ return namespace_;
+ }
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public Builder setNamespace(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ namespace_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes namespace = 1;</code>
+ */
+ public Builder clearNamespace() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ namespace_ = getDefaultInstance().getNamespace();
+ onChanged();
+ return this;
+ }
+
+ // required bytes qualifier = 2;
+ private com.google.protobuf.ByteString qualifier_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public boolean hasQualifier() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public com.google.protobuf.ByteString getQualifier() {
+ return qualifier_;
+ }
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public Builder setQualifier(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ qualifier_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes qualifier = 2;</code>
+ */
+ public Builder clearQualifier() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ qualifier_ = getDefaultInstance().getQualifier();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableName)
+ }
+
+ static {
+ defaultInstance = new TableName(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableName)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableName_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableName_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\013Table.proto\022\010hbase.pb\"1\n\tTableName\022\021\n\t" +
+ "namespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014B>\n*or" +
+ "g.apache.hadoop.hbase.protobuf.generated" +
+ "B\013TableProtosH\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hbase_pb_TableName_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_TableName_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableName_descriptor,
+ new java.lang.String[] { "Namespace", "Qualifier", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
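
The generated class above exposes the usual protobuf 2.5-style surface for a message with two required bytes fields: a nested Builder, a static PARSER, parseFrom overloads, and value-based equals()/hashCode(). A short round-trip sketch against that surface (the wrapper class and main method are illustrative only):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

public class TableNameRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // build() enforces the two required fields declared in Table.proto
    TableProtos.TableName original = TableProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("my_table"))
        .build();

    byte[] bytes = original.toByteArray();        // serialize
    TableProtos.TableName parsed =
        TableProtos.TableName.parseFrom(bytes);   // static parseFrom from the generated class

    System.out.println(parsed.equals(original));  // true: generated value equality
  }
}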
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
index 9bae06f..a466e6c 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
@@ -8813,11 +8813,11 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
// required bytes encoded_region_name = 2;
/**
@@ -8921,11 +8921,11 @@ public final class WALProtos {
break;
}
case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = tableName_.toBuilder();
}
- tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(tableName_);
tableName_ = subBuilder.buildPartial();
@@ -8996,7 +8996,7 @@ public final class WALProtos {
private int bitField0_;
// required .hbase.pb.TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
@@ -9006,13 +9006,13 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
return tableName_;
}
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
@@ -9085,7 +9085,7 @@ public final class WALProtos {
}
private void initFields() {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
encodedRegionName_ = com.google.protobuf.ByteString.EMPTY;
stores_ = java.util.Collections.emptyList();
bulkloadSeqNum_ = 0L;
@@ -9347,7 +9347,7 @@ public final class WALProtos {
public Builder clear() {
super.clear();
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
@@ -9516,9 +9516,9 @@ public final class WALProtos {
private int bitField0_;
// required .hbase.pb.TableName table_name = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
@@ -9528,7 +9528,7 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
@@ -9538,7 +9538,7 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -9555,7 +9555,7 @@ public final class WALProtos {
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
public Builder setTableName(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
@@ -9568,12 +9568,12 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
- tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
tableName_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
@@ -9589,7 +9589,7 @@ public final class WALProtos {
*/
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
@@ -9600,7 +9600,7 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
@@ -9608,7 +9608,7 @@ public final class WALProtos {
/**
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
@@ -9619,11 +9619,11 @@ public final class WALProtos {
* <code>required .hbase.pb.TableName table_name = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
@@ -11978,56 +11978,56 @@ public final class WALProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\tWAL.proto\022\010hbase.pb\032\013HBase.proto\032\014Clie" +
- "nt.proto\"\217\001\n\tWALHeader\022\027\n\017has_compressio" +
- "n\030\001 \001(\010\022\026\n\016encryption_key\030\002 \001(\014\022\033\n\023has_t" +
- "ag_compression\030\003 \001(\010\022\027\n\017writer_cls_name\030" +
- "\004 \001(\t\022\033\n\023cell_codec_cls_name\030\005 \001(\t\"\273\002\n\006W" +
- "ALKey\022\033\n\023encoded_region_name\030\001 \002(\014\022\022\n\nta" +
- "ble_name\030\002 \002(\014\022\033\n\023log_sequence_number\030\003 " +
- "\002(\004\022\022\n\nwrite_time\030\004 \002(\004\022&\n\ncluster_id\030\005 " +
- "\001(\0132\016.hbase.pb.UUIDB\002\030\001\022%\n\006scopes\030\006 \003(\0132" +
- "\025.hbase.pb.FamilyScope\022\032\n\022following_kv_c",
- "ount\030\007 \001(\r\022#\n\013cluster_ids\030\010 \003(\0132\016.hbase." +
- "pb.UUID\022\022\n\nnonceGroup\030\t \001(\004\022\r\n\005nonce\030\n \001" +
- "(\004\022\034\n\024orig_sequence_number\030\013 \001(\004\"F\n\013Fami" +
- "lyScope\022\016\n\006family\030\001 \002(\014\022\'\n\nscope_type\030\002 " +
- "\002(\0162\023.hbase.pb.ScopeType\"\276\001\n\024CompactionD" +
- "escriptor\022\022\n\ntable_name\030\001 \002(\014\022\033\n\023encoded" +
- "_region_name\030\002 \002(\014\022\023\n\013family_name\030\003 \002(\014\022" +
- "\030\n\020compaction_input\030\004 \003(\t\022\031\n\021compaction_" +
- "output\030\005 \003(\t\022\026\n\016store_home_dir\030\006 \002(\t\022\023\n\013" +
- "region_name\030\007 \001(\014\"\244\003\n\017FlushDescriptor\0225\n",
- "\006action\030\001 \002(\0162%.hbase.pb.FlushDescriptor" +
- ".FlushAction\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023enco" +
- "ded_region_name\030\003 \002(\014\022\035\n\025flush_sequence_" +
- "number\030\004 \001(\004\022E\n\rstore_flushes\030\005 \003(\0132..hb" +
- "ase.pb.FlushDescriptor.StoreFlushDescrip" +
- "tor\022\023\n\013region_name\030\006 \001(\014\032Y\n\024StoreFlushDe" +
- "scriptor\022\023\n\013family_name\030\001 \002(\014\022\026\n\016store_h" +
- "ome_dir\030\002 \002(\t\022\024\n\014flush_output\030\003 \003(\t\"S\n\013F" +
- "lushAction\022\017\n\013START_FLUSH\020\000\022\020\n\014COMMIT_FL" +
- "USH\020\001\022\017\n\013ABORT_FLUSH\020\002\022\020\n\014CANNOT_FLUSH\020\003",
- "\"q\n\017StoreDescriptor\022\023\n\013family_name\030\001 \002(\014" +
- "\022\026\n\016store_home_dir\030\002 \002(\t\022\022\n\nstore_file\030\003" +
- " \003(\t\022\035\n\025store_file_size_bytes\030\004 \001(\004\"\237\001\n\022" +
- "BulkLoadDescriptor\022\'\n\ntable_name\030\001 \002(\0132\023" +
- ".hbase.pb.TableName\022\033\n\023encoded_region_na" +
- "me\030\002 \002(\014\022)\n\006stores\030\003 \003(\0132\031.hbase.pb.Stor" +
- "eDescriptor\022\030\n\020bulkload_seq_num\030\004 \002(\003\"\272\002" +
- "\n\025RegionEventDescriptor\022=\n\nevent_type\030\001 " +
- "\002(\0162).hbase.pb.RegionEventDescriptor.Eve" +
- "ntType\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023encoded_re",
- "gion_name\030\003 \002(\014\022\033\n\023log_sequence_number\030\004" +
- " \001(\004\022)\n\006stores\030\005 \003(\0132\031.hbase.pb.StoreDes" +
- "criptor\022$\n\006server\030\006 \001(\0132\024.hbase.pb.Serve" +
- "rName\022\023\n\013region_name\030\007 \001(\014\".\n\tEventType\022" +
- "\017\n\013REGION_OPEN\020\000\022\020\n\014REGION_CLOSE\020\001\"\014\n\nWA" +
- "LTrailer*d\n\tScopeType\022\033\n\027REPLICATION_SCO" +
- "PE_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE_GLOBAL\020\001" +
- "\022\034\n\030REPLICATION_SCOPE_SERIAL\020\002B?\n*org.ap" +
- "ache.hadoop.hbase.protobuf.generatedB\tWA" +
- "LProtosH\001\210\001\000\240\001\001"
+ "\n\tWAL.proto\022\010hbase.pb\032\013Table.proto\032\013HBas" +
+ "e.proto\032\014Client.proto\"\217\001\n\tWALHeader\022\027\n\017h" +
+ "as_compression\030\001 \001(\010\022\026\n\016encryption_key\030\002" +
+ " \001(\014\022\033\n\023has_tag_compression\030\003 \001(\010\022\027\n\017wri" +
+ "ter_cls_name\030\004 \001(\t\022\033\n\023cell_codec_cls_nam" +
+ "e\030\005 \001(\t\"\273\002\n\006WALKey\022\033\n\023encoded_region_nam" +
+ "e\030\001 \002(\014\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023log_seque" +
+ "nce_number\030\003 \002(\004\022\022\n\nwrite_time\030\004 \002(\004\022&\n\n" +
+ "cluster_id\030\005 \001(\0132\016.hbase.pb.UUIDB\002\030\001\022%\n\006" +
+ "scopes\030\006 \003(\0132\025.hbase.pb.FamilyScope\022\032\n\022f",
+ "ollowing_kv_count\030\007 \001(\r\022#\n\013cluster_ids\030\010" +
+ " \003(\0132\016.hbase.pb.UUID\022\022\n\nnonceGroup\030\t \001(\004" +
+ "\022\r\n\005nonce\030\n \001(\004\022\034\n\024orig_sequence_number\030" +
+ "\013 \001(\004\"F\n\013FamilyScope\022\016\n\006family\030\001 \002(\014\022\'\n\n" +
+ "scope_type\030\002 \002(\0162\023.hbase.pb.ScopeType\"\276\001" +
+ "\n\024CompactionDescriptor\022\022\n\ntable_name\030\001 \002" +
+ "(\014\022\033\n\023encoded_region_name\030\002 \002(\014\022\023\n\013famil" +
+ "y_name\030\003 \002(\014\022\030\n\020compaction_input\030\004 \003(\t\022\031" +
+ "\n\021compaction_output\030\005 \003(\t\022\026\n\016store_home_" +
+ "dir\030\006 \002(\t\022\023\n\013region_name\030\007 \001(\014\"\244\003\n\017Flush",
+ "Descriptor\0225\n\006action\030\001 \002(\0162%.hbase.pb.Fl" +
+ "ushDescriptor.FlushAction\022\022\n\ntable_name\030" +
+ "\002 \002(\014\022\033\n\023encoded_region_name\030\003 \002(\014\022\035\n\025fl" +
+ "ush_sequence_number\030\004 \001(\004\022E\n\rstore_flush" +
+ "es\030\005 \003(\0132..hbase.pb.FlushDescriptor.Stor" +
+ "eFlushDescriptor\022\023\n\013region_name\030\006 \001(\014\032Y\n" +
+ "\024StoreFlushDescriptor\022\023\n\013family_name\030\001 \002" +
+ "(\014\022\026\n\016store_home_dir\030\002 \002(\t\022\024\n\014flush_outp" +
+ "ut\030\003 \003(\t\"S\n\013FlushAction\022\017\n\013START_FLUSH\020\000" +
+ "\022\020\n\014COMMIT_FLUSH\020\001\022\017\n\013ABORT_FLUSH\020\002\022\020\n\014C",
+ "ANNOT_FLUSH\020\003\"q\n\017StoreDescriptor\022\023\n\013fami" +
+ "ly_name\030\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(\t\022\022\n" +
+ "\nstore_file\030\003 \003(\t\022\035\n\025store_file_size_byt" +
+ "es\030\004 \001(\004\"\237\001\n\022BulkLoadDescriptor\022\'\n\ntable" +
+ "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\033\n\023enco" +
+ "ded_region_name\030\002 \002(\014\022)\n\006stores\030\003 \003(\0132\031." +
+ "hbase.pb.StoreDescriptor\022\030\n\020bulkload_seq" +
+ "_num\030\004 \002(\003\"\272\002\n\025RegionEventDescriptor\022=\n\n" +
+ "event_type\030\001 \002(\0162).hbase.pb.RegionEventD" +
+ "escriptor.EventType\022\022\n\ntable_name\030\002 \002(\014\022",
+ "\033\n\023encoded_region_name\030\003 \002(\014\022\033\n\023log_sequ" +
+ "ence_number\030\004 \001(\004\022)\n\006stores\030\005 \003(\0132\031.hbas" +
+ "e.pb.StoreDescriptor\022$\n\006server\030\006 \001(\0132\024.h" +
+ "base.pb.ServerName\022\023\n\013region_name\030\007 \001(\014\"" +
+ ".\n\tEventType\022\017\n\013REGION_OPEN\020\000\022\020\n\014REGION_" +
+ "CLOSE\020\001\"\014\n\nWALTrailer*d\n\tScopeType\022\033\n\027RE" +
+ "PLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_S" +
+ "COPE_GLOBAL\020\001\022\034\n\030REPLICATION_SCOPE_SERIA" +
+ "L\020\002B?\n*org.apache.hadoop.hbase.protobuf." +
+ "generatedB\tWALProtosH\001\210\001\000\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -12100,6 +12100,7 @@ public final class WALProtos {
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
}, assigner);
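
Judging from the descriptor string in this hunk, BulkLoadDescriptor is the WAL message affected here: it is the only WAL.proto message whose table_name field is a .hbase.pb.TableName message rather than raw bytes, so its generated getters now return TableProtos.TableName. A hedged consumer sketch (only the generated getter signatures are taken from the diff; the wrapper is hypothetical):

import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;

public class BulkLoadTableNameSketch {
  // Format "namespace:qualifier" from a parsed bulk-load WAL entry.
  static String describe(WALProtos.BulkLoadDescriptor bulkLoad) {
    // After this commit the getter resolves to TableProtos.TableName;
    // the serialized form of the WAL entry itself is unchanged.
    TableProtos.TableName tn = bulkLoad.getTableName();
    return tn.getNamespace().toStringUtf8() + ":" + tn.getQualifier().toStringUtf8();
  }
}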
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index fb06a78..fc181a8 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -5043,11 +5043,11 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
// repeated bytes families = 2;
/**
@@ -5115,11 +5115,11 @@ public final class ZooKeeperProtos {
break;
}
case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = tableName_.toBuilder();
}
- tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(tableName_);
tableName_ = subBuilder.buildPartial();
@@ -5180,7 +5180,7 @@ public final class ZooKeeperProtos {
private int bitField0_;
// optional .hbase.pb.TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
@@ -5190,13 +5190,13 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
return tableName_;
}
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
@@ -5224,7 +5224,7 @@ public final class ZooKeeperProtos {
}
private void initFields() {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
families_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
@@ -5435,7 +5435,7 @@ public final class ZooKeeperProtos {
public Builder clear() {
super.clear();
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
@@ -5546,9 +5546,9 @@ public final class ZooKeeperProtos {
private int bitField0_;
// optional .hbase.pb.TableName table_name = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
@@ -5558,7 +5558,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
@@ -5568,7 +5568,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -5585,7 +5585,7 @@ public final class ZooKeeperProtos {
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
public Builder setTableName(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
@@ -5598,12 +5598,12 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
- tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
tableName_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
@@ -5619,7 +5619,7 @@ public final class ZooKeeperProtos {
*/
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
@@ -5630,7 +5630,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
@@ -5638,7 +5638,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
@@ -5649,11 +5649,11 @@ public final class ZooKeeperProtos {
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
@@ -9260,11 +9260,11 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
// optional .hbase.pb.ServerName lock_owner = 2;
/**
@@ -9382,11 +9382,11 @@ public final class ZooKeeperProtos {
break;
}
case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = tableName_.toBuilder();
}
- tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(tableName_);
tableName_ = subBuilder.buildPartial();
@@ -9469,7 +9469,7 @@ public final class ZooKeeperProtos {
private int bitField0_;
// optional .hbase.pb.TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
@@ -9479,13 +9479,13 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
return tableName_;
}
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
@@ -9603,7 +9603,7 @@ public final class ZooKeeperProtos {
}
private void initFields() {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
threadId_ = 0L;
isShared_ = false;
@@ -9892,7 +9892,7 @@ public final class ZooKeeperProtos {
public Builder clear() {
super.clear();
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
@@ -10047,9 +10047,9 @@ public final class ZooKeeperProtos {
private int bitField0_;
// optional .hbase.pb.TableName table_name = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
@@ -10059,7 +10059,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
@@ -10069,7 +10069,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -10086,7 +10086,7 @@ public final class ZooKeeperProtos {
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
public Builder setTableName(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
@@ -10099,12 +10099,12 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
- tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
tableName_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
@@ -10120,7 +10120,7 @@ public final class ZooKeeperProtos {
*/
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
@@ -10131,7 +10131,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
@@ -10139,7 +10139,7 @@ public final class ZooKeeperProtos {
/**
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
@@ -10150,11 +10150,11 @@ public final class ZooKeeperProtos {
* <code>optional .hbase.pb.TableName table_name = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
@@ -10982,47 +10982,47 @@ public final class ZooKeeperProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\017ZooKeeper.proto\022\010hbase.pb\032\013HBase.proto" +
- "\032\023ClusterStatus.proto\"y\n\020MetaRegionServe" +
- "r\022$\n\006server\030\001 \002(\0132\024.hbase.pb.ServerName\022" +
- "\023\n\013rpc_version\030\002 \001(\r\022*\n\005state\030\003 \001(\0162\033.hb" +
- "ase.pb.RegionState.State\"V\n\006Master\022$\n\006ma" +
- "ster\030\001 \002(\0132\024.hbase.pb.ServerName\022\023\n\013rpc_" +
- "version\030\002 \001(\r\022\021\n\tinfo_port\030\003 \001(\r\"\037\n\tClus" +
- "terUp\022\022\n\nstart_date\030\001 \002(\t\"\221\001\n\020RegionTran" +
- "sition\022\027\n\017event_type_code\030\001 \002(\r\022\023\n\013regio" +
- "n_name\030\002 \002(\014\022\023\n\013create_time\030\003 \002(\004\022)\n\013ser",
- "ver_name\030\004 \002(\0132\024.hbase.pb.ServerName\022\017\n\007" +
- "payload\030\005 \001(\014\"\247\002\n\014SplitLogTask\022+\n\005state\030" +
- "\001 \002(\0162\034.hbase.pb.SplitLogTask.State\022)\n\013s" +
- "erver_name\030\002 \002(\0132\024.hbase.pb.ServerName\022:" +
- "\n\004mode\030\003 \001(\0162#.hbase.pb.SplitLogTask.Rec" +
- "overyMode:\007UNKNOWN\"C\n\005State\022\016\n\nUNASSIGNE" +
- "D\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007" +
- "\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007UNKNOWN\020\000\022\021\n\r" +
- "LOG_SPLITTING\020\001\022\016\n\nLOG_REPLAY\020\002\"w\n\005Table" +
- "\022-\n\005state\030\001 \002(\0162\025.hbase.pb.Table.State:\007",
- "ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLE" +
- "D\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"D\n\007Tabl" +
- "eCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.Table" +
- "Name\022\020\n\010families\030\002 \003(\014\"\330\001\n\017ReplicationPe" +
- "er\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicationEnd" +
- "pointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(\0132\030.hbase.pb" +
- ".BytesBytesPair\022/\n\rconfiguration\030\004 \003(\0132\030" +
- ".hbase.pb.NameStringPair\022$\n\ttable_cfs\030\005 " +
- "\003(\0132\021.hbase.pb.TableCF\022\021\n\tbandwidth\030\006 \001(" +
- "\003\"g\n\020ReplicationState\022/\n\005state\030\001 \002(\0162 .h",
- "base.pb.ReplicationState.State\"\"\n\005State\022" +
- "\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Replicatio" +
- "nHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017Repli" +
- "cationLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tTable" +
- "Lock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.Tabl" +
- "eName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.Ser" +
- "verName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030" +
- "\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 " +
- "\001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010BE\n*o" +
- "rg.apache.hadoop.hbase.protobuf.generate",
- "dB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+ "\n\017ZooKeeper.proto\022\010hbase.pb\032\013Table.proto" +
+ "\032\013HBase.proto\032\023ClusterStatus.proto\"y\n\020Me" +
+ "taRegionServer\022$\n\006server\030\001 \002(\0132\024.hbase.p" +
+ "b.ServerName\022\023\n\013rpc_version\030\002 \001(\r\022*\n\005sta" +
+ "te\030\003 \001(\0162\033.hbase.pb.RegionState.State\"V\n" +
+ "\006Master\022$\n\006master\030\001 \002(\0132\024.hbase.pb.Serve" +
+ "rName\022\023\n\013rpc_version\030\002 \001(\r\022\021\n\tinfo_port\030" +
+ "\003 \001(\r\"\037\n\tClusterUp\022\022\n\nstart_date\030\001 \002(\t\"\221" +
+ "\001\n\020RegionTransition\022\027\n\017event_type_code\030\001" +
+ " \002(\r\022\023\n\013region_name\030\002 \002(\014\022\023\n\013create_time",
+ "\030\003 \002(\004\022)\n\013server_name\030\004 \002(\0132\024.hbase.pb.S" +
+ "erverName\022\017\n\007payload\030\005 \001(\014\"\247\002\n\014SplitLogT" +
+ "ask\022+\n\005state\030\001 \002(\0162\034.hbase.pb.SplitLogTa" +
+ "sk.State\022)\n\013server_name\030\002 \002(\0132\024.hbase.pb" +
+ ".ServerName\022:\n\004mode\030\003 \001(\0162#.hbase.pb.Spl" +
+ "itLogTask.RecoveryMode:\007UNKNOWN\"C\n\005State" +
+ "\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020" +
+ "\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007" +
+ "UNKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPL" +
+ "AY\020\002\"w\n\005Table\022-\n\005state\030\001 \002(\0162\025.hbase.pb.",
+ "Table.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED" +
+ "\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABL" +
+ "ING\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.h" +
+ "base.pb.TableName\022\020\n\010families\030\002 \003(\014\"\330\001\n\017" +
+ "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r" +
+ "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
+ "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
+ "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n" +
+ "\ttable_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\021\n\tb" +
+ "andwidth\030\006 \001(\003\"g\n\020ReplicationState\022/\n\005st",
+ "ate\030\001 \002(\0162 .hbase.pb.ReplicationState.St" +
+ "ate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"" +
+ "+\n\027ReplicationHLogPosition\022\020\n\010position\030\001" +
+ " \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001 " +
+ "\002(\t\"\252\001\n\tTableLock\022\'\n\ntable_name\030\001 \001(\0132\023." +
+ "hbase.pb.TableName\022(\n\nlock_owner\030\002 \001(\0132\024" +
+ ".hbase.pb.ServerName\022\021\n\tthread_id\030\003 \001(\003\022" +
+ "\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013c" +
+ "reate_time\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabl" +
+ "ed\030\001 \001(\010BE\n*org.apache.hadoop.hbase.prot",
+ "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11113,6 +11113,7 @@ public final class ZooKeeperProtos {
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
}, assigner);
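The generated ZooKeeperProtos code above now resolves .hbase.pb.TableName through the new TableProtos outer class instead of HBaseProtos, and its file descriptor gains Table.proto as the first dependency. The message keeps the same fully qualified name and field layout, so only the owning outer class changes for callers. A minimal sketch of building a TableName against the relocated class (the namespace and table values here are made up for illustration):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

public class TableNameBuilderSketch {
  public static void main(String[] args) {
    // Both fields are required; build() throws UninitializedMessageException if either is unset.
    TableProtos.TableName tableName = TableProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))  // required bytes namespace = 1
        .setQualifier(ByteString.copyFromUtf8("mytable"))  // required bytes qualifier = 2
        .build();
    System.out.println(tableName.getQualifier().toStringUtf8());
  }
}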
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/AccessControl.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/AccessControl.proto b/hbase-protocol/src/main/protobuf/AccessControl.proto
index cc0d4a5..02e3a5b 100644
--- a/hbase-protocol/src/main/protobuf/AccessControl.proto
+++ b/hbase-protocol/src/main/protobuf/AccessControl.proto
@@ -23,7 +23,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
-import "HBase.proto";
+import "Table.proto";
message Permission {
enum Action {
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/HBase.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 279eb39..67645d4 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -25,14 +25,8 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "Cell.proto";
-
-/**
- * Table Name
- */
-message TableName {
- required bytes namespace = 1;
- required bytes qualifier = 2;
-}
+import "Table.proto";
+import "AccessControl.proto";
/**
* Table Schema
@@ -221,3 +215,21 @@ message RegionServerInfo {
optional int32 infoPort = 1;
optional VersionInfo version_info = 2;
}
+
+/**
+ * Description of the snapshot to take
+ */
+message SnapshotDescription {
+ required string name = 1;
+ optional string table = 2; // not needed for delete, but checked for in taking snapshot
+ optional int64 creation_time = 3 [default = 0];
+ enum Type {
+ DISABLED = 0;
+ FLUSH = 1;
+ SKIPFLUSH = 2;
+ }
+ optional Type type = 4 [default = FLUSH];
+ optional int32 version = 5;
+ optional string owner = 6;
+ optional UsersAndPermissions users_and_permissions = 7;
+}
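With SnapshotDescription now declared in HBase.proto, its generated class sits under the HBaseProtos outer class, which is what the Java import changes later in this patch pick up. A minimal sketch of constructing one with the protoc-generated builder; the snapshot and table names are illustrative only:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class SnapshotDescriptionSketch {
  public static void main(String[] args) {
    // Only 'name' is required; 'type' defaults to FLUSH when left unset.
    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
        .setName("example-snapshot")                  // required string name = 1
        .setTable("example-table")                    // optional, checked when taking a snapshot
        .setType(SnapshotDescription.Type.SKIPFLUSH)  // overrides the FLUSH default
        .build();
    System.out.println(snapshot.getName() + " -> " + snapshot.getType());
  }
}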
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index d19856b..4db8779 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -26,6 +26,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
+import "Table.proto";
import "HBase.proto";
import "Client.proto";
import "ClusterStatus.proto";
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 2d2aff4..c1d3789 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -23,6 +23,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
+import "Table.proto";
import "HBase.proto";
import "RPC.proto";
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto b/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto
index 814735b..12e7cf7 100644
--- a/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto
+++ b/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto
@@ -23,6 +23,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
+import 'Table.proto';
import 'HBase.proto';
import 'Client.proto';
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/Snapshot.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Snapshot.proto b/hbase-protocol/src/main/protobuf/Snapshot.proto
index 015787d..ae1a1e6 100644
--- a/hbase-protocol/src/main/protobuf/Snapshot.proto
+++ b/hbase-protocol/src/main/protobuf/Snapshot.proto
@@ -23,28 +23,9 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
-import "AccessControl.proto";
import "FS.proto";
import "HBase.proto";
-/**
- * Description of the snapshot to take
- */
-message SnapshotDescription {
- required string name = 1;
- optional string table = 2; // not needed for delete, but checked for in taking snapshot
- optional int64 creation_time = 3 [default = 0];
- enum Type {
- DISABLED = 0;
- FLUSH = 1;
- SKIPFLUSH = 2;
- }
- optional Type type = 4 [default = FLUSH];
- optional int32 version = 5;
- optional string owner = 6;
- optional UsersAndPermissions users_and_permissions = 7;
-}
-
message SnapshotFileInfo {
enum Type {
HFILE = 1;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/Table.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Table.proto b/hbase-protocol/src/main/protobuf/Table.proto
new file mode 100644
index 0000000..4452eb2
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Table.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are shared throughout HBase
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "TableProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Table Name
+ */
+message TableName {
+ required bytes namespace = 1;
+ required bytes qualifier = 2;
+}
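Moving TableName into its own proto file does not change the wire format: field numbers and types are identical, so bytes written by the previous HBaseProtos.TableName decode unchanged through the relocated class. A small round-trip sketch under that assumption, with illustrative values:

import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

public class TableNameRoundTripSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    byte[] wire = TableProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("hbase"))
        .setQualifier(ByteString.copyFromUtf8("meta"))
        .build()
        .toByteArray();

    // The serialized form depends only on field numbers and types, not on which
    // generated outer class produced it, so either reader decodes the same bytes.
    TableProtos.TableName parsed = TableProtos.TableName.parseFrom(wire);
    System.out.println(parsed.getNamespace().toStringUtf8()
        + ":" + parsed.getQualifier().toStringUtf8());
  }
}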
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/WAL.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto
index 5d91c4c..08925f8 100644
--- a/hbase-protocol/src/main/protobuf/WAL.proto
+++ b/hbase-protocol/src/main/protobuf/WAL.proto
@@ -23,6 +23,7 @@ option java_generic_services = false;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
+import "Table.proto";
import "HBase.proto";
import "Client.proto";
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-protocol/src/main/protobuf/ZooKeeper.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index a632552..1638bf7 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -26,6 +26,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
+import "Table.proto";
import "HBase.proto";
import "ClusterStatus.proto";
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 5803297..0ecc131 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -49,7 +49,7 @@ org.apache.hadoop.hbase.HTableDescriptor;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.TableName;
org.apache.hadoop.hbase.tool.Canary;
-org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index b2f76d2..2d8fdba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import java.io.IOException;
import java.util.List;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index f747599..5fe80c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import java.io.IOException;
import java.util.List;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index d6afec0..3fb0858 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.regionserver.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -56,7 +57,6 @@ import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 7558147..24c62b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
* Defines coprocessor hooks for interacting with operations on the
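The observer interfaces above now import SnapshotDescription from HBaseProtos rather than SnapshotProtos, so downstream coprocessors compiled against this branch need the same import change. A hedged sketch of a custom master observer, assuming the preSnapshot hook signature from the 1.x MasterObserver API; the class name and log message are made up for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class LoggingSnapshotObserver extends BaseMasterObserver {
  @Override
  public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
    // Runs before the master takes a snapshot; only the descriptor's owning
    // outer class changed in this patch, not its fields.
    System.out.println("Taking snapshot " + snapshot.getName()
        + " of table " + snapshot.getTable());
  }
}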
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index ad56231..ad16b97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.regionserver.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.Operation;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 9f52850..a8d387a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9e16b59/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index c7dd282..226ba06 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
* Provides the coprocessor framework and environment for master oriented