Posted to commits@hbase.apache.org by st...@apache.org on 2013/08/21 07:04:22 UTC
svn commit: r1516084 [28/43] - in /hbase/trunk: ./
hbase-client/src/main/java/org/apache/hadoop/hbase/
hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/
hbase-common/src/test/java/org/apache/hadoop/hbase/
hbase-protocol/src/main/java/org/apa...
Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1516084&r1=1516083&r2=1516084&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Wed Aug 21 05:04:20 2013
@@ -10,68 +10,231 @@ public final class ZooKeeperProtos {
}
public interface MetaRegionServerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required .ServerName server = 1;
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
boolean hasServer();
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
-
+
// optional uint32 rpc_version = 2;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
boolean hasRpcVersion();
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
int getRpcVersion();
}
+ /**
+ * Protobuf type {@code MetaRegionServer}
+ *
+ * <pre>
+ **
+ * Content of the meta-region-server znode.
+ * </pre>
+ */
public static final class MetaRegionServer extends
com.google.protobuf.GeneratedMessage
implements MetaRegionServerOrBuilder {
// Use MetaRegionServer.newBuilder() to construct.
- private MetaRegionServer(Builder builder) {
+ private MetaRegionServer(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private MetaRegionServer(boolean noInit) {}
-
+ private MetaRegionServer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final MetaRegionServer defaultInstance;
public static MetaRegionServer getDefaultInstance() {
return defaultInstance;
}
-
+
public MetaRegionServer getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MetaRegionServer(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = server_.toBuilder();
+ }
+ server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(server_);
+ server_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ rpcVersion_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MetaRegionServer> PARSER =
+ new com.google.protobuf.AbstractParser<MetaRegionServer>() {
+ public MetaRegionServer parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MetaRegionServer(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MetaRegionServer> getParserForType() {
+ return PARSER;
}
-
+
private int bitField0_;
// required .ServerName server = 1;
public static final int SERVER_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
return server_;
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
return server_;
}
-
+
// optional uint32 rpc_version = 2;
public static final int RPC_VERSION_FIELD_NUMBER = 2;
private int rpcVersion_;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public int getRpcVersion() {
return rpcVersion_;
}
-
+
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
rpcVersion_ = 0;
@@ -80,7 +243,7 @@ public final class ZooKeeperProtos {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasServer()) {
memoizedIsInitialized = 0;
return false;
@@ -92,7 +255,7 @@ public final class ZooKeeperProtos {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -104,12 +267,12 @@ public final class ZooKeeperProtos {
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -123,14 +286,14 @@ public final class ZooKeeperProtos {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
@@ -140,7 +303,7 @@ public final class ZooKeeperProtos {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;
-
+
boolean result = true;
result = result && (hasServer() == other.hasServer());
if (hasServer()) {
@@ -156,9 +319,13 @@ public final class ZooKeeperProtos {
getUnknownFields().equals(other.getUnknownFields());
return result;
}
-
+
+ private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasServer()) {
@@ -170,89 +337,84 @@ public final class ZooKeeperProtos {
hash = (53 * hash) + getRpcVersion();
}
hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
return hash;
}
-
+
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code MetaRegionServer}
+ *
+ * <pre>
+ **
+ * Content of the meta-region-server znode.
+ * </pre>
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
@@ -260,18 +422,21 @@ public final class ZooKeeperProtos {
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
}
-
+
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -283,7 +448,7 @@ public final class ZooKeeperProtos {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
if (serverBuilder_ == null) {
@@ -296,20 +461,20 @@ public final class ZooKeeperProtos {
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDescriptor();
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
@@ -317,17 +482,7 @@ public final class ZooKeeperProtos {
}
return result;
}
-
- private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
int from_bitField0_ = bitField0_;
@@ -348,7 +503,7 @@ public final class ZooKeeperProtos {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
@@ -357,7 +512,7 @@ public final class ZooKeeperProtos {
return this;
}
}
-
+
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
if (other.hasServer()) {
@@ -369,7 +524,7 @@ public final class ZooKeeperProtos {
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasServer()) {
@@ -381,57 +536,47 @@ public final class ZooKeeperProtos {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
- if (hasServer()) {
- subBuilder.mergeFrom(getServer());
- }
- input.readMessage(subBuilder, extensionRegistry);
- setServer(subBuilder.buildPartial());
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- rpcVersion_ = input.readUInt32();
- break;
- }
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required .ServerName server = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
if (serverBuilder_ == null) {
return server_;
@@ -439,6 +584,13 @@ public final class ZooKeeperProtos {
return serverBuilder_.getMessage();
}
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (value == null) {
@@ -452,6 +604,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public Builder setServer(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (serverBuilder_ == null) {
@@ -463,6 +622,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
@@ -479,6 +645,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public Builder clearServer() {
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@@ -489,11 +662,25 @@ public final class ZooKeeperProtos {
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getServerFieldBuilder().getBuilder();
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
if (serverBuilder_ != null) {
return serverBuilder_.getMessageOrBuilder();
@@ -501,6 +688,13 @@ public final class ZooKeeperProtos {
return server_;
}
}
+ /**
+ * <code>required .ServerName server = 1;</code>
+ *
+ * <pre>
+ * The ServerName hosting the meta region currently.
+ * </pre>
+ */
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getServerFieldBuilder() {
@@ -514,103 +708,294 @@ public final class ZooKeeperProtos {
}
return serverBuilder_;
}
-
+
// optional uint32 rpc_version = 2;
private int rpcVersion_ ;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public int getRpcVersion() {
return rpcVersion_;
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public Builder setRpcVersion(int value) {
bitField0_ |= 0x00000002;
rpcVersion_ = value;
onChanged();
return this;
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * The major version of the rpc the server speaks. This is used so that
+ * clients connecting to the cluster can have prior knowledge of what version
+ * to send to a RegionServer. AsyncHBase will use this to detect versions.
+ * </pre>
+ */
public Builder clearRpcVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
rpcVersion_ = 0;
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:MetaRegionServer)
}
-
+
static {
defaultInstance = new MetaRegionServer(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:MetaRegionServer)
}
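
As context for the MetaRegionServer changes above, here is a minimal sketch of how a client might consume the payload of the meta-region-server znode through the regenerated API, whose static parseFrom methods now delegate to the new PARSER. The helper class name is illustrative, and the sketch assumes the caller has already stripped any HBase-specific magic/PBUF prefix from the raw znode bytes before parsing.

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class MetaRegionServerReader {
  // data: protobuf payload of the meta-region-server znode
  // (any HBase magic/PBUF prefix is assumed to be stripped already).
  static void describe(byte[] data) throws InvalidProtocolBufferException {
    ZooKeeperProtos.MetaRegionServer mrs =
        ZooKeeperProtos.MetaRegionServer.parseFrom(data);
    // Required field: the ServerName currently hosting the meta region.
    HBaseProtos.ServerName server = mrs.getServer();
    System.out.println("meta region hosted on: " + server);
    if (mrs.hasRpcVersion()) {
      // Optional field: lets clients such as AsyncHBase pick an RPC version up front.
      System.out.println("rpc_version: " + mrs.getRpcVersion());
    }
  }
}
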
-
+
public interface MasterOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required .ServerName master = 1;
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
boolean hasMaster();
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();
-
+
// optional uint32 rpc_version = 2;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
boolean hasRpcVersion();
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
int getRpcVersion();
}
+ /**
+ * Protobuf type {@code Master}
+ *
+ * <pre>
+ **
+ * Content of the master znode.
+ * </pre>
+ */
public static final class Master extends
com.google.protobuf.GeneratedMessage
implements MasterOrBuilder {
// Use Master.newBuilder() to construct.
- private Master(Builder builder) {
+ private Master(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private Master(boolean noInit) {}
-
+ private Master(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final Master defaultInstance;
public static Master getDefaultInstance() {
return defaultInstance;
}
-
+
public Master getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Master(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = master_.toBuilder();
+ }
+ master_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(master_);
+ master_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ rpcVersion_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Master> PARSER =
+ new com.google.protobuf.AbstractParser<Master>() {
+ public Master parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Master(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Master> getParserForType() {
+ return PARSER;
}
-
+
private int bitField0_;
// required .ServerName master = 1;
public static final int MASTER_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_;
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public boolean hasMaster() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
return master_;
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
return master_;
}
-
+
// optional uint32 rpc_version = 2;
public static final int RPC_VERSION_FIELD_NUMBER = 2;
private int rpcVersion_;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public int getRpcVersion() {
return rpcVersion_;
}
-
+
private void initFields() {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
rpcVersion_ = 0;
@@ -619,7 +1004,7 @@ public final class ZooKeeperProtos {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasMaster()) {
memoizedIsInitialized = 0;
return false;
@@ -631,7 +1016,7 @@ public final class ZooKeeperProtos {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -643,12 +1028,12 @@ public final class ZooKeeperProtos {
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -662,14 +1047,14 @@ public final class ZooKeeperProtos {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
@@ -679,7 +1064,7 @@ public final class ZooKeeperProtos {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) obj;
-
+
boolean result = true;
result = result && (hasMaster() == other.hasMaster());
if (hasMaster()) {
@@ -695,9 +1080,13 @@ public final class ZooKeeperProtos {
getUnknownFields().equals(other.getUnknownFields());
return result;
}
-
+
+ private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasMaster()) {
@@ -709,89 +1098,84 @@ public final class ZooKeeperProtos {
hash = (53 * hash) + getRpcVersion();
}
hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
return hash;
}
-
+
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code Master}
+ *
+ * <pre>
+ **
+ * Content of the master znode.
+ * </pre>
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MasterOrBuilder {
@@ -799,18 +1183,21 @@ public final class ZooKeeperProtos {
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
}
-
+
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -822,7 +1209,7 @@ public final class ZooKeeperProtos {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
if (masterBuilder_ == null) {
@@ -835,20 +1222,20 @@ public final class ZooKeeperProtos {
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDescriptor();
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor;
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance();
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial();
if (!result.isInitialized()) {
@@ -856,17 +1243,7 @@ public final class ZooKeeperProtos {
}
return result;
}
-
- private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master(this);
int from_bitField0_ = bitField0_;
@@ -887,7 +1264,7 @@ public final class ZooKeeperProtos {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)other);
@@ -896,7 +1273,7 @@ public final class ZooKeeperProtos {
return this;
}
}
-
+
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance()) return this;
if (other.hasMaster()) {
@@ -908,7 +1285,7 @@ public final class ZooKeeperProtos {
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasMaster()) {
@@ -920,57 +1297,47 @@ public final class ZooKeeperProtos {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
- if (hasMaster()) {
- subBuilder.mergeFrom(getMaster());
- }
- input.readMessage(subBuilder, extensionRegistry);
- setMaster(subBuilder.buildPartial());
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- rpcVersion_ = input.readUInt32();
- break;
- }
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required .ServerName master = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_;
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public boolean hasMaster() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
if (masterBuilder_ == null) {
return master_;
@@ -978,6 +1345,13 @@ public final class ZooKeeperProtos {
return masterBuilder_.getMessage();
}
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (value == null) {
@@ -991,6 +1365,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public Builder setMaster(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (masterBuilder_ == null) {
@@ -1002,6 +1383,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
@@ -1018,6 +1406,13 @@ public final class ZooKeeperProtos {
bitField0_ |= 0x00000001;
return this;
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public Builder clearMaster() {
if (masterBuilder_ == null) {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@@ -1028,11 +1423,25 @@ public final class ZooKeeperProtos {
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getMasterFieldBuilder().getBuilder();
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
if (masterBuilder_ != null) {
return masterBuilder_.getMessageOrBuilder();
@@ -1040,6 +1449,13 @@ public final class ZooKeeperProtos {
return master_;
}
}
+ /**
+ * <code>required .ServerName master = 1;</code>
+ *
+ * <pre>
+ * The ServerName of the current Master
+ * </pre>
+ */
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getMasterFieldBuilder() {
@@ -1053,107 +1469,259 @@ public final class ZooKeeperProtos {
}
return masterBuilder_;
}
-
+
// optional uint32 rpc_version = 2;
private int rpcVersion_ ;
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public int getRpcVersion() {
return rpcVersion_;
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public Builder setRpcVersion(int value) {
bitField0_ |= 0x00000002;
rpcVersion_ = value;
onChanged();
return this;
}
+ /**
+ * <code>optional uint32 rpc_version = 2;</code>
+ *
+ * <pre>
+ * Major RPC version so that clients can know what version the master can accept.
+ * </pre>
+ */
public Builder clearRpcVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
rpcVersion_ = 0;
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:Master)
}
-
+
static {
defaultInstance = new Master(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:Master)
}
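
Similarly, a short sketch of building and round-tripping the master znode content with the regenerated Master message. The class name and the ServerName values are illustrative; host_name, port, and start_code are assumed to be the ServerName fields defined in hbase.proto.

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class MasterZNodeExample {
  public static void main(String[] args) throws Exception {
    // Build the ServerName of the current Master (field names assumed from hbase.proto).
    HBaseProtos.ServerName master = HBaseProtos.ServerName.newBuilder()
        .setHostName("master.example.com")
        .setPort(60000)
        .setStartCode(System.currentTimeMillis())
        .build();
    // Assemble the payload that would be stored in the master znode.
    ZooKeeperProtos.Master znodeContent = ZooKeeperProtos.Master.newBuilder()
        .setMaster(master)
        .setRpcVersion(0)
        .build();

    byte[] bytes = znodeContent.toByteArray();

    // Round-trip through the static parser added in this revision.
    ZooKeeperProtos.Master parsed = ZooKeeperProtos.Master.parseFrom(bytes);
    System.out.println(parsed.getMaster());
  }
}
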
-
+
public interface ClusterUpOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required string start_date = 1;
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
boolean hasStartDate();
- String getStartDate();
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ java.lang.String getStartDate();
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ com.google.protobuf.ByteString
+ getStartDateBytes();
}
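
For the ClusterUp message defined next, a brief sketch of reading the cluster start date back out of the '/hbase/running' znode payload. The helper name is illustrative, and as above any HBase magic/PBUF prefix is assumed to be stripped before parsing.

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class ClusterUpReader {
  // data: protobuf payload of the /hbase/running znode.
  static String clusterStartDate(byte[] data) throws Exception {
    ZooKeeperProtos.ClusterUp up = ZooKeeperProtos.ClusterUp.parseFrom(data);
    // start_date is a required string; the presence of the znode means the cluster is up.
    return up.hasStartDate() ? up.getStartDate() : "";
  }
}
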
+ /**
+ * Protobuf type {@code ClusterUp}
+ *
+ * <pre>
+ **
+ * Content of the '/hbase/running', cluster state, znode.
+ * </pre>
+ */
public static final class ClusterUp extends
com.google.protobuf.GeneratedMessage
implements ClusterUpOrBuilder {
// Use ClusterUp.newBuilder() to construct.
- private ClusterUp(Builder builder) {
+ private ClusterUp(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private ClusterUp(boolean noInit) {}
-
+ private ClusterUp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final ClusterUp defaultInstance;
public static ClusterUp getDefaultInstance() {
return defaultInstance;
}
-
+
public ClusterUp getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ClusterUp(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ startDate_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ClusterUp> PARSER =
+ new com.google.protobuf.AbstractParser<ClusterUp>() {
+ public ClusterUp parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ClusterUp(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ClusterUp> getParserForType() {
+ return PARSER;
}
-
+
private int bitField0_;
// required string start_date = 1;
public static final int START_DATE_FIELD_NUMBER = 1;
private java.lang.Object startDate_;
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
public boolean hasStartDate() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public String getStartDate() {
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public java.lang.String getStartDate() {
java.lang.Object ref = startDate_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
startDate_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getStartDateBytes() {
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getStartDateBytes() {
java.lang.Object ref = startDate_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
startDate_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
private void initFields() {
startDate_ = "";
}
@@ -1161,7 +1729,7 @@ public final class ZooKeeperProtos {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasStartDate()) {
memoizedIsInitialized = 0;
return false;
@@ -1169,7 +1737,7 @@ public final class ZooKeeperProtos {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -1178,12 +1746,12 @@ public final class ZooKeeperProtos {
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -1193,14 +1761,14 @@ public final class ZooKeeperProtos {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
@@ -1210,7 +1778,7 @@ public final class ZooKeeperProtos {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) obj;
-
+
boolean result = true;
result = result && (hasStartDate() == other.hasStartDate());
if (hasStartDate()) {
@@ -1221,9 +1789,13 @@ public final class ZooKeeperProtos {
getUnknownFields().equals(other.getUnknownFields());
return result;
}
-
+
+ private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStartDate()) {
@@ -1231,89 +1803,84 @@ public final class ZooKeeperProtos {
hash = (53 * hash) + getStartDate().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
return hash;
}
-
+
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code ClusterUp}
+ *
+ * <pre>
+ **
+ * Content of the '/hbase/running', cluster state, znode.
+ * </pre>
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder {
@@ -1321,18 +1888,21 @@ public final class ZooKeeperProtos {
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
}
-
+
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -1343,27 +1913,27 @@ public final class ZooKeeperProtos {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
startDate_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDescriptor();
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor;
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance();
}
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial();
if (!result.isInitialized()) {
@@ -1371,17 +1941,7 @@ public final class ZooKeeperProtos {
}
return result;
}
-
- private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp(this);
int from_bitField0_ = bitField0_;
@@ -1394,7 +1954,7 @@ public final class ZooKeeperProtos {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)other);
@@ -1403,16 +1963,18 @@ public final class ZooKeeperProtos {
return this;
}
}
-
+
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance()) return this;
if (other.hasStartDate()) {
- setStartDate(other.getStartDate());
+ bitField0_ |= 0x00000001;
+ startDate_ = other.startDate_;
+ onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasStartDate()) {
@@ -1420,57 +1982,89 @@ public final class ZooKeeperProtos {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- startDate_ = input.readBytes();
- break;
- }
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required string start_date = 1;
private java.lang.Object startDate_ = "";
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
public boolean hasStartDate() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public String getStartDate() {
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public java.lang.String getStartDate() {
java.lang.Object ref = startDate_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
startDate_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
}
}
- public Builder setStartDate(String value) {
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getStartDateBytes() {
+ java.lang.Object ref = startDate_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ startDate_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public Builder setStartDate(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -1479,135 +2073,379 @@ public final class ZooKeeperProtos {
onChanged();
return this;
}
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
public Builder clearStartDate() {
bitField0_ = (bitField0_ & ~0x00000001);
startDate_ = getDefaultInstance().getStartDate();
onChanged();
return this;
}
- void setStartDate(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000001;
+ /**
+ * <code>required string start_date = 1;</code>
+ *
+ * <pre>
+ * If this znode is present, cluster is up. Currently
+ * the data is cluster start_date.
+ * </pre>
+ */
+ public Builder setStartDateBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
startDate_ = value;
onChanged();
+ return this;
}
-
+
// @@protoc_insertion_point(builder_scope:ClusterUp)
}
-
+
static {
defaultInstance = new ClusterUp(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:ClusterUp)
}
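
(Aside, not part of the diff: with this change the static parseFrom overloads above delegate to the new PARSER object instead of the old newBuilder().mergeFrom(...).buildParsed() path. A minimal round-trip sketch, using only members visible in this hunk -- setStartDate, build, toByteString, parseFrom, getStartDate -- could look like the following; the start_date value is hypothetical.)

    ZooKeeperProtos.ClusterUp up = ZooKeeperProtos.ClusterUp.newBuilder()
        .setStartDate("Wed Aug 21 05:04:20 UTC 2013")   // hypothetical cluster start date string
        .build();                                        // build() fails if the required field were unset
    com.google.protobuf.ByteString bytes = up.toByteString();
    ZooKeeperProtos.ClusterUp copy =
        ZooKeeperProtos.ClusterUp.parseFrom(bytes);      // now routed through the static PARSER
    assert copy.getStartDate().equals(up.getStartDate());
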
-
+
public interface RegionTransitionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required uint32 event_type_code = 1;
+ /**
+ * <code>required uint32 event_type_code = 1;</code>
+ *
+ * <pre>
+ * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
+ * </pre>
+ */
boolean hasEventTypeCode();
+ /**
+ * <code>required uint32 event_type_code = 1;</code>
+ *
+ * <pre>
+ * Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode()
+ * </pre>
+ */
int getEventTypeCode();
-
+
// required bytes region_name = 2;
+ /**
+ * <code>required bytes region_name = 2;</code>
+ *
+ * <pre>
+ * Full regionname in bytes
+ * </pre>
+ */
boolean hasRegionName();
+ /**
+ * <code>required bytes region_name = 2;</code>
+ *
+ * <pre>
+ * Full regionname in bytes
+ * </pre>
+ */
com.google.protobuf.ByteString getRegionName();
-
+
// required uint64 create_time = 3;
+ /**
+ * <code>required uint64 create_time = 3;</code>
+ */
boolean hasCreateTime();
+ /**
+ * <code>required uint64 create_time = 3;</code>
+ */
long getCreateTime();
-
+
// required .ServerName server_name = 4;
+ /**
+ * <code>required .ServerName server_name = 4;</code>
+ *
+ * <pre>
+ * The region server where the transition will happen or is happening
+ * </pre>
+ */
boolean hasServerName();
+ /**
+ * <code>required .ServerName server_name = 4;</code>
+ *
+ * <pre>
+ * The region server where the transition will happen or is happening
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+ /**
+ * <code>required .ServerName server_name = 4;</code>
+ *
+ * <pre>
+ * The region server where the transition will happen or is happening
+ * </pre>
+ */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
-
+
// optional bytes payload = 5;
+ /**
+ * <code>optional bytes payload = 5;</code>
+ */
boolean hasPayload();
+ /**
+ * <code>optional bytes payload = 5;</code>
+ */
com.google.protobuf.ByteString getPayload();
}
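
(Aside, not part of the diff: the RegionTransitionOrBuilder interface above enumerates what the unassigned znode payload carries. A sketch of populating it follows, assuming the standard protoc-java setter names for these fields and the host_name/port/start_code accessors on the companion HBaseProtos.ServerName message; all values are made up.)

    HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
        .setHostName("rs1.example.org")      // assumed accessor; hypothetical region server
        .setPort(60020)
        .setStartCode(1377061460000L)
        .build();
    ZooKeeperProtos.RegionTransition rt = ZooKeeperProtos.RegionTransition.newBuilder()
        .setEventTypeCode(3)                 // an EventHandler.EventType code; value illustrative
        .setRegionName(com.google.protobuf.ByteString.copyFromUtf8("t1,,1377061460000.d41d8cd9"))
        .setCreateTime(System.currentTimeMillis())
        .setServerName(sn)                   // required field 4
        .build();                            // payload (field 5) is optional and left unset here
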
+ /**
+ * Protobuf type {@code RegionTransition}
+ *
+ * <pre>
+ **
+ * What we write under unassigned up in zookeeper as a region moves through
+ * open/close, etc., regions. Details a region in transition.
+ * </pre>
+ */
public static final class RegionTransition extends
com.google.protobuf.GeneratedMessage
implements RegionTransitionOrBuilder {
// Use RegionTransition.newBuilder() to construct.
- private RegionTransition(Builder builder) {
+ private RegionTransition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private RegionTransition(boolean noInit) {}
-
+ private RegionTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final RegionTransition defaultInstance;
public static RegionTransition getDefaultInstance() {
return defaultInstance;
}
-
+
public RegionTransition getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RegionTransition(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ eventTypeCode_ = input.readUInt32();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ regionName_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ createTime_ = input.readUInt64();
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = serverName_.toBuilder();
+ }
+ serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(serverName_);
+ serverName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ payload_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
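    // (Not part of the generated source or of this diff: the case labels in the
    // constructor above are protobuf wire-format tags, tag = (field_number << 3) | wire_type.
    // Field 1 as a varint gives (1 << 3) | 0 = 8, field 2 as length-delimited bytes gives
    // (2 << 3) | 2 = 18, field 3 gives 24, the embedded ServerName message at field 4
    // gives 34, and the optional payload at field 5 gives 42.)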
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable;
[... 7528 lines stripped ...]