Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/06/11 02:36:13 UTC
svn commit: r1134492 [2/4] - in /hadoop/hdfs/trunk: ./
src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/protocol/proto/
src/java/org/apache/hadoop/hdfs/server/balancer/ src/java/org/apache/hadoo...
Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java?rev=1134492&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java Sat Jun 11 00:36:12 2011
@@ -0,0 +1,9122 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: datatransfer.proto
+
+package org.apache.hadoop.hdfs.protocol.proto;
+
+public final class DataTransferProtos {
+ private DataTransferProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public enum Status
+ implements com.google.protobuf.ProtocolMessageEnum {
+ SUCCESS(0, 0),
+ ERROR(1, 1),
+ ERROR_CHECKSUM(2, 2),
+ ERROR_INVALID(3, 3),
+ ERROR_EXISTS(4, 4),
+ ERROR_ACCESS_TOKEN(5, 5),
+ CHECKSUM_OK(6, 6),
+ ;
+
+ public static final int SUCCESS_VALUE = 0;
+ public static final int ERROR_VALUE = 1;
+ public static final int ERROR_CHECKSUM_VALUE = 2;
+ public static final int ERROR_INVALID_VALUE = 3;
+ public static final int ERROR_EXISTS_VALUE = 4;
+ public static final int ERROR_ACCESS_TOKEN_VALUE = 5;
+ public static final int CHECKSUM_OK_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static Status valueOf(int value) {
+ switch (value) {
+ case 0: return SUCCESS;
+ case 1: return ERROR;
+ case 2: return ERROR_CHECKSUM;
+ case 3: return ERROR_INVALID;
+ case 4: return ERROR_EXISTS;
+ case 5: return ERROR_ACCESS_TOKEN;
+ case 6: return CHECKSUM_OK;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Status>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<Status>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Status>() {
+ public Status findValueByNumber(int number) {
+ return Status.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Status[] VALUES = {
+ SUCCESS, ERROR, ERROR_CHECKSUM, ERROR_INVALID, ERROR_EXISTS, ERROR_ACCESS_TOKEN, CHECKSUM_OK,
+ };
+
+ public static Status valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Status(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:Status)
+ }
+
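
[Usage sketch, not part of the committed file: the generated Status enum pairs each constant with its wire number via getNumber(), and valueOf(int) is the reverse lookup, returning null for unknown numbers.]

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    public class StatusRoundTrip {
      public static void main(String[] args) {
        Status s = Status.ERROR_CHECKSUM;
        int wire = s.getNumber();                                 // 2
        System.out.println(wire == Status.ERROR_CHECKSUM_VALUE);  // true
        System.out.println(Status.valueOf(wire) == s);            // true: reverse lookup
        System.out.println(Status.valueOf(99));                   // null: unknown number
      }
    }
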
+ public interface BaseHeaderProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ExtendedBlockProto block = 1;
+ boolean hasBlock();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
+
+ // optional .BlockTokenIdentifierProto token = 2;
+ boolean hasToken();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder();
+ }
+ public static final class BaseHeaderProto extends
+ com.google.protobuf.GeneratedMessage
+ implements BaseHeaderProtoOrBuilder {
+ // Use BaseHeaderProto.newBuilder() to construct.
+ private BaseHeaderProto(Builder builder) {
+ super(builder);
+ }
+ private BaseHeaderProto(boolean noInit) {}
+
+ private static final BaseHeaderProto defaultInstance;
+ public static BaseHeaderProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BaseHeaderProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .ExtendedBlockProto block = 1;
+ public static final int BLOCK_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
+ public boolean hasBlock() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+ return block_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+ return block_;
+ }
+
+ // optional .BlockTokenIdentifierProto token = 2;
+ public static final int TOKEN_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_;
+ public boolean hasToken() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
+ return token_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
+ return token_;
+ }
+
+ private void initFields() {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBlock()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getBlock().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasToken()) {
+ if (!getToken().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, block_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, token_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, block_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, token_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj;
+
+ boolean result = true;
+ result = result && (hasBlock() == other.hasBlock());
+ if (hasBlock()) {
+ result = result && getBlock()
+ .equals(other.getBlock());
+ }
+ result = result && (hasToken() == other.hasToken());
+ if (hasToken()) {
+ result = result && getToken()
+ .equals(other.getToken());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBlock()) {
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+ hash = (53 * hash) + getBlock().hashCode();
+ }
+ if (hasToken()) {
+ hash = (37 * hash) + TOKEN_FIELD_NUMBER;
+ hash = (53 * hash) + getToken().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBlockFieldBuilder();
+ getTokenFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tokenBuilder_ == null) {
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
+ } else {
+ tokenBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (blockBuilder_ == null) {
+ result.block_ = block_;
+ } else {
+ result.block_ = blockBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tokenBuilder_ == null) {
+ result.token_ = token_;
+ } else {
+ result.token_ = tokenBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this;
+ if (other.hasBlock()) {
+ mergeBlock(other.getBlock());
+ }
+ if (other.hasToken()) {
+ mergeToken(other.getToken());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBlock()) {
+
+ return false;
+ }
+ if (!getBlock().isInitialized()) {
+
+ return false;
+ }
+ if (hasToken()) {
+ if (!getToken().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
+ if (hasBlock()) {
+ subBuilder.mergeFrom(getBlock());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setBlock(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder();
+ if (hasToken()) {
+ subBuilder.mergeFrom(getToken());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setToken(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .ExtendedBlockProto block = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
+ public boolean hasBlock() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+ if (blockBuilder_ == null) {
+ return block_;
+ } else {
+ return blockBuilder_.getMessage();
+ }
+ }
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ block_ = value;
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setBlock(
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
+ if (blockBuilder_ == null) {
+ block_ = builderForValue.build();
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
+ block_ =
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+ } else {
+ block_ = value;
+ }
+ onChanged();
+ } else {
+ blockBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearBlock() {
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ onChanged();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getBlockFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+ if (blockBuilder_ != null) {
+ return blockBuilder_.getMessageOrBuilder();
+ } else {
+ return block_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
+ getBlockFieldBuilder() {
+ if (blockBuilder_ == null) {
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
+ block_,
+ getParentForChildren(),
+ isClean());
+ block_ = null;
+ }
+ return blockBuilder_;
+ }
+
+ // optional .BlockTokenIdentifierProto token = 2;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_;
+ public boolean hasToken() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
+ if (tokenBuilder_ == null) {
+ return token_;
+ } else {
+ return tokenBuilder_.getMessage();
+ }
+ }
+ public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
+ if (tokenBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ token_ = value;
+ onChanged();
+ } else {
+ tokenBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder setToken(
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) {
+ if (tokenBuilder_ == null) {
+ token_ = builderForValue.build();
+ onChanged();
+ } else {
+ tokenBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
+ if (tokenBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) {
+ token_ =
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial();
+ } else {
+ token_ = value;
+ }
+ onChanged();
+ } else {
+ tokenBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder clearToken() {
+ if (tokenBuilder_ == null) {
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
+ onChanged();
+ } else {
+ tokenBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTokenFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
+ if (tokenBuilder_ != null) {
+ return tokenBuilder_.getMessageOrBuilder();
+ } else {
+ return token_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>
+ getTokenFieldBuilder() {
+ if (tokenBuilder_ == null) {
+ tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>(
+ token_,
+ getParentForChildren(),
+ isClean());
+ token_ = null;
+ }
+ return tokenBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:BaseHeaderProto)
+ }
+
+ static {
+ defaultInstance = new BaseHeaderProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:BaseHeaderProto)
+ }
+
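
[Usage sketch, not part of the committed file: a build/serialize/parse round trip for BaseHeaderProto. The ExtendedBlockProto setters (setPoolId, setBlockId, setGenerationStamp) are assumptions about the companion HdfsProtos file, which lives in another part of this changeset.]

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class BaseHeaderRoundTrip {
      public static void main(String[] args) throws Exception {
        // Assumed ExtendedBlockProto fields; see hdfs.proto in this commit.
        ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
            .setPoolId("BP-1")
            .setBlockId(1234L)
            .setGenerationStamp(1L)
            .build();
        // block is required, so build() would throw an
        // UninitializedMessageException if it were unset;
        // token is optional and left unset here.
        BaseHeaderProto header = BaseHeaderProto.newBuilder()
            .setBlock(block)
            .build();
        BaseHeaderProto parsed = BaseHeaderProto.parseFrom(header.toByteArray());
        System.out.println(parsed.hasBlock());  // true
        System.out.println(parsed.hasToken());  // false
      }
    }
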
+ public interface ClientOperationHeaderProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .BaseHeaderProto baseHeader = 1;
+ boolean hasBaseHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder();
+
+ // required string clientName = 2;
+ boolean hasClientName();
+ String getClientName();
+ }
+ public static final class ClientOperationHeaderProto extends
+ com.google.protobuf.GeneratedMessage
+ implements ClientOperationHeaderProtoOrBuilder {
+ // Use ClientOperationHeaderProto.newBuilder() to construct.
+ private ClientOperationHeaderProto(Builder builder) {
+ super(builder);
+ }
+ private ClientOperationHeaderProto(boolean noInit) {}
+
+ private static final ClientOperationHeaderProto defaultInstance;
+ public static ClientOperationHeaderProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ClientOperationHeaderProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .BaseHeaderProto baseHeader = 1;
+ public static final int BASEHEADER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
+ public boolean hasBaseHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
+ return baseHeader_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
+ return baseHeader_;
+ }
+
+ // required string clientName = 2;
+ public static final int CLIENTNAME_FIELD_NUMBER = 2;
+ private java.lang.Object clientName_;
+ public boolean hasClientName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getClientName() {
+ java.lang.Object ref = clientName_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ clientName_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getClientNameBytes() {
+ java.lang.Object ref = clientName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ clientName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
+ clientName_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBaseHeader()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasClientName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getBaseHeader().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, baseHeader_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getClientNameBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, baseHeader_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getClientNameBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj;
+
+ boolean result = true;
+ result = result && (hasBaseHeader() == other.hasBaseHeader());
+ if (hasBaseHeader()) {
+ result = result && getBaseHeader()
+ .equals(other.getBaseHeader());
+ }
+ result = result && (hasClientName() == other.hasClientName());
+ if (hasClientName()) {
+ result = result && getClientName()
+ .equals(other.getClientName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBaseHeader()) {
+ hash = (37 * hash) + BASEHEADER_FIELD_NUMBER;
+ hash = (53 * hash) + getBaseHeader().hashCode();
+ }
+ if (hasClientName()) {
+ hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getClientName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBaseHeaderFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (baseHeaderBuilder_ == null) {
+ baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
+ } else {
+ baseHeaderBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ clientName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (baseHeaderBuilder_ == null) {
+ result.baseHeader_ = baseHeader_;
+ } else {
+ result.baseHeader_ = baseHeaderBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.clientName_ = clientName_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this;
+ if (other.hasBaseHeader()) {
+ mergeBaseHeader(other.getBaseHeader());
+ }
+ if (other.hasClientName()) {
+ setClientName(other.getClientName());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBaseHeader()) {
+
+ return false;
+ }
+ if (!hasClientName()) {
+
+ return false;
+ }
+ if (!getBaseHeader().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder();
+ if (hasBaseHeader()) {
+ subBuilder.mergeFrom(getBaseHeader());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setBaseHeader(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ clientName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .BaseHeaderProto baseHeader = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_;
+ public boolean hasBaseHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
+ if (baseHeaderBuilder_ == null) {
+ return baseHeader_;
+ } else {
+ return baseHeaderBuilder_.getMessage();
+ }
+ }
+ public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
+ if (baseHeaderBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ baseHeader_ = value;
+ onChanged();
+ } else {
+ baseHeaderBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setBaseHeader(
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
+ if (baseHeaderBuilder_ == null) {
+ baseHeader_ = builderForValue.build();
+ onChanged();
+ } else {
+ baseHeaderBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
+ if (baseHeaderBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
+ baseHeader_ =
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(baseHeader_).mergeFrom(value).buildPartial();
+ } else {
+ baseHeader_ = value;
+ }
+ onChanged();
+ } else {
+ baseHeaderBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearBaseHeader() {
+ if (baseHeaderBuilder_ == null) {
+ baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
+ onChanged();
+ } else {
+ baseHeaderBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getBaseHeaderFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
+ if (baseHeaderBuilder_ != null) {
+ return baseHeaderBuilder_.getMessageOrBuilder();
+ } else {
+ return baseHeader_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
+ getBaseHeaderFieldBuilder() {
+ if (baseHeaderBuilder_ == null) {
+ baseHeaderBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
+ baseHeader_,
+ getParentForChildren(),
+ isClean());
+ baseHeader_ = null;
+ }
+ return baseHeaderBuilder_;
+ }
+
+ // required string clientName = 2;
+ private java.lang.Object clientName_ = "";
+ public boolean hasClientName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getClientName() {
+ java.lang.Object ref = clientName_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ clientName_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setClientName(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ clientName_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearClientName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ clientName_ = getDefaultInstance().getClientName();
+ onChanged();
+ return this;
+ }
+ void setClientName(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000002;
+ clientName_ = value;
+ onChanged();
+ }
+
+ // @@protoc_insertion_point(builder_scope:ClientOperationHeaderProto)
+ }
+
+ static {
+ defaultInstance = new ClientOperationHeaderProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ClientOperationHeaderProto)
+ }
+
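
[Usage sketch, not part of the committed file: ClientOperationHeaderProto wraps a BaseHeaderProto together with the required clientName, and the client-initiated operations below (e.g. OpReadBlockProto) embed it as field 1. This shows the required-field semantics; the client name string is hypothetical.]

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;

    public class ClientHeaderSketch {
      public static void main(String[] args) {
        // clientName alone is not enough: baseHeader is also required,
        // so the builder reports uninitialized until both are set.
        ClientOperationHeaderProto.Builder b =
            ClientOperationHeaderProto.newBuilder()
                .setClientName("DFSClient_NONMAPREDUCE_42");  // hypothetical
        System.out.println(b.isInitialized());                  // false: no baseHeader yet
        ClientOperationHeaderProto partial = b.buildPartial();  // skips validation
        System.out.println(partial.hasClientName());            // true
      }
    }
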
+ public interface OpReadBlockProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ClientOperationHeaderProto header = 1;
+ boolean hasHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
+
+ // required uint64 offset = 2;
+ boolean hasOffset();
+ long getOffset();
+
+ // required uint64 len = 3;
+ boolean hasLen();
+ long getLen();
+ }
+ public static final class OpReadBlockProto extends
+ com.google.protobuf.GeneratedMessage
+ implements OpReadBlockProtoOrBuilder {
+ // Use OpReadBlockProto.newBuilder() to construct.
+ private OpReadBlockProto(Builder builder) {
+ super(builder);
+ }
+ private OpReadBlockProto(boolean noInit) {}
+
+ private static final OpReadBlockProto defaultInstance;
+ public static OpReadBlockProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public OpReadBlockProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .ClientOperationHeaderProto header = 1;
+ public static final int HEADER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
+ public boolean hasHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
+ return header_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
+ return header_;
+ }
+
+ // required uint64 offset = 2;
+ public static final int OFFSET_FIELD_NUMBER = 2;
+ private long offset_;
+ public boolean hasOffset() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getOffset() {
+ return offset_;
+ }
+
+ // required uint64 len = 3;
+ public static final int LEN_FIELD_NUMBER = 3;
+ private long len_;
+ public boolean hasLen() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getLen() {
+ return len_;
+ }
+
+ private void initFields() {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ offset_ = 0L;
+ len_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasHeader()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasOffset()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasLen()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getHeader().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, header_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, offset_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, len_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, header_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, offset_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, len_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj;
+
+ boolean result = true;
+ result = result && (hasHeader() == other.hasHeader());
+ if (hasHeader()) {
+ result = result && getHeader()
+ .equals(other.getHeader());
+ }
+ result = result && (hasOffset() == other.hasOffset());
+ if (hasOffset()) {
+ result = result && (getOffset()
+ == other.getOffset());
+ }
+ result = result && (hasLen() == other.hasLen());
+ if (hasLen()) {
+ result = result && (getLen()
+ == other.getLen());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasHeader()) {
+ hash = (37 * hash) + HEADER_FIELD_NUMBER;
+ hash = (53 * hash) + getHeader().hashCode();
+ }
+ if (hasOffset()) {
+ hash = (37 * hash) + OFFSET_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getOffset());
+ }
+ if (hasLen()) {
+ hash = (37 * hash) + LEN_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLen());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getHeaderFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (headerBuilder_ == null) {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ } else {
+ headerBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ offset_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ len_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
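+ // (Descriptive note, not generated: buildPartial() copies each field and
+ // mirrors every presence bit set in the builder's bitField0_ into the
+ // message's bitField0_, so hasHeader()/hasOffset()/hasLen() on the built
+ // message reflect what was set here. Unlike build(), it performs no
+ // required-field check.)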
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (headerBuilder_ == null) {
+ result.header_ = header_;
+ } else {
+ result.header_ = headerBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.offset_ = offset_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.len_ = len_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this;
+ if (other.hasHeader()) {
+ mergeHeader(other.getHeader());
+ }
+ if (other.hasOffset()) {
+ setOffset(other.getOffset());
+ }
+ if (other.hasLen()) {
+ setLen(other.getLen());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasHeader()) {
+ return false;
+ }
+ if (!hasOffset()) {
+ return false;
+ }
+ if (!hasLen()) {
+ return false;
+ }
+ if (!getHeader().isInitialized()) {
+ return false;
+ }
+ return true;
+ }
+
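+ // (Descriptive note, not generated: the parser below dispatches on the
+ // protobuf wire tag, tag = (field_number << 3) | wire_type. So 10 =
+ // (1 << 3) | 2 is the length-delimited header, 16 = (2 << 3) | 0 the
+ // varint offset, and 24 = (3 << 3) | 0 the varint len; tag 0 means end
+ // of input. The mid-switch "default" arm is protoc's standard output
+ // and preserves unknown fields for forward compatibility.)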
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
+ if (hasHeader()) {
+ subBuilder.mergeFrom(getHeader());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setHeader(subBuilder.buildPartial());
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ offset_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ len_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .ClientOperationHeaderProto header = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
+ public boolean hasHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
+ if (headerBuilder_ == null) {
+ return header_;
+ } else {
+ return headerBuilder_.getMessage();
+ }
+ }
+ public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
+ if (headerBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ header_ = value;
+ onChanged();
+ } else {
+ headerBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setHeader(
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
+ if (headerBuilder_ == null) {
+ header_ = builderForValue.build();
+ onChanged();
+ } else {
+ headerBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
+ if (headerBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
+ header_ =
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
+ } else {
+ header_ = value;
+ }
+ onChanged();
+ } else {
+ headerBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearHeader() {
+ if (headerBuilder_ == null) {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ onChanged();
+ } else {
+ headerBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getHeaderFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
+ if (headerBuilder_ != null) {
+ return headerBuilder_.getMessageOrBuilder();
+ } else {
+ return header_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
+ getHeaderFieldBuilder() {
+ if (headerBuilder_ == null) {
+ headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
+ header_,
+ getParentForChildren(),
+ isClean());
+ header_ = null;
+ }
+ return headerBuilder_;
+ }
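+
+ // (Descriptive note, not generated: header_ and headerBuilder_ are
+ // mutually exclusive; the SingleFieldBuilder is created lazily on first
+ // use, takes ownership of the current value, and propagates nested edits
+ // back to this Builder via the parent/isClean() hooks, which is what
+ // makes getHeaderBuilder()-style in-place mutation safe.)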
+
+ // required uint64 offset = 2;
+ private long offset_ ;
+ public boolean hasOffset() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getOffset() {
+ return offset_;
+ }
+ public Builder setOffset(long value) {
+ bitField0_ |= 0x00000002;
+ offset_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearOffset() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ offset_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 len = 3;
+ private long len_ ;
+ public boolean hasLen() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getLen() {
+ return len_;
+ }
+ public Builder setLen(long value) {
+ bitField0_ |= 0x00000004;
+ len_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearLen() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ len_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:OpReadBlockProto)
+ }
+
+ static {
+ defaultInstance = new OpReadBlockProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:OpReadBlockProto)
+ }
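+
+ // A copy-with-modification sketch for the message above (illustrative,
+ // not generated; "op" is assumed to be an existing OpReadBlockProto):
+ //
+ //   OpReadBlockProto shorter = op.toBuilder()
+ //       .setLen(op.getLen() / 2)  // messages are immutable; edit a copy
+ //       .build();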
+
+ public interface OpWriteBlockProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ClientOperationHeaderProto header = 1;
+ boolean hasHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
+
+ // repeated .DatanodeInfoProto targets = 2;
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
+ getTargetsList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
+ int getTargetsCount();
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getTargetsOrBuilderList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
+ int index);
+
+ // optional .DatanodeInfoProto source = 3;
+ boolean hasSource();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
+
+ // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
+ boolean hasStage();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
+
+ // required uint32 pipelineSize = 5;
+ boolean hasPipelineSize();
+ int getPipelineSize();
+
+ // required uint64 minBytesRcvd = 6;
+ boolean hasMinBytesRcvd();
+ long getMinBytesRcvd();
+
+ // required uint64 maxBytesRcvd = 7;
+ boolean hasMaxBytesRcvd();
+ long getMaxBytesRcvd();
+
+ // required uint64 latestGenerationStamp = 8;
+ boolean hasLatestGenerationStamp();
+ long getLatestGenerationStamp();
+ }
+ public static final class OpWriteBlockProto extends
+ com.google.protobuf.GeneratedMessage
+ implements OpWriteBlockProtoOrBuilder {
+ // Use OpWriteBlockProto.newBuilder() to construct.
+ private OpWriteBlockProto(Builder builder) {
+ super(builder);
+ }
+ private OpWriteBlockProto(boolean noInit) {}
+
+ private static final OpWriteBlockProto defaultInstance;
+ public static OpWriteBlockProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public OpWriteBlockProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
+ }
+
+ public enum BlockConstructionStage
+ implements com.google.protobuf.ProtocolMessageEnum {
+ PIPELINE_SETUP_APPEND(0, 0),
+ PIPELINE_SETUP_APPEND_RECOVERY(1, 1),
+ DATA_STREAMING(2, 2),
+ PIPELINE_SETUP_STREAMING_RECOVERY(3, 3),
+ PIPELINE_CLOSE(4, 4),
+ PIPELINE_CLOSE_RECOVERY(5, 5),
+ PIPELINE_SETUP_CREATE(6, 6),
+ TRANSFER_RBW(7, 7),
+ TRANSFER_FINALIZED(8, 8),
+ ;
+
+ public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
+ public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
+ public static final int DATA_STREAMING_VALUE = 2;
+ public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
+ public static final int PIPELINE_CLOSE_VALUE = 4;
+ public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
+ public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
+ public static final int TRANSFER_RBW_VALUE = 7;
+ public static final int TRANSFER_FINALIZED_VALUE = 8;
+
+
+ public final int getNumber() { return value; }
+
+ public static BlockConstructionStage valueOf(int value) {
+ switch (value) {
+ case 0: return PIPELINE_SETUP_APPEND;
+ case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
+ case 2: return DATA_STREAMING;
+ case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
+ case 4: return PIPELINE_CLOSE;
+ case 5: return PIPELINE_CLOSE_RECOVERY;
+ case 6: return PIPELINE_SETUP_CREATE;
+ case 7: return TRANSFER_RBW;
+ case 8: return TRANSFER_FINALIZED;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
+ public BlockConstructionStage findValueByNumber(int number) {
+ return BlockConstructionStage.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final BlockConstructionStage[] VALUES = {
+ PIPELINE_SETUP_APPEND, PIPELINE_SETUP_APPEND_RECOVERY, DATA_STREAMING, PIPELINE_SETUP_STREAMING_RECOVERY, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY, PIPELINE_SETUP_CREATE, TRANSFER_RBW, TRANSFER_FINALIZED,
+ };
+
+ public static BlockConstructionStage valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BlockConstructionStage(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:OpWriteBlockProto.BlockConstructionStage)
+ }
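+
+ // Example (not generated): each constant carries a descriptor index and
+ // a wire value, which happen to coincide for this enum:
+ //
+ //   BlockConstructionStage s = BlockConstructionStage.valueOf(4);
+ //   // s == PIPELINE_CLOSE, s.getNumber() == 4; valueOf(99) yields null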
+
+ private int bitField0_;
+ // required .ClientOperationHeaderProto header = 1;
+ public static final int HEADER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
+ public boolean hasHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
+ return header_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
+ return header_;
+ }
+
+ // repeated .DatanodeInfoProto targets = 2;
+ public static final int TARGETS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
+ return targets_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getTargetsOrBuilderList() {
+ return targets_;
+ }
+ public int getTargetsCount() {
+ return targets_.size();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
+ return targets_.get(index);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
+ int index) {
+ return targets_.get(index);
+ }
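+
+ // Example (not generated): "targets" lists the downstream datanodes of
+ // the write pipeline; the repeated field is read index-wise --
+ //
+ //   for (int i = 0; i < proto.getTargetsCount(); i++) {
+ //     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto
+ //         dn = proto.getTargets(i);  // or iterate getTargetsList()
+ //   }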
+
+ // optional .DatanodeInfoProto source = 3;
+ public static final int SOURCE_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
+ public boolean hasSource() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
+ return source_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
+ return source_;
+ }
+
+ // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
+ public static final int STAGE_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_;
+ public boolean hasStage() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
+ return stage_;
+ }
+
+ // required uint32 pipelineSize = 5;
+ public static final int PIPELINESIZE_FIELD_NUMBER = 5;
+ private int pipelineSize_;
+ public boolean hasPipelineSize() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getPipelineSize() {
+ return pipelineSize_;
+ }
+
+ // required uint64 minBytesRcvd = 6;
+ public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
+ private long minBytesRcvd_;
+ public boolean hasMinBytesRcvd() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ public long getMinBytesRcvd() {
+ return minBytesRcvd_;
+ }
+
+ // required uint64 maxBytesRcvd = 7;
+ public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
+ private long maxBytesRcvd_;
+ public boolean hasMaxBytesRcvd() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ public long getMaxBytesRcvd() {
+ return maxBytesRcvd_;
+ }
+
+ // required uint64 latestGenerationStamp = 8;
+ public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
+ private long latestGenerationStamp_;
+ public boolean hasLatestGenerationStamp() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ public long getLatestGenerationStamp() {
+ return latestGenerationStamp_;
+ }
+
+ private void initFields() {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ targets_ = java.util.Collections.emptyList();
+ source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
+ stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
[... 6923 lines stripped ...]