You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2011/11/02 06:35:26 UTC
svn commit: r1196458 [3/9] - in
/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/ ...
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java Wed Nov 2 05:34:31 2011
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: datatransfer.proto
@@ -1952,127 +1951,77 @@ public final class DataTransferProtos {
// @@protoc_insertion_point(class_scope:OpReadBlockProto)
}
- public interface OpWriteBlockProtoOrBuilder
+ public interface ChecksumProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required .ClientOperationHeaderProto header = 1;
- boolean hasHeader();
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
-
- // repeated .DatanodeInfoProto targets = 2;
- java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
- getTargetsList();
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
- int getTargetsCount();
- java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
- getTargetsOrBuilderList();
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
- int index);
-
- // optional .DatanodeInfoProto source = 3;
- boolean hasSource();
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
-
- // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
- boolean hasStage();
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
-
- // required uint32 pipelineSize = 5;
- boolean hasPipelineSize();
- int getPipelineSize();
-
- // required uint64 minBytesRcvd = 6;
- boolean hasMinBytesRcvd();
- long getMinBytesRcvd();
-
- // required uint64 maxBytesRcvd = 7;
- boolean hasMaxBytesRcvd();
- long getMaxBytesRcvd();
-
- // required uint64 latestGenerationStamp = 8;
- boolean hasLatestGenerationStamp();
- long getLatestGenerationStamp();
+ // required .ChecksumProto.ChecksumType type = 1;
+ boolean hasType();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType();
+
+ // required uint32 bytesPerChecksum = 2;
+ boolean hasBytesPerChecksum();
+ int getBytesPerChecksum();
}
- public static final class OpWriteBlockProto extends
+ public static final class ChecksumProto extends
com.google.protobuf.GeneratedMessage
- implements OpWriteBlockProtoOrBuilder {
- // Use OpWriteBlockProto.newBuilder() to construct.
- private OpWriteBlockProto(Builder builder) {
+ implements ChecksumProtoOrBuilder {
+ // Use ChecksumProto.newBuilder() to construct.
+ private ChecksumProto(Builder builder) {
super(builder);
}
- private OpWriteBlockProto(boolean noInit) {}
+ private ChecksumProto(boolean noInit) {}
- private static final OpWriteBlockProto defaultInstance;
- public static OpWriteBlockProto getDefaultInstance() {
+ private static final ChecksumProto defaultInstance;
+ public static ChecksumProto getDefaultInstance() {
return defaultInstance;
}
- public OpWriteBlockProto getDefaultInstanceForType() {
+ public ChecksumProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable;
}
- public enum BlockConstructionStage
+ public enum ChecksumType
implements com.google.protobuf.ProtocolMessageEnum {
- PIPELINE_SETUP_APPEND(0, 0),
- PIPELINE_SETUP_APPEND_RECOVERY(1, 1),
- DATA_STREAMING(2, 2),
- PIPELINE_SETUP_STREAMING_RECOVERY(3, 3),
- PIPELINE_CLOSE(4, 4),
- PIPELINE_CLOSE_RECOVERY(5, 5),
- PIPELINE_SETUP_CREATE(6, 6),
- TRANSFER_RBW(7, 7),
- TRANSFER_FINALIZED(8, 8),
+ NULL(0, 0),
+ CRC32(1, 1),
+ CRC32C(2, 2),
;
- public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
- public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
- public static final int DATA_STREAMING_VALUE = 2;
- public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
- public static final int PIPELINE_CLOSE_VALUE = 4;
- public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
- public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
- public static final int TRANSFER_RBW_VALUE = 7;
- public static final int TRANSFER_FINALIZED_VALUE = 8;
+ public static final int NULL_VALUE = 0;
+ public static final int CRC32_VALUE = 1;
+ public static final int CRC32C_VALUE = 2;
public final int getNumber() { return value; }
- public static BlockConstructionStage valueOf(int value) {
+ public static ChecksumType valueOf(int value) {
switch (value) {
- case 0: return PIPELINE_SETUP_APPEND;
- case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
- case 2: return DATA_STREAMING;
- case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
- case 4: return PIPELINE_CLOSE;
- case 5: return PIPELINE_CLOSE_RECOVERY;
- case 6: return PIPELINE_SETUP_CREATE;
- case 7: return TRANSFER_RBW;
- case 8: return TRANSFER_FINALIZED;
+ case 0: return NULL;
+ case 1: return CRC32;
+ case 2: return CRC32C;
default: return null;
}
}
- public static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ public static com.google.protobuf.Internal.EnumLiteMap<ChecksumType>
internalGetValueMap() {
return internalValueMap;
}
- private static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ private static com.google.protobuf.Internal.EnumLiteMap<ChecksumType>
internalValueMap =
- new com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
- public BlockConstructionStage findValueByNumber(int number) {
- return BlockConstructionStage.valueOf(number);
+ new com.google.protobuf.Internal.EnumLiteMap<ChecksumType>() {
+ public ChecksumType findValueByNumber(int number) {
+ return ChecksumType.valueOf(number);
}
};
@@ -2086,14 +2035,14 @@ public final class DataTransferProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor().getEnumTypes().get(0);
}
- private static final BlockConstructionStage[] VALUES = {
- PIPELINE_SETUP_APPEND, PIPELINE_SETUP_APPEND_RECOVERY, DATA_STREAMING, PIPELINE_SETUP_STREAMING_RECOVERY, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY, PIPELINE_SETUP_CREATE, TRANSFER_RBW, TRANSFER_FINALIZED,
+ private static final ChecksumType[] VALUES = {
+ NULL, CRC32, CRC32C,
};
- public static BlockConstructionStage valueOf(
+ public static ChecksumType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
@@ -2105,167 +2054,52 @@ public final class DataTransferProtos {
private final int index;
private final int value;
- private BlockConstructionStage(int index, int value) {
+ private ChecksumType(int index, int value) {
this.index = index;
this.value = value;
}
- // @@protoc_insertion_point(enum_scope:OpWriteBlockProto.BlockConstructionStage)
+ // @@protoc_insertion_point(enum_scope:ChecksumProto.ChecksumType)
}
private int bitField0_;
- // required .ClientOperationHeaderProto header = 1;
- public static final int HEADER_FIELD_NUMBER = 1;
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
- public boolean hasHeader() {
+ // required .ChecksumProto.ChecksumType type = 1;
+ public static final int TYPE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_;
+ public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
- return header_;
- }
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
- return header_;
- }
-
- // repeated .DatanodeInfoProto targets = 2;
- public static final int TARGETS_FIELD_NUMBER = 2;
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
- return targets_;
- }
- public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
- getTargetsOrBuilderList() {
- return targets_;
- }
- public int getTargetsCount() {
- return targets_.size();
- }
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
- return targets_.get(index);
- }
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
- int index) {
- return targets_.get(index);
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() {
+ return type_;
}
- // optional .DatanodeInfoProto source = 3;
- public static final int SOURCE_FIELD_NUMBER = 3;
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
- public boolean hasSource() {
+ // required uint32 bytesPerChecksum = 2;
+ public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
+ private int bytesPerChecksum_;
+ public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
- return source_;
- }
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
- return source_;
- }
-
- // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
- public static final int STAGE_FIELD_NUMBER = 4;
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_;
- public boolean hasStage() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
- return stage_;
- }
-
- // required uint32 pipelineSize = 5;
- public static final int PIPELINESIZE_FIELD_NUMBER = 5;
- private int pipelineSize_;
- public boolean hasPipelineSize() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- public int getPipelineSize() {
- return pipelineSize_;
- }
-
- // required uint64 minBytesRcvd = 6;
- public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
- private long minBytesRcvd_;
- public boolean hasMinBytesRcvd() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- public long getMinBytesRcvd() {
- return minBytesRcvd_;
- }
-
- // required uint64 maxBytesRcvd = 7;
- public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
- private long maxBytesRcvd_;
- public boolean hasMaxBytesRcvd() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- public long getMaxBytesRcvd() {
- return maxBytesRcvd_;
- }
-
- // required uint64 latestGenerationStamp = 8;
- public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
- private long latestGenerationStamp_;
- public boolean hasLatestGenerationStamp() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- public long getLatestGenerationStamp() {
- return latestGenerationStamp_;
+ public int getBytesPerChecksum() {
+ return bytesPerChecksum_;
}
private void initFields() {
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
- targets_ = java.util.Collections.emptyList();
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
- stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
- pipelineSize_ = 0;
- minBytesRcvd_ = 0L;
- maxBytesRcvd_ = 0L;
- latestGenerationStamp_ = 0L;
+ type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
+ bytesPerChecksum_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasHeader()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasStage()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasPipelineSize()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasMinBytesRcvd()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasMaxBytesRcvd()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasLatestGenerationStamp()) {
+ if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
- if (!getHeader().isInitialized()) {
+ if (!hasBytesPerChecksum()) {
memoizedIsInitialized = 0;
return false;
}
- for (int i = 0; i < getTargetsCount(); i++) {
- if (!getTargets(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
- if (hasSource()) {
- if (!getSource().isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
memoizedIsInitialized = 1;
return true;
}
@@ -2274,28 +2108,10 @@ public final class DataTransferProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeMessage(1, header_);
- }
- for (int i = 0; i < targets_.size(); i++) {
- output.writeMessage(2, targets_.get(i));
+ output.writeEnum(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(3, source_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeEnum(4, stage_.getNumber());
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeUInt32(5, pipelineSize_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeUInt64(6, minBytesRcvd_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- output.writeUInt64(7, maxBytesRcvd_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- output.writeUInt64(8, latestGenerationStamp_);
+ output.writeUInt32(2, bytesPerChecksum_);
}
getUnknownFields().writeTo(output);
}
@@ -2308,35 +2124,11 @@ public final class DataTransferProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(1, header_);
- }
- for (int i = 0; i < targets_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, targets_.get(i));
+ .computeEnumSize(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, source_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeEnumSize(4, stage_.getNumber());
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt32Size(5, pipelineSize_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(6, minBytesRcvd_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(7, maxBytesRcvd_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(8, latestGenerationStamp_);
+ .computeUInt32Size(2, bytesPerChecksum_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -2355,48 +2147,21 @@ public final class DataTransferProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) {
return super.equals(obj);
}
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj;
boolean result = true;
- result = result && (hasHeader() == other.hasHeader());
- if (hasHeader()) {
- result = result && getHeader()
- .equals(other.getHeader());
- }
- result = result && getTargetsList()
- .equals(other.getTargetsList());
- result = result && (hasSource() == other.hasSource());
- if (hasSource()) {
- result = result && getSource()
- .equals(other.getSource());
- }
- result = result && (hasStage() == other.hasStage());
- if (hasStage()) {
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
result = result &&
- (getStage() == other.getStage());
- }
- result = result && (hasPipelineSize() == other.hasPipelineSize());
- if (hasPipelineSize()) {
- result = result && (getPipelineSize()
- == other.getPipelineSize());
- }
- result = result && (hasMinBytesRcvd() == other.hasMinBytesRcvd());
- if (hasMinBytesRcvd()) {
- result = result && (getMinBytesRcvd()
- == other.getMinBytesRcvd());
- }
- result = result && (hasMaxBytesRcvd() == other.hasMaxBytesRcvd());
- if (hasMaxBytesRcvd()) {
- result = result && (getMaxBytesRcvd()
- == other.getMaxBytesRcvd());
+ (getType() == other.getType());
}
- result = result && (hasLatestGenerationStamp() == other.hasLatestGenerationStamp());
- if (hasLatestGenerationStamp()) {
- result = result && (getLatestGenerationStamp()
- == other.getLatestGenerationStamp());
+ result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
+ if (hasBytesPerChecksum()) {
+ result = result && (getBytesPerChecksum()
+ == other.getBytesPerChecksum());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
@@ -2407,77 +2172,53 @@ public final class DataTransferProtos {
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasHeader()) {
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
- hash = (53 * hash) + getHeader().hashCode();
- }
- if (getTargetsCount() > 0) {
- hash = (37 * hash) + TARGETS_FIELD_NUMBER;
- hash = (53 * hash) + getTargetsList().hashCode();
- }
- if (hasSource()) {
- hash = (37 * hash) + SOURCE_FIELD_NUMBER;
- hash = (53 * hash) + getSource().hashCode();
- }
- if (hasStage()) {
- hash = (37 * hash) + STAGE_FIELD_NUMBER;
- hash = (53 * hash) + hashEnum(getStage());
- }
- if (hasPipelineSize()) {
- hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
- hash = (53 * hash) + getPipelineSize();
- }
- if (hasMinBytesRcvd()) {
- hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
- hash = (53 * hash) + hashLong(getMinBytesRcvd());
- }
- if (hasMaxBytesRcvd()) {
- hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
- hash = (53 * hash) + hashLong(getMaxBytesRcvd());
- }
- if (hasLatestGenerationStamp()) {
- hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
- hash = (53 * hash) + hashLong(getLatestGenerationStamp());
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ if (hasBytesPerChecksum()) {
+ hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
+ hash = (53 * hash) + getBytesPerChecksum();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
@@ -2486,7 +2227,7 @@ public final class DataTransferProtos {
return null;
}
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -2497,12 +2238,12 @@ public final class DataTransferProtos {
return null;
}
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -2512,7 +2253,7 @@ public final class DataTransferProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -2525,18 +2266,18 @@ public final class DataTransferProtos {
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
+ implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable;
}
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -2547,9 +2288,6 @@ public final class DataTransferProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getHeaderFieldBuilder();
- getTargetsFieldBuilder();
- getSourceFieldBuilder();
}
}
private static Builder create() {
@@ -2558,34 +2296,10 @@ public final class DataTransferProtos {
public Builder clear() {
super.clear();
- if (headerBuilder_ == null) {
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
- } else {
- headerBuilder_.clear();
- }
+ type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
bitField0_ = (bitField0_ & ~0x00000001);
- if (targetsBuilder_ == null) {
- targets_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- targetsBuilder_.clear();
- }
- if (sourceBuilder_ == null) {
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
- } else {
- sourceBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000004);
- stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
- bitField0_ = (bitField0_ & ~0x00000008);
- pipelineSize_ = 0;
- bitField0_ = (bitField0_ & ~0x00000010);
- minBytesRcvd_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000020);
- maxBytesRcvd_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000040);
- latestGenerationStamp_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000080);
+ bytesPerChecksum_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@@ -2595,24 +2309,24 @@ public final class DataTransferProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor();
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor();
}
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
}
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildParsed()
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
@@ -2620,163 +2334,53 @@ public final class DataTransferProtos {
return result;
}
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- if (headerBuilder_ == null) {
- result.header_ = header_;
- } else {
- result.header_ = headerBuilder_.build();
- }
- if (targetsBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- targets_ = java.util.Collections.unmodifiableList(targets_);
- bitField0_ = (bitField0_ & ~0x00000002);
- }
- result.targets_ = targets_;
- } else {
- result.targets_ = targetsBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- if (sourceBuilder_ == null) {
- result.source_ = source_;
- } else {
- result.source_ = sourceBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000004;
- }
- result.stage_ = stage_;
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000008;
- }
- result.pipelineSize_ = pipelineSize_;
- if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
- to_bitField0_ |= 0x00000010;
- }
- result.minBytesRcvd_ = minBytesRcvd_;
- if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
- to_bitField0_ |= 0x00000020;
- }
- result.maxBytesRcvd_ = maxBytesRcvd_;
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
- to_bitField0_ |= 0x00000040;
- }
- result.latestGenerationStamp_ = latestGenerationStamp_;
+ result.bytesPerChecksum_ = bytesPerChecksum_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
- if (other.hasHeader()) {
- mergeHeader(other.getHeader());
- }
- if (targetsBuilder_ == null) {
- if (!other.targets_.isEmpty()) {
- if (targets_.isEmpty()) {
- targets_ = other.targets_;
- bitField0_ = (bitField0_ & ~0x00000002);
- } else {
- ensureTargetsIsMutable();
- targets_.addAll(other.targets_);
- }
- onChanged();
- }
- } else {
- if (!other.targets_.isEmpty()) {
- if (targetsBuilder_.isEmpty()) {
- targetsBuilder_.dispose();
- targetsBuilder_ = null;
- targets_ = other.targets_;
- bitField0_ = (bitField0_ & ~0x00000002);
- targetsBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getTargetsFieldBuilder() : null;
- } else {
- targetsBuilder_.addAllMessages(other.targets_);
- }
- }
- }
- if (other.hasSource()) {
- mergeSource(other.getSource());
- }
- if (other.hasStage()) {
- setStage(other.getStage());
- }
- if (other.hasPipelineSize()) {
- setPipelineSize(other.getPipelineSize());
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this;
+ if (other.hasType()) {
+ setType(other.getType());
}
- if (other.hasMinBytesRcvd()) {
- setMinBytesRcvd(other.getMinBytesRcvd());
- }
- if (other.hasMaxBytesRcvd()) {
- setMaxBytesRcvd(other.getMaxBytesRcvd());
- }
- if (other.hasLatestGenerationStamp()) {
- setLatestGenerationStamp(other.getLatestGenerationStamp());
+ if (other.hasBytesPerChecksum()) {
+ setBytesPerChecksum(other.getBytesPerChecksum());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
- if (!hasHeader()) {
-
- return false;
- }
- if (!hasStage()) {
-
- return false;
- }
- if (!hasPipelineSize()) {
-
- return false;
- }
- if (!hasMinBytesRcvd()) {
-
- return false;
- }
- if (!hasMaxBytesRcvd()) {
-
- return false;
- }
- if (!hasLatestGenerationStamp()) {
+ if (!hasType()) {
return false;
}
- if (!getHeader().isInitialized()) {
+ if (!hasBytesPerChecksum()) {
return false;
}
- for (int i = 0; i < getTargetsCount(); i++) {
- if (!getTargets(i).isInitialized()) {
-
- return false;
- }
- }
- if (hasSource()) {
- if (!getSource().isInitialized()) {
-
- return false;
- }
- }
return true;
}
@@ -2803,59 +2407,2118 @@ public final class DataTransferProtos {
}
break;
}
- case 10: {
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
- if (hasHeader()) {
- subBuilder.mergeFrom(getHeader());
- }
- input.readMessage(subBuilder, extensionRegistry);
- setHeader(subBuilder.buildPartial());
- break;
- }
- case 18: {
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
- input.readMessage(subBuilder, extensionRegistry);
- addTargets(subBuilder.buildPartial());
- break;
- }
- case 26: {
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
- if (hasSource()) {
- subBuilder.mergeFrom(getSource());
- }
- input.readMessage(subBuilder, extensionRegistry);
- setSource(subBuilder.buildPartial());
- break;
- }
- case 32: {
+ case 8: {
int rawValue = input.readEnum();
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue);
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.valueOf(rawValue);
if (value == null) {
- unknownFields.mergeVarintField(4, rawValue);
+ unknownFields.mergeVarintField(1, rawValue);
} else {
- bitField0_ |= 0x00000008;
- stage_ = value;
+ bitField0_ |= 0x00000001;
+ type_ = value;
}
break;
}
- case 40: {
- bitField0_ |= 0x00000010;
- pipelineSize_ = input.readUInt32();
+ case 16: {
+ bitField0_ |= 0x00000002;
+ bytesPerChecksum_ = input.readUInt32();
break;
}
- case 48: {
- bitField0_ |= 0x00000020;
- minBytesRcvd_ = input.readUInt64();
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .ChecksumProto.ChecksumType type = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() {
+ return type_;
+ }
+ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
+ onChanged();
+ return this;
+ }
+
+ // required uint32 bytesPerChecksum = 2;
+ private int bytesPerChecksum_ ;
+ public boolean hasBytesPerChecksum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public int getBytesPerChecksum() {
+ return bytesPerChecksum_;
+ }
+ public Builder setBytesPerChecksum(int value) {
+ bitField0_ |= 0x00000002;
+ bytesPerChecksum_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearBytesPerChecksum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ bytesPerChecksum_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ChecksumProto)
+ }
+
+ static {
+ defaultInstance = new ChecksumProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ChecksumProto)
+ }
+
+ public interface OpWriteBlockProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ClientOperationHeaderProto header = 1;
+ boolean hasHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
+
+ // repeated .DatanodeInfoProto targets = 2;
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
+ getTargetsList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
+ int getTargetsCount();
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getTargetsOrBuilderList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
+ int index);
+
+ // optional .DatanodeInfoProto source = 3;
+ boolean hasSource();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
+
+ // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
+ boolean hasStage();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
+
+ // required uint32 pipelineSize = 5;
+ boolean hasPipelineSize();
+ int getPipelineSize();
+
+ // required uint64 minBytesRcvd = 6;
+ boolean hasMinBytesRcvd();
+ long getMinBytesRcvd();
+
+ // required uint64 maxBytesRcvd = 7;
+ boolean hasMaxBytesRcvd();
+ long getMaxBytesRcvd();
+
+ // required uint64 latestGenerationStamp = 8;
+ boolean hasLatestGenerationStamp();
+ long getLatestGenerationStamp();
+
+ // required .ChecksumProto requestedChecksum = 9;
+ boolean hasRequestedChecksum();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();
+ }
+ public static final class OpWriteBlockProto extends
+ com.google.protobuf.GeneratedMessage
+ implements OpWriteBlockProtoOrBuilder {
+ // Use OpWriteBlockProto.newBuilder() to construct.
+ private OpWriteBlockProto(Builder builder) {
+ super(builder);
+ }
+ private OpWriteBlockProto(boolean noInit) {}
+
+ private static final OpWriteBlockProto defaultInstance;
+ public static OpWriteBlockProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public OpWriteBlockProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
+ }
+
+ public enum BlockConstructionStage
+ implements com.google.protobuf.ProtocolMessageEnum {
+ PIPELINE_SETUP_APPEND(0, 0),
+ PIPELINE_SETUP_APPEND_RECOVERY(1, 1),
+ DATA_STREAMING(2, 2),
+ PIPELINE_SETUP_STREAMING_RECOVERY(3, 3),
+ PIPELINE_CLOSE(4, 4),
+ PIPELINE_CLOSE_RECOVERY(5, 5),
+ PIPELINE_SETUP_CREATE(6, 6),
+ TRANSFER_RBW(7, 7),
+ TRANSFER_FINALIZED(8, 8),
+ ;
+
+ public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
+ public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
+ public static final int DATA_STREAMING_VALUE = 2;
+ public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
+ public static final int PIPELINE_CLOSE_VALUE = 4;
+ public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
+ public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
+ public static final int TRANSFER_RBW_VALUE = 7;
+ public static final int TRANSFER_FINALIZED_VALUE = 8;
+
+
+ public final int getNumber() { return value; }
+
+ public static BlockConstructionStage valueOf(int value) {
+ switch (value) {
+ case 0: return PIPELINE_SETUP_APPEND;
+ case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
+ case 2: return DATA_STREAMING;
+ case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
+ case 4: return PIPELINE_CLOSE;
+ case 5: return PIPELINE_CLOSE_RECOVERY;
+ case 6: return PIPELINE_SETUP_CREATE;
+ case 7: return TRANSFER_RBW;
+ case 8: return TRANSFER_FINALIZED;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
+ public BlockConstructionStage findValueByNumber(int number) {
+ return BlockConstructionStage.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final BlockConstructionStage[] VALUES = {
+ PIPELINE_SETUP_APPEND, PIPELINE_SETUP_APPEND_RECOVERY, DATA_STREAMING, PIPELINE_SETUP_STREAMING_RECOVERY, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY, PIPELINE_SETUP_CREATE, TRANSFER_RBW, TRANSFER_FINALIZED,
+ };
+
+ public static BlockConstructionStage valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BlockConstructionStage(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:OpWriteBlockProto.BlockConstructionStage)
+ }
+
+ private int bitField0_;
+ // required .ClientOperationHeaderProto header = 1;
+ public static final int HEADER_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
+ public boolean hasHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
+ return header_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
+ return header_;
+ }
+
+ // repeated .DatanodeInfoProto targets = 2;
+ public static final int TARGETS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
+ return targets_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getTargetsOrBuilderList() {
+ return targets_;
+ }
+ public int getTargetsCount() {
+ return targets_.size();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
+ return targets_.get(index);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
+ int index) {
+ return targets_.get(index);
+ }
+
+ // optional .DatanodeInfoProto source = 3;
+ public static final int SOURCE_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
+ public boolean hasSource() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
+ return source_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
+ return source_;
+ }
+
+ // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
+ public static final int STAGE_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_;
+ public boolean hasStage() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
+ return stage_;
+ }
+
+ // required uint32 pipelineSize = 5;
+ public static final int PIPELINESIZE_FIELD_NUMBER = 5;
+ private int pipelineSize_;
+ public boolean hasPipelineSize() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getPipelineSize() {
+ return pipelineSize_;
+ }
+
+ // required uint64 minBytesRcvd = 6;
+ public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
+ private long minBytesRcvd_;
+ public boolean hasMinBytesRcvd() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ public long getMinBytesRcvd() {
+ return minBytesRcvd_;
+ }
+
+ // required uint64 maxBytesRcvd = 7;
+ public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
+ private long maxBytesRcvd_;
+ public boolean hasMaxBytesRcvd() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ public long getMaxBytesRcvd() {
+ return maxBytesRcvd_;
+ }
+
+ // required uint64 latestGenerationStamp = 8;
+ public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
+ private long latestGenerationStamp_;
+ public boolean hasLatestGenerationStamp() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ public long getLatestGenerationStamp() {
+ return latestGenerationStamp_;
+ }
+
+ // required .ChecksumProto requestedChecksum = 9;
+ public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9;
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
+ public boolean hasRequestedChecksum() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
+ return requestedChecksum_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
+ return requestedChecksum_;
+ }
+
+ private void initFields() {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ targets_ = java.util.Collections.emptyList();
+ source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
+ stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
+ pipelineSize_ = 0;
+ minBytesRcvd_ = 0L;
+ maxBytesRcvd_ = 0L;
+ latestGenerationStamp_ = 0L;
+ requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasHeader()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStage()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPipelineSize()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasMinBytesRcvd()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasMaxBytesRcvd()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasLatestGenerationStamp()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRequestedChecksum()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getHeader().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getTargetsCount(); i++) {
+ if (!getTargets(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (hasSource()) {
+ if (!getSource().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (!getRequestedChecksum().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, header_);
+ }
+ for (int i = 0; i < targets_.size(); i++) {
+ output.writeMessage(2, targets_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(3, source_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeEnum(4, stage_.getNumber());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt32(5, pipelineSize_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeUInt64(6, minBytesRcvd_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeUInt64(7, maxBytesRcvd_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeUInt64(8, latestGenerationStamp_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeMessage(9, requestedChecksum_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, header_);
+ }
+ for (int i = 0; i < targets_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, targets_.get(i));
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, source_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(4, stage_.getNumber());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(5, pipelineSize_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(6, minBytesRcvd_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(7, maxBytesRcvd_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(8, latestGenerationStamp_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, requestedChecksum_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
+
+ boolean result = true;
+ result = result && (hasHeader() == other.hasHeader());
+ if (hasHeader()) {
+ result = result && getHeader()
+ .equals(other.getHeader());
+ }
+ result = result && getTargetsList()
+ .equals(other.getTargetsList());
+ result = result && (hasSource() == other.hasSource());
+ if (hasSource()) {
+ result = result && getSource()
+ .equals(other.getSource());
+ }
+ result = result && (hasStage() == other.hasStage());
+ if (hasStage()) {
+ result = result &&
+ (getStage() == other.getStage());
+ }
+ result = result && (hasPipelineSize() == other.hasPipelineSize());
+ if (hasPipelineSize()) {
+ result = result && (getPipelineSize()
+ == other.getPipelineSize());
+ }
+ result = result && (hasMinBytesRcvd() == other.hasMinBytesRcvd());
+ if (hasMinBytesRcvd()) {
+ result = result && (getMinBytesRcvd()
+ == other.getMinBytesRcvd());
+ }
+ result = result && (hasMaxBytesRcvd() == other.hasMaxBytesRcvd());
+ if (hasMaxBytesRcvd()) {
+ result = result && (getMaxBytesRcvd()
+ == other.getMaxBytesRcvd());
+ }
+ result = result && (hasLatestGenerationStamp() == other.hasLatestGenerationStamp());
+ if (hasLatestGenerationStamp()) {
+ result = result && (getLatestGenerationStamp()
+ == other.getLatestGenerationStamp());
+ }
+ result = result && (hasRequestedChecksum() == other.hasRequestedChecksum());
+ if (hasRequestedChecksum()) {
+ result = result && getRequestedChecksum()
+ .equals(other.getRequestedChecksum());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasHeader()) {
+ hash = (37 * hash) + HEADER_FIELD_NUMBER;
+ hash = (53 * hash) + getHeader().hashCode();
+ }
+ if (getTargetsCount() > 0) {
+ hash = (37 * hash) + TARGETS_FIELD_NUMBER;
+ hash = (53 * hash) + getTargetsList().hashCode();
+ }
+ if (hasSource()) {
+ hash = (37 * hash) + SOURCE_FIELD_NUMBER;
+ hash = (53 * hash) + getSource().hashCode();
+ }
+ if (hasStage()) {
+ hash = (37 * hash) + STAGE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getStage());
+ }
+ if (hasPipelineSize()) {
+ hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
+ hash = (53 * hash) + getPipelineSize();
+ }
+ if (hasMinBytesRcvd()) {
+ hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getMinBytesRcvd());
+ }
+ if (hasMaxBytesRcvd()) {
+ hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getMaxBytesRcvd());
+ }
+ if (hasLatestGenerationStamp()) {
+ hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLatestGenerationStamp());
+ }
+ if (hasRequestedChecksum()) {
+ hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
+ hash = (53 * hash) + getRequestedChecksum().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getHeaderFieldBuilder();
+ getTargetsFieldBuilder();
+ getSourceFieldBuilder();
+ getRequestedChecksumFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (headerBuilder_ == null) {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ } else {
+ headerBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (targetsBuilder_ == null) {
+ targets_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ targetsBuilder_.clear();
+ }
+ if (sourceBuilder_ == null) {
+ source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
+ } else {
+ sourceBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ pipelineSize_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ minBytesRcvd_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ maxBytesRcvd_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ latestGenerationStamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ if (requestedChecksumBuilder_ == null) {
+ requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
+ } else {
+ requestedChecksumBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
+ }
+
+ // Builds the message, enforcing that all required fields are set; throws an
+ // unchecked UninitializedMessageException otherwise (contrast buildParsed(),
+ // which reports the same condition as a checked parse error).
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Like build(), but used on parse paths: missing required fields surface as
+ // a checked InvalidProtocolBufferException instead of a runtime exception.
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ // Builds the message WITHOUT required-field validation. Copies each set
+ // field from the builder into the result and remaps has-bits from the
+ // builder's bitfield layout (which reserves a bit for the repeated
+ // 'targets' list) to the message's tighter layout (which does not).
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ // Nested-message fields come either from the plain field or, if a nested
+ // builder was ever requested, from that builder.
+ if (headerBuilder_ == null) {
+ result.header_ = header_;
+ } else {
+ result.header_ = headerBuilder_.build();
+ }
+ if (targetsBuilder_ == null) {
+ // Transfer ownership of the list to the message: freeze it and clear
+ // the builder's "list is mutable" bit so later builder mutations
+ // cannot affect the built (immutable) message.
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ targets_ = java.util.Collections.unmodifiableList(targets_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.targets_ = targets_;
+ } else {
+ result.targets_ = targetsBuilder_.build();
+ }
+ // Remaining bits shift down by one position in the message layout
+ // (builder 0x04 -> message 0x02, and so on).
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (sourceBuilder_ == null) {
+ result.source_ = source_;
+ } else {
+ result.source_ = sourceBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.stage_ = stage_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.pipelineSize_ = pipelineSize_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.minBytesRcvd_ = minBytesRcvd_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.maxBytesRcvd_ = maxBytesRcvd_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.latestGenerationStamp_ = latestGenerationStamp_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ if (requestedChecksumBuilder_ == null) {
+ result.requestedChecksum_ = requestedChecksum_;
+ } else {
+ result.requestedChecksum_ = requestedChecksumBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ // Generic Message merge entry point: dispatches to the strongly-typed
+ // overload when possible, else falls back to reflective field-by-field
+ // merging in the superclass.
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ // Field-by-field merge from another OpWriteBlockProto: set fields in
+ // 'other' overwrite scalars/enums here, nested messages are recursively
+ // merged, and repeated 'targets' entries are appended.
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
+ if (other.hasHeader()) {
+ mergeHeader(other.getHeader());
+ }
+ if (targetsBuilder_ == null) {
+ if (!other.targets_.isEmpty()) {
+ if (targets_.isEmpty()) {
+ // Our list is empty: share other's (immutable) list instead of
+ // copying; the ~0x02 bit marks it as not-yet-mutable here.
+ targets_ = other.targets_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureTargetsIsMutable();
+ targets_.addAll(other.targets_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.targets_.isEmpty()) {
+ if (targetsBuilder_.isEmpty()) {
+ // Same sharing optimization on the nested-builder path: drop the
+ // empty builder, adopt other's list, and only recreate the field
+ // builder if the runtime forces builder usage.
+ targetsBuilder_.dispose();
+ targetsBuilder_ = null;
+ targets_ = other.targets_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ targetsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTargetsFieldBuilder() : null;
+ } else {
+ targetsBuilder_.addAllMessages(other.targets_);
+ }
+ }
+ }
+ if (other.hasSource()) {
+ mergeSource(other.getSource());
+ }
+ if (other.hasStage()) {
+ setStage(other.getStage());
+ }
+ if (other.hasPipelineSize()) {
+ setPipelineSize(other.getPipelineSize());
+ }
+ if (other.hasMinBytesRcvd()) {
+ setMinBytesRcvd(other.getMinBytesRcvd());
+ }
+ if (other.hasMaxBytesRcvd()) {
+ setMaxBytesRcvd(other.getMaxBytesRcvd());
+ }
+ if (other.hasLatestGenerationStamp()) {
+ setLatestGenerationStamp(other.getLatestGenerationStamp());
+ }
+ if (other.hasRequestedChecksum()) {
+ mergeRequestedChecksum(other.getRequestedChecksum());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ // True only when every required field is set and every present nested
+ // message (required, repeated, or optional) is itself initialized.
+ public final boolean isInitialized() {
+ if (!hasHeader()) {
+
+ return false;
+ }
+ if (!hasStage()) {
+
+ return false;
+ }
+ if (!hasPipelineSize()) {
+
+ return false;
+ }
+ if (!hasMinBytesRcvd()) {
+
+ return false;
+ }
+ if (!hasMaxBytesRcvd()) {
+
+ return false;
+ }
+ if (!hasLatestGenerationStamp()) {
+
+ return false;
+ }
+ if (!hasRequestedChecksum()) {
+
+ return false;
+ }
+ if (!getHeader().isInitialized()) {
+
+ return false;
+ }
+ for (int i = 0; i < getTargetsCount(); i++) {
+ if (!getTargets(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ // 'source' is optional: only validated when present.
+ if (hasSource()) {
+ if (!getSource().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (!getRequestedChecksum().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ // Parses wire-format bytes into this builder. Reads tag-by-tag until
+ // tag 0 (end of stream); fields with unrecognized tags are preserved in
+ // unknownFields rather than dropped. Note the generated switch places
+ // 'default' before the field cases — legal Java, just unusual ordering.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ // Field 1 (header): merge into any already-set value so repeated
+ // occurrences on the wire combine rather than replace.
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
+ if (hasHeader()) {
+ subBuilder.mergeFrom(getHeader());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setHeader(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ // Field 2 (targets, repeated): each occurrence appends one entry.
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addTargets(subBuilder.buildPartial());
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
+ if (hasSource()) {
+ subBuilder.mergeFrom(getSource());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setSource(subBuilder.buildPartial());
+ break;
+ }
+ case 32: {
+ // Field 4 (stage, enum): values not in this build's enum are kept
+ // as raw varints in unknownFields for forward compatibility.
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(4, rawValue);
+ } else {
+ bitField0_ |= 0x00000008;
+ stage_ = value;
+ }
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ pipelineSize_ = input.readUInt32();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ minBytesRcvd_ = input.readUInt64();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000040;
+ maxBytesRcvd_ = input.readUInt64();
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000080;
+ latestGenerationStamp_ = input.readUInt64();
+ break;
+ }
+ case 74: {
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder();
+ if (hasRequestedChecksum()) {
+ subBuilder.mergeFrom(getRequestedChecksum());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setRequestedChecksum(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ // One bit per field marking "explicitly set" on this builder; note the
+ // builder layout differs from the message layout (see buildPartial()).
+ private int bitField0_;
+
+ // required .ClientOperationHeaderProto header = 1;
+ // header_ holds the value while no nested builder exists; once
+ // headerBuilder_ is created it becomes the single source of truth and
+ // header_ is nulled (see getHeaderFieldBuilder()).
+ private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
+ // True if the required 'header' field has been explicitly set.
+ public boolean hasHeader() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ // Returns 'header' from whichever store is live: the plain field, or the
+ // nested builder once one has been created.
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
+ if (headerBuilder_ == null) {
+ return header_;
+ } else {
+ return headerBuilder_.getMessage();
+ }
+ }
+ // Replaces 'header' with the given message; rejects null explicitly so
+ // the failure surfaces at the call site rather than at build time.
+ public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
+ if (headerBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ header_ = value;
+ onChanged();
+ } else {
+ headerBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ // Convenience overload: builds the provided sub-builder and stores the
+ // result as 'header'.
+ public Builder setHeader(
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
+ if (headerBuilder_ == null) {
+ header_ = builderForValue.build();
+ onChanged();
+ } else {
+ headerBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ // Merges 'value' into any existing non-default 'header' (protobuf merge
+ // semantics); if unset or still the default instance, simply adopts
+ // 'value' wholesale.
+ public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
+ if (headerBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
+ header_ =
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
+ } else {
+ header_ = value;
+ }
+ onChanged();
+ } else {
+ headerBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ // Resets 'header' to the default instance and clears its has-bit.
+ public Builder clearHeader() {
+ if (headerBuilder_ == null) {
+ header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
+ onChanged();
+ } else {
+ headerBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ // Returns a mutable sub-builder for 'header', switching this field onto
+ // the nested-builder path and marking it as set.
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getHeaderFieldBuilder().getBuilder();
+ }
+ // Read-only view of 'header' without forcing builder creation: returns
+ // the live nested builder's view if one exists, else the plain field.
+ public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
+ if (headerBuilder_ != null) {
+ return headerBuilder_.getMessageOrBuilder();
+ } else {
+ return header_;
+ }
+ }
+ // Lazily creates the SingleFieldBuilder for 'header', seeding it with the
+ // current header_ value and then nulling header_ so the nested builder is
+ // the single source of truth from this point on.
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
+ getHeaderFieldBuilder() {
+ if (headerBuilder_ == null) {
+ headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
+ header_,
+ getParentForChildren(),
+ isClean());
+ header_ = null;
+ }
+ return headerBuilder_;
+ }
+
+ // repeated .DatanodeInfoProto targets = 2;
[... 5682 lines stripped ...]