Posted to hdfs-commits@hadoop.apache.org by sr...@apache.org on 2011/10/28 03:52:01 UTC
svn commit: r1190127 [2/3] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:
    CHANGES.txt
    src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
    src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
    src/proto/hdfs.proto
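This part of the commit regenerates HdfsProtos.java from the updated hdfs.proto: in ExtendedBlockProto, generationStamp becomes tag 3 (required) and numBytes moves to tag 4 (optional); DatanodeIDProto gains a required uint32 ipcPort = 4; and new ContentSummaryProto, CorruptFileBlocksProto, and FsPermissionProto messages are added. A minimal sketch of how the regenerated builders are used follows, assuming the standard setters protoc emits for the fields shown in the diff (setPoolId, setBlockId, setName, setStorageID, and setInfoPort are not visible in this hunk and are assumed; setGenerationStamp, setNumBytes, and setIpcPort appear below in the generated code):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class HdfsProtosSketch {
      public static void main(String[] args) {
        // generationStamp is now tag 3 (required); numBytes moved to tag 4 (optional),
        // so callers may omit it and receivers should check hasNumBytes().
        ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
            .setPoolId("BP-example")          // assumed setter for required string poolId = 1
            .setBlockId(1073741825L)          // assumed setter for required uint64 blockId = 2
            .setGenerationStamp(1001L)        // setter shown in this diff
            .setNumBytes(134217728L)          // optional; setter shown in this diff
            .build();

        // DatanodeIDProto now carries a required ipcPort (tag 4) alongside infoPort.
        DatanodeIDProto dn = DatanodeIDProto.newBuilder()
            .setName("127.0.0.1:50010")       // assumed setter for required string name = 1
            .setStorageID("DS-1")             // assumed setter for required string storageID = 2
            .setInfoPort(50075)               // assumed setter for required uint32 infoPort = 3
            .setIpcPort(50020)                // setter shown in this diff
            .build();

        System.out.println(block.getGenerationStamp() + " " + dn.getIpcPort());
      }
    }

Because ipcPort is required, isInitialized() on DatanodeIDProto now fails unless it is set, whereas a missing numBytes no longer makes ExtendedBlockProto uninitialized.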
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java?rev=1190127&r1=1190126&r2=1190127&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java Fri Oct 28 01:52:00 2011
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: hdfs.proto
@@ -37,13 +36,13 @@ public final class HdfsProtos {
boolean hasBlockId();
long getBlockId();
- // required uint64 numBytes = 3;
- boolean hasNumBytes();
- long getNumBytes();
-
- // required uint64 generationStamp = 4;
+ // required uint64 generationStamp = 3;
boolean hasGenerationStamp();
long getGenerationStamp();
+
+ // optional uint64 numBytes = 4;
+ boolean hasNumBytes();
+ long getNumBytes();
}
public static final class ExtendedBlockProto extends
com.google.protobuf.GeneratedMessage
@@ -116,31 +115,31 @@ public final class HdfsProtos {
return blockId_;
}
- // required uint64 numBytes = 3;
- public static final int NUMBYTES_FIELD_NUMBER = 3;
- private long numBytes_;
- public boolean hasNumBytes() {
+ // required uint64 generationStamp = 3;
+ public static final int GENERATIONSTAMP_FIELD_NUMBER = 3;
+ private long generationStamp_;
+ public boolean hasGenerationStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
- public long getNumBytes() {
- return numBytes_;
+ public long getGenerationStamp() {
+ return generationStamp_;
}
- // required uint64 generationStamp = 4;
- public static final int GENERATIONSTAMP_FIELD_NUMBER = 4;
- private long generationStamp_;
- public boolean hasGenerationStamp() {
+ // optional uint64 numBytes = 4;
+ public static final int NUMBYTES_FIELD_NUMBER = 4;
+ private long numBytes_;
+ public boolean hasNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
- public long getGenerationStamp() {
- return generationStamp_;
+ public long getNumBytes() {
+ return numBytes_;
}
private void initFields() {
poolId_ = "";
blockId_ = 0L;
- numBytes_ = 0L;
generationStamp_ = 0L;
+ numBytes_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -155,10 +154,6 @@ public final class HdfsProtos {
memoizedIsInitialized = 0;
return false;
}
- if (!hasNumBytes()) {
- memoizedIsInitialized = 0;
- return false;
- }
if (!hasGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
@@ -177,10 +172,10 @@ public final class HdfsProtos {
output.writeUInt64(2, blockId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeUInt64(3, numBytes_);
+ output.writeUInt64(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeUInt64(4, generationStamp_);
+ output.writeUInt64(4, numBytes_);
}
getUnknownFields().writeTo(output);
}
@@ -201,11 +196,11 @@ public final class HdfsProtos {
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(3, numBytes_);
+ .computeUInt64Size(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(4, generationStamp_);
+ .computeUInt64Size(4, numBytes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -240,16 +235,16 @@ public final class HdfsProtos {
result = result && (getBlockId()
== other.getBlockId());
}
- result = result && (hasNumBytes() == other.hasNumBytes());
- if (hasNumBytes()) {
- result = result && (getNumBytes()
- == other.getNumBytes());
- }
result = result && (hasGenerationStamp() == other.hasGenerationStamp());
if (hasGenerationStamp()) {
result = result && (getGenerationStamp()
== other.getGenerationStamp());
}
+ result = result && (hasNumBytes() == other.hasNumBytes());
+ if (hasNumBytes()) {
+ result = result && (getNumBytes()
+ == other.getNumBytes());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -267,14 +262,14 @@ public final class HdfsProtos {
hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockId());
}
- if (hasNumBytes()) {
- hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
- hash = (53 * hash) + hashLong(getNumBytes());
- }
if (hasGenerationStamp()) {
hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getGenerationStamp());
}
+ if (hasNumBytes()) {
+ hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNumBytes());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@@ -395,9 +390,9 @@ public final class HdfsProtos {
bitField0_ = (bitField0_ & ~0x00000001);
blockId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
- numBytes_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000004);
generationStamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ numBytes_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -448,11 +443,11 @@ public final class HdfsProtos {
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
- result.numBytes_ = numBytes_;
+ result.generationStamp_ = generationStamp_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
- result.generationStamp_ = generationStamp_;
+ result.numBytes_ = numBytes_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -475,12 +470,12 @@ public final class HdfsProtos {
if (other.hasBlockId()) {
setBlockId(other.getBlockId());
}
- if (other.hasNumBytes()) {
- setNumBytes(other.getNumBytes());
- }
if (other.hasGenerationStamp()) {
setGenerationStamp(other.getGenerationStamp());
}
+ if (other.hasNumBytes()) {
+ setNumBytes(other.getNumBytes());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -494,10 +489,6 @@ public final class HdfsProtos {
return false;
}
- if (!hasNumBytes()) {
-
- return false;
- }
if (!hasGenerationStamp()) {
return false;
@@ -540,12 +531,12 @@ public final class HdfsProtos {
}
case 24: {
bitField0_ |= 0x00000004;
- numBytes_ = input.readUInt64();
+ generationStamp_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
- generationStamp_ = input.readUInt64();
+ numBytes_ = input.readUInt64();
break;
}
}
@@ -611,44 +602,44 @@ public final class HdfsProtos {
return this;
}
- // required uint64 numBytes = 3;
- private long numBytes_ ;
- public boolean hasNumBytes() {
+ // required uint64 generationStamp = 3;
+ private long generationStamp_ ;
+ public boolean hasGenerationStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
- public long getNumBytes() {
- return numBytes_;
+ public long getGenerationStamp() {
+ return generationStamp_;
}
- public Builder setNumBytes(long value) {
+ public Builder setGenerationStamp(long value) {
bitField0_ |= 0x00000004;
- numBytes_ = value;
+ generationStamp_ = value;
onChanged();
return this;
}
- public Builder clearNumBytes() {
+ public Builder clearGenerationStamp() {
bitField0_ = (bitField0_ & ~0x00000004);
- numBytes_ = 0L;
+ generationStamp_ = 0L;
onChanged();
return this;
}
- // required uint64 generationStamp = 4;
- private long generationStamp_ ;
- public boolean hasGenerationStamp() {
+ // optional uint64 numBytes = 4;
+ private long numBytes_ ;
+ public boolean hasNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
- public long getGenerationStamp() {
- return generationStamp_;
+ public long getNumBytes() {
+ return numBytes_;
}
- public Builder setGenerationStamp(long value) {
+ public Builder setNumBytes(long value) {
bitField0_ |= 0x00000008;
- generationStamp_ = value;
+ numBytes_ = value;
onChanged();
return this;
}
- public Builder clearGenerationStamp() {
+ public Builder clearNumBytes() {
bitField0_ = (bitField0_ & ~0x00000008);
- generationStamp_ = 0L;
+ numBytes_ = 0L;
onChanged();
return this;
}
@@ -1359,6 +1350,10 @@ public final class HdfsProtos {
// required uint32 infoPort = 3;
boolean hasInfoPort();
int getInfoPort();
+
+ // required uint32 ipcPort = 4;
+ boolean hasIpcPort();
+ int getIpcPort();
}
public static final class DatanodeIDProto extends
com.google.protobuf.GeneratedMessage
@@ -1463,10 +1458,21 @@ public final class HdfsProtos {
return infoPort_;
}
+ // required uint32 ipcPort = 4;
+ public static final int IPCPORT_FIELD_NUMBER = 4;
+ private int ipcPort_;
+ public boolean hasIpcPort() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getIpcPort() {
+ return ipcPort_;
+ }
+
private void initFields() {
name_ = "";
storageID_ = "";
infoPort_ = 0;
+ ipcPort_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -1485,6 +1491,10 @@ public final class HdfsProtos {
memoizedIsInitialized = 0;
return false;
}
+ if (!hasIpcPort()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
memoizedIsInitialized = 1;
return true;
}
@@ -1501,6 +1511,9 @@ public final class HdfsProtos {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, infoPort_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt32(4, ipcPort_);
+ }
getUnknownFields().writeTo(output);
}
@@ -1522,6 +1535,10 @@ public final class HdfsProtos {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, infoPort_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(4, ipcPort_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -1560,6 +1577,11 @@ public final class HdfsProtos {
result = result && (getInfoPort()
== other.getInfoPort());
}
+ result = result && (hasIpcPort() == other.hasIpcPort());
+ if (hasIpcPort()) {
+ result = result && (getIpcPort()
+ == other.getIpcPort());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -1581,6 +1603,10 @@ public final class HdfsProtos {
hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoPort();
}
+ if (hasIpcPort()) {
+ hash = (37 * hash) + IPCPORT_FIELD_NUMBER;
+ hash = (53 * hash) + getIpcPort();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@@ -1703,6 +1729,8 @@ public final class HdfsProtos {
bitField0_ = (bitField0_ & ~0x00000002);
infoPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
+ ipcPort_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -1753,6 +1781,10 @@ public final class HdfsProtos {
to_bitField0_ |= 0x00000004;
}
result.infoPort_ = infoPort_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.ipcPort_ = ipcPort_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -1778,6 +1810,9 @@ public final class HdfsProtos {
if (other.hasInfoPort()) {
setInfoPort(other.getInfoPort());
}
+ if (other.hasIpcPort()) {
+ setIpcPort(other.getIpcPort());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -1795,6 +1830,10 @@ public final class HdfsProtos {
return false;
}
+ if (!hasIpcPort()) {
+
+ return false;
+ }
return true;
}
@@ -1836,6 +1875,11 @@ public final class HdfsProtos {
infoPort_ = input.readUInt32();
break;
}
+ case 32: {
+ bitField0_ |= 0x00000008;
+ ipcPort_ = input.readUInt32();
+ break;
+ }
}
}
}
@@ -1935,6 +1979,27 @@ public final class HdfsProtos {
return this;
}
+ // required uint32 ipcPort = 4;
+ private int ipcPort_ ;
+ public boolean hasIpcPort() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public int getIpcPort() {
+ return ipcPort_;
+ }
+ public Builder setIpcPort(int value) {
+ bitField0_ |= 0x00000008;
+ ipcPort_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearIpcPort() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ ipcPort_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:DatanodeIDProto)
}
@@ -3168,90 +3233,7274 @@ public final class HdfsProtos {
// @@protoc_insertion_point(class_scope:DatanodeInfoProto)
}
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ExtendedBlockProto_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ExtendedBlockProto_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_BlockTokenIdentifierProto_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_BlockTokenIdentifierProto_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DatanodeIDProto_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DatanodeIDProto_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DatanodeInfoProto_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DatanodeInfoProto_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
+ public interface ContentSummaryProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 length = 1;
+ boolean hasLength();
+ long getLength();
+
+ // required uint64 fileCount = 2;
+ boolean hasFileCount();
+ long getFileCount();
+
+ // required uint64 directoryCount = 3;
+ boolean hasDirectoryCount();
+ long getDirectoryCount();
+
+ // required uint64 quota = 4;
+ boolean hasQuota();
+ long getQuota();
+
+ // required uint64 spaceConsumed = 5;
+ boolean hasSpaceConsumed();
+ long getSpaceConsumed();
+
+ // required uint64 spaceQuota = 6;
+ boolean hasSpaceQuota();
+ long getSpaceQuota();
}
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\nhdfs.proto\"`\n\022ExtendedBlockProto\022\016\n\006po" +
- "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\020\n\010numBytes\030" +
- "\003 \002(\004\022\027\n\017generationStamp\030\004 \002(\004\"`\n\031BlockT" +
- "okenIdentifierProto\022\022\n\nidentifier\030\001 \002(\014\022" +
- "\020\n\010password\030\002 \002(\014\022\014\n\004kind\030\003 \002(\t\022\017\n\007servi" +
- "ce\030\004 \002(\t\"D\n\017DatanodeIDProto\022\014\n\004name\030\001 \002(" +
- "\t\022\021\n\tstorageID\030\002 \002(\t\022\020\n\010infoPort\030\003 \002(\r\"\312" +
- "\002\n\021DatanodeInfoProto\022\034\n\002id\030\001 \002(\0132\020.Datan" +
- "odeIDProto\022\020\n\010capacity\030\002 \001(\004\022\017\n\007dfsUsed\030" +
- "\003 \001(\004\022\021\n\tremaining\030\004 \001(\004\022\025\n\rblockPoolUse",
- "d\030\005 \001(\004\022\022\n\nlastUpdate\030\006 \001(\004\022\024\n\014xceiverCo" +
- "unt\030\007 \001(\r\022\020\n\010location\030\010 \001(\t\022\020\n\010hostName\030" +
- "\t \001(\t\0221\n\nadminState\030\n \001(\0162\035.DatanodeInfo" +
- "Proto.AdminState\"I\n\nAdminState\022\n\n\006NORMAL" +
- "\020\000\022\033\n\027DECOMMISSION_INPROGRESS\020\001\022\022\n\016DECOM" +
- "MISSIONED\020\002B6\n%org.apache.hadoop.hdfs.pr" +
- "otocol.protoB\nHdfsProtos\240\001\001"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_ExtendedBlockProto_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_ExtendedBlockProto_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_ExtendedBlockProto_descriptor,
- new java.lang.String[] { "PoolId", "BlockId", "NumBytes", "GenerationStamp", },
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class,
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
- internal_static_BlockTokenIdentifierProto_descriptor =
- getDescriptor().getMessageTypes().get(1);
- internal_static_BlockTokenIdentifierProto_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_BlockTokenIdentifierProto_descriptor,
- new java.lang.String[] { "Identifier", "Password", "Kind", "Service", },
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.class,
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder.class);
- internal_static_DatanodeIDProto_descriptor =
- getDescriptor().getMessageTypes().get(2);
- internal_static_DatanodeIDProto_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_DatanodeIDProto_descriptor,
- new java.lang.String[] { "Name", "StorageID", "InfoPort", },
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class,
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
- internal_static_DatanodeInfoProto_descriptor =
- getDescriptor().getMessageTypes().get(3);
- internal_static_DatanodeInfoProto_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_DatanodeInfoProto_descriptor,
- new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "HostName", "AdminState", },
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class,
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
+ public static final class ContentSummaryProto extends
+ com.google.protobuf.GeneratedMessage
+ implements ContentSummaryProtoOrBuilder {
+ // Use ContentSummaryProto.newBuilder() to construct.
+ private ContentSummaryProto(Builder builder) {
+ super(builder);
+ }
+ private ContentSummaryProto(boolean noInit) {}
+
+ private static final ContentSummaryProto defaultInstance;
+ public static ContentSummaryProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ContentSummaryProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required uint64 length = 1;
+ public static final int LENGTH_FIELD_NUMBER = 1;
+ private long length_;
+ public boolean hasLength() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLength() {
+ return length_;
+ }
+
+ // required uint64 fileCount = 2;
+ public static final int FILECOUNT_FIELD_NUMBER = 2;
+ private long fileCount_;
+ public boolean hasFileCount() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getFileCount() {
+ return fileCount_;
+ }
+
+ // required uint64 directoryCount = 3;
+ public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3;
+ private long directoryCount_;
+ public boolean hasDirectoryCount() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getDirectoryCount() {
+ return directoryCount_;
+ }
+
+ // required uint64 quota = 4;
+ public static final int QUOTA_FIELD_NUMBER = 4;
+ private long quota_;
+ public boolean hasQuota() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public long getQuota() {
+ return quota_;
+ }
+
+ // required uint64 spaceConsumed = 5;
+ public static final int SPACECONSUMED_FIELD_NUMBER = 5;
+ private long spaceConsumed_;
+ public boolean hasSpaceConsumed() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ public long getSpaceConsumed() {
+ return spaceConsumed_;
+ }
+
+ // required uint64 spaceQuota = 6;
+ public static final int SPACEQUOTA_FIELD_NUMBER = 6;
+ private long spaceQuota_;
+ public boolean hasSpaceQuota() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ public long getSpaceQuota() {
+ return spaceQuota_;
+ }
+
+ private void initFields() {
+ length_ = 0L;
+ fileCount_ = 0L;
+ directoryCount_ = 0L;
+ quota_ = 0L;
+ spaceConsumed_ = 0L;
+ spaceQuota_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasLength()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasFileCount()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasDirectoryCount()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasQuota()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSpaceConsumed()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSpaceQuota()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, length_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, fileCount_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, directoryCount_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(4, quota_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeUInt64(5, spaceConsumed_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeUInt64(6, spaceQuota_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, length_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, fileCount_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, directoryCount_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(4, quota_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(5, spaceConsumed_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(6, spaceQuota_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj;
+
+ boolean result = true;
+ result = result && (hasLength() == other.hasLength());
+ if (hasLength()) {
+ result = result && (getLength()
+ == other.getLength());
+ }
+ result = result && (hasFileCount() == other.hasFileCount());
+ if (hasFileCount()) {
+ result = result && (getFileCount()
+ == other.getFileCount());
+ }
+ result = result && (hasDirectoryCount() == other.hasDirectoryCount());
+ if (hasDirectoryCount()) {
+ result = result && (getDirectoryCount()
+ == other.getDirectoryCount());
+ }
+ result = result && (hasQuota() == other.hasQuota());
+ if (hasQuota()) {
+ result = result && (getQuota()
+ == other.getQuota());
+ }
+ result = result && (hasSpaceConsumed() == other.hasSpaceConsumed());
+ if (hasSpaceConsumed()) {
+ result = result && (getSpaceConsumed()
+ == other.getSpaceConsumed());
+ }
+ result = result && (hasSpaceQuota() == other.hasSpaceQuota());
+ if (hasSpaceQuota()) {
+ result = result && (getSpaceQuota()
+ == other.getSpaceQuota());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasLength()) {
+ hash = (37 * hash) + LENGTH_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLength());
+ }
+ if (hasFileCount()) {
+ hash = (37 * hash) + FILECOUNT_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getFileCount());
+ }
+ if (hasDirectoryCount()) {
+ hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getDirectoryCount());
+ }
+ if (hasQuota()) {
+ hash = (37 * hash) + QUOTA_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getQuota());
+ }
+ if (hasSpaceConsumed()) {
+ hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getSpaceConsumed());
+ }
+ if (hasSpaceQuota()) {
+ hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getSpaceQuota());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ length_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ fileCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ directoryCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ quota_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ spaceConsumed_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ spaceQuota_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.length_ = length_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.fileCount_ = fileCount_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.directoryCount_ = directoryCount_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.quota_ = quota_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.spaceConsumed_ = spaceConsumed_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.spaceQuota_ = spaceQuota_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this;
+ if (other.hasLength()) {
+ setLength(other.getLength());
+ }
+ if (other.hasFileCount()) {
+ setFileCount(other.getFileCount());
+ }
+ if (other.hasDirectoryCount()) {
+ setDirectoryCount(other.getDirectoryCount());
+ }
+ if (other.hasQuota()) {
+ setQuota(other.getQuota());
+ }
+ if (other.hasSpaceConsumed()) {
+ setSpaceConsumed(other.getSpaceConsumed());
+ }
+ if (other.hasSpaceQuota()) {
+ setSpaceQuota(other.getSpaceQuota());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasLength()) {
+
+ return false;
+ }
+ if (!hasFileCount()) {
+
+ return false;
+ }
+ if (!hasDirectoryCount()) {
+
+ return false;
+ }
+ if (!hasQuota()) {
+
+ return false;
+ }
+ if (!hasSpaceConsumed()) {
+
+ return false;
+ }
+ if (!hasSpaceQuota()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ length_ = input.readUInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ fileCount_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ directoryCount_ = input.readUInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ quota_ = input.readUInt64();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ spaceConsumed_ = input.readUInt64();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ spaceQuota_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required uint64 length = 1;
+ private long length_ ;
+ public boolean hasLength() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLength() {
+ return length_;
+ }
+ public Builder setLength(long value) {
+ bitField0_ |= 0x00000001;
+ length_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearLength() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ length_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 fileCount = 2;
+ private long fileCount_ ;
+ public boolean hasFileCount() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getFileCount() {
+ return fileCount_;
+ }
+ public Builder setFileCount(long value) {
+ bitField0_ |= 0x00000002;
+ fileCount_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearFileCount() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ fileCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 directoryCount = 3;
+ private long directoryCount_ ;
+ public boolean hasDirectoryCount() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getDirectoryCount() {
+ return directoryCount_;
+ }
+ public Builder setDirectoryCount(long value) {
+ bitField0_ |= 0x00000004;
+ directoryCount_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearDirectoryCount() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ directoryCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 quota = 4;
+ private long quota_ ;
+ public boolean hasQuota() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public long getQuota() {
+ return quota_;
+ }
+ public Builder setQuota(long value) {
+ bitField0_ |= 0x00000008;
+ quota_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearQuota() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ quota_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 spaceConsumed = 5;
+ private long spaceConsumed_ ;
+ public boolean hasSpaceConsumed() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ public long getSpaceConsumed() {
+ return spaceConsumed_;
+ }
+ public Builder setSpaceConsumed(long value) {
+ bitField0_ |= 0x00000010;
+ spaceConsumed_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearSpaceConsumed() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ spaceConsumed_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 spaceQuota = 6;
+ private long spaceQuota_ ;
+ public boolean hasSpaceQuota() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ public long getSpaceQuota() {
+ return spaceQuota_;
+ }
+ public Builder setSpaceQuota(long value) {
+ bitField0_ |= 0x00000020;
+ spaceQuota_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearSpaceQuota() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ spaceQuota_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ContentSummaryProto)
+ }
+
+ static {
+ defaultInstance = new ContentSummaryProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ContentSummaryProto)
+ }
+
+ public interface CorruptFileBlocksProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated string files = 1;
+ java.util.List<String> getFilesList();
+ int getFilesCount();
+ String getFiles(int index);
+
+ // required string cookie = 2;
+ boolean hasCookie();
+ String getCookie();
+ }
+ public static final class CorruptFileBlocksProto extends
+ com.google.protobuf.GeneratedMessage
+ implements CorruptFileBlocksProtoOrBuilder {
+ // Use CorruptFileBlocksProto.newBuilder() to construct.
+ private CorruptFileBlocksProto(Builder builder) {
+ super(builder);
+ }
+ private CorruptFileBlocksProto(boolean noInit) {}
+
+ private static final CorruptFileBlocksProto defaultInstance;
+ public static CorruptFileBlocksProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CorruptFileBlocksProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // repeated string files = 1;
+ public static final int FILES_FIELD_NUMBER = 1;
+ private com.google.protobuf.LazyStringList files_;
+ public java.util.List<String>
+ getFilesList() {
+ return files_;
+ }
+ public int getFilesCount() {
+ return files_.size();
+ }
+ public String getFiles(int index) {
+ return files_.get(index);
+ }
+
+ // required string cookie = 2;
+ public static final int COOKIE_FIELD_NUMBER = 2;
+ private java.lang.Object cookie_;
+ public boolean hasCookie() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getCookie() {
+ java.lang.Object ref = cookie_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ cookie_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getCookieBytes() {
+ java.lang.Object ref = cookie_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ cookie_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ cookie_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasCookie()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < files_.size(); i++) {
+ output.writeBytes(1, files_.getByteString(i));
+ }
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(2, getCookieBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < files_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(files_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getFilesList().size();
+ }
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getCookieBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj;
+
+ boolean result = true;
+ result = result && getFilesList()
+ .equals(other.getFilesList());
+ result = result && (hasCookie() == other.hasCookie());
+ if (hasCookie()) {
+ result = result && getCookie()
+ .equals(other.getCookie());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getFilesCount() > 0) {
+ hash = (37 * hash) + FILES_FIELD_NUMBER;
+ hash = (53 * hash) + getFilesList().hashCode();
+ }
+ if (hasCookie()) {
+ hash = (37 * hash) + COOKIE_FIELD_NUMBER;
+ hash = (53 * hash) + getCookie().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ cookie_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ files_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ files_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.files_ = files_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.cookie_ = cookie_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this;
+ if (!other.files_.isEmpty()) {
+ if (files_.isEmpty()) {
+ files_ = other.files_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureFilesIsMutable();
+ files_.addAll(other.files_);
+ }
+ onChanged();
+ }
+ if (other.hasCookie()) {
+ setCookie(other.getCookie());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasCookie()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ ensureFilesIsMutable();
+ files_.add(input.readBytes());
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ cookie_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // repeated string files = 1;
+ private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureFilesIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ files_ = new com.google.protobuf.LazyStringArrayList(files_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+ public java.util.List<String>
+ getFilesList() {
+ return java.util.Collections.unmodifiableList(files_);
+ }
+ public int getFilesCount() {
+ return files_.size();
+ }
+ public String getFiles(int index) {
+ return files_.get(index);
+ }
+ public Builder setFiles(
+ int index, String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFilesIsMutable();
+ files_.set(index, value);
+ onChanged();
+ return this;
+ }
+ public Builder addFiles(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFilesIsMutable();
+ files_.add(value);
+ onChanged();
+ return this;
+ }
+ public Builder addAllFiles(
+ java.lang.Iterable<String> values) {
+ ensureFilesIsMutable();
+ super.addAll(values, files_);
+ onChanged();
+ return this;
+ }
+ public Builder clearFiles() {
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ void addFiles(com.google.protobuf.ByteString value) {
+ ensureFilesIsMutable();
+ files_.add(value);
+ onChanged();
+ }
+
+ // required string cookie = 2;
+ private java.lang.Object cookie_ = "";
+ public boolean hasCookie() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getCookie() {
+ java.lang.Object ref = cookie_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ cookie_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setCookie(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ cookie_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearCookie() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ cookie_ = getDefaultInstance().getCookie();
+ onChanged();
+ return this;
+ }
+ void setCookie(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000002;
+ cookie_ = value;
+ onChanged();
+ }
+
+ // @@protoc_insertion_point(builder_scope:CorruptFileBlocksProto)
+ }
+
+ static {
+ defaultInstance = new CorruptFileBlocksProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CorruptFileBlocksProto)
+ }
+
+ public interface FsPermissionProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint32 perm = 1;
+ boolean hasPerm();
+ int getPerm();
+ }
+ public static final class FsPermissionProto extends
+ com.google.protobuf.GeneratedMessage
+ implements FsPermissionProtoOrBuilder {
+ // Use FsPermissionProto.newBuilder() to construct.
+ private FsPermissionProto(Builder builder) {
+ super(builder);
+ }
+ private FsPermissionProto(boolean noInit) {}
+
+ private static final FsPermissionProto defaultInstance;
+ public static FsPermissionProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public FsPermissionProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required uint32 perm = 1;
+ public static final int PERM_FIELD_NUMBER = 1;
+ private int perm_;
+ public boolean hasPerm() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public int getPerm() {
+ return perm_;
+ }
+
+ private void initFields() {
+ perm_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasPerm()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt32(1, perm_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(1, perm_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) obj;
+
+ boolean result = true;
+ result = result && (hasPerm() == other.hasPerm());
+ if (hasPerm()) {
+ result = result && (getPerm()
+ == other.getPerm());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasPerm()) {
+ hash = (37 * hash) + PERM_FIELD_NUMBER;
+ hash = (53 * hash) + getPerm();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ perm_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.perm_ = perm_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) return this;
+ if (other.hasPerm()) {
+ setPerm(other.getPerm());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasPerm()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ perm_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required uint32 perm = 1;
+ private int perm_ ;
+ public boolean hasPerm() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public int getPerm() {
+ return perm_;
+ }
+ public Builder setPerm(int value) {
+ bitField0_ |= 0x00000001;
+ perm_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearPerm() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ perm_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:FsPermissionProto)
+ }
+
+ static {
+ defaultInstance = new FsPermissionProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:FsPermissionProto)
+ }
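(Not part of the committed diff: a round-trip sketch for FsPermissionProto using the setPerm and parseFrom members shown above. The octal permission value is illustrative, and toByteString() is the standard protobuf MessageLite serializer, assumed rather than shown in this hunk.)

    // Build, serialize, and re-parse an FsPermissionProto.
    HdfsProtos.FsPermissionProto perm =
        HdfsProtos.FsPermissionProto.newBuilder()
            .setPerm(0644)        // required uint32 perm = 1 (octal 644 = rw-r--r--)
            .build();
    com.google.protobuf.ByteString bytes = perm.toByteString();
    HdfsProtos.FsPermissionProto copy =
        HdfsProtos.FsPermissionProto.parseFrom(bytes);
    assert copy.getPerm() == perm.getPerm();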
+
+ public interface LocatedBlockProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ExtendedBlockProto b = 1;
+ boolean hasB();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder();
+
+ // required uint64 offset = 2;
+ boolean hasOffset();
+ long getOffset();
+
+ // repeated .DatanodeInfoProto locs = 3;
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
+ getLocsList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index);
+ int getLocsCount();
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getLocsOrBuilderList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
+ int index);
+
+ // required bool corrupt = 4;
+ boolean hasCorrupt();
+ boolean getCorrupt();
+
+ // required .BlockTokenIdentifierProto blockToken = 5;
+ boolean hasBlockToken();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder();
+ }
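(Not part of the committed diff: a read-side sketch against the LocatedBlockProtoOrBuilder accessors declared above; printLocations is a hypothetical helper, not anything in this commit.)

    // Hypothetical helper that walks a LocatedBlockProto using only the
    // accessors declared in the interface above.
    static void printLocations(HdfsProtos.LocatedBlockProto lb) {
      System.out.println("offset=" + lb.getOffset()
          + " corrupt=" + lb.getCorrupt()
          + " replicas=" + lb.getLocsCount());
      for (int i = 0; i < lb.getLocsCount(); i++) {
        HdfsProtos.DatanodeInfoProto dn = lb.getLocs(i);  // repeated DatanodeInfoProto locs = 3
        // DatanodeInfoProto accessors are defined elsewhere in this generated file.
      }
    }
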
+ public static final class LocatedBlockProto extends
+ com.google.protobuf.GeneratedMessage
+ implements LocatedBlockProtoOrBuilder {
+ // Use LocatedBlockProto.newBuilder() to construct.
+ private LocatedBlockProto(Builder builder) {
+ super(builder);
+ }
+ private LocatedBlockProto(boolean noInit) {}
+
+ private static final LocatedBlockProto defaultInstance;
+ public static LocatedBlockProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public LocatedBlockProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .ExtendedBlockProto b = 1;
+ public static final int B_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
+ public boolean hasB() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
+ return b_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
+ return b_;
+ }
+
+ // required uint64 offset = 2;
+ public static final int OFFSET_FIELD_NUMBER = 2;
+ private long offset_;
+ public boolean hasOffset() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getOffset() {
+ return offset_;
+ }
+
+ // repeated .DatanodeInfoProto locs = 3;
+ public static final int LOCS_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_;
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
+ return locs_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
+ getLocsOrBuilderList() {
+ return locs_;
+ }
+ public int getLocsCount() {
+ return locs_.size();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
+ return locs_.get(index);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
+ int index) {
+ return locs_.get(index);
+ }
+
+ // required bool corrupt = 4;
+ public static final int CORRUPT_FIELD_NUMBER = 4;
+ private boolean corrupt_;
+ public boolean hasCorrupt() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public boolean getCorrupt() {
+ return corrupt_;
+ }
+
+ // required .BlockTokenIdentifierProto blockToken = 5;
+ public static final int BLOCKTOKEN_FIELD_NUMBER = 5;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto blockToken_;
+ public boolean hasBlockToken() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken() {
+ return blockToken_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder() {
+ return blockToken_;
+ }
+
+ private void initFields() {
+ b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ offset_ = 0L;
+ locs_ = java.util.Collections.emptyList();
+ corrupt_ = false;
+ blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasB()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasOffset()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCorrupt()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBlockToken()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getB().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getLocsCount(); i++) {
+ if (!getLocs(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (!getBlockToken().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, b_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, offset_);
+ }
+ for (int i = 0; i < locs_.size(); i++) {
+ output.writeMessage(3, locs_.get(i));
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(4, corrupt_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(5, blockToken_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, b_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, offset_);
+ }
+ for (int i = 0; i < locs_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, locs_.get(i));
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, corrupt_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, blockToken_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj;
+
+ boolean result = true;
+ result = result && (hasB() == other.hasB());
+ if (hasB()) {
+ result = result && getB()
+ .equals(other.getB());
+ }
[... 5363 lines stripped ...]