Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/12/13 01:21:48 UTC
svn commit: r1213512 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/proto/
src/test/java/org/apache/hadoop/hdfs/
Author: suresh
Date: Tue Dec 13 00:21:48 2011
New Revision: 1213512
URL: http://svn.apache.org/viewvc?rev=1213512&view=rev
Log:
HDFS-2663. Handle protobuf optional parameters correctly. Contributed by Suresh Srinivas.
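The whole change applies one pattern: a proto2 optional field is set only when the corresponding value is non-null, and is read only after a hasX() probe. A minimal sketch using the getFileInfo names that appear in the diff below (the wrapper class is illustrative, not part of the commit):

    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class OptionalFieldSketch {
      // Write side: leave the optional field unset for a null result.
      static GetFileInfoResponseProto toProto(HdfsFileStatus status) {
        GetFileInfoResponseProto.Builder builder =
            GetFileInfoResponseProto.newBuilder();
        if (status != null) {
          builder.setFs(PBHelper.convert(status));
        }
        return builder.build();
      }

      // Read side: map an unset field back to null; calling getFs() blindly
      // would yield the default instance, not null.
      static HdfsFileStatus fromProto(GetFileInfoResponseProto resp) {
        return resp.hasFs() ? PBHelper.convert(resp.getFs()) : null;
      }
    }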
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Dec 13 00:21:48 2011
@@ -32,6 +32,8 @@ Trunk (unreleased changes)
HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol,
ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh)
+ HDFS-2663. Handle protobuf optional parameters correctly. (suresh)
+
IMPROVEMENTS
HADOOP-7524 Change RPC to allow multiple protocols including multiple
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Tue Dec 13 00:21:48 2011
@@ -24,6 +24,9 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -124,6 +128,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
@@ -208,11 +213,16 @@ public class ClientNamenodeProtocolServe
RpcController controller, GetBlockLocationsRequestProto req)
throws ServiceException {
try {
- return GetBlockLocationsResponseProto
- .newBuilder()
- .setLocations(
- PBHelper.convert(server.getBlockLocations(req.getSrc(),
- req.getOffset(), req.getLength()))).build();
+ LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
+ req.getLength());
+ Builder builder = GetBlockLocationsResponseProto
+ .newBuilder();
+ if (b != null) {
+ builder.setLocations(PBHelper.convert(b));
+ }
+ return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
@@ -325,7 +335,7 @@ public class ClientNamenodeProtocolServe
return AddBlockResponseProto.newBuilder().setBlock(
PBHelper.convert(
server.addBlock(req.getSrc(), req.getClientName(),
- PBHelper.convert(req.getPrevious()),
+ req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
PBHelper.convert(
(DatanodeInfoProto[]) req.getExcludeNodesList().toArray()))))
.build();
@@ -594,10 +604,14 @@ public class ClientNamenodeProtocolServe
RpcController controller, DistributedUpgradeProgressRequestProto req)
throws ServiceException {
try {
- UpgradeStatusReportProto result = PBHelper.convert(server
- .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
- return DistributedUpgradeProgressResponseProto.newBuilder()
- .setReport(result).build();
+ UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
+ .convert(req.getAction()));
+ DistributedUpgradeProgressResponseProto.Builder builder =
+ DistributedUpgradeProgressResponseProto.newBuilder();
+ if (result != null) {
+ builder.setReport(PBHelper.convert(result));
+ }
+ return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
@@ -636,9 +650,13 @@ public class ClientNamenodeProtocolServe
public GetFileInfoResponseProto getFileInfo(RpcController controller,
GetFileInfoRequestProto req) throws ServiceException {
try {
- HdfsFileStatusProto result =
- PBHelper.convert(server.getFileInfo(req.getSrc()));
- return GetFileInfoResponseProto.newBuilder().setFs(result).build();
+ HdfsFileStatus res = server.getFileInfo(req.getSrc());
+ GetFileInfoResponseProto.Builder builder =
+ GetFileInfoResponseProto.newBuilder();
+ if (res != null) {
+ builder.setFs(PBHelper.convert(res));
+ }
+ return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
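Why every one-shot chained build above was split into a guarded builder: generated proto2 setters reject null, so the old style had no way to express "leave the field unset". A one-line illustration of the failure the guard avoids (hypothetical value, for illustration only):

    // throws NullPointerException in the generated setter
    GetFileInfoResponseProto.newBuilder().setFs((HdfsFileStatusProto) null);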
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Tue Dec 13 00:21:48 2011
@@ -83,14 +83,17 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -205,7 +208,10 @@ public class ClientNamenodeProtocolTrans
.setLength(length)
.build();
try {
- return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
+ GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
+ req);
+ return resp.hasLocations() ?
+ PBHelper.convert(resp.getLocations()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -329,12 +335,15 @@ public class ClientNamenodeProtocolTrans
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException {
- AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
- .setClientName(clientName).setPrevious(PBHelper.convert(previous))
- .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
- .build();
+ AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
+ builder.setSrc(src)
+ .setClientName(clientName)
+ .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
+ if (previous != null) {
+ builder.setPrevious(PBHelper.convert(previous));
+ }
try {
- return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
+ return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -615,8 +624,9 @@ public class ClientNamenodeProtocolTrans
DistributedUpgradeProgressRequestProto.newBuilder().
setAction(PBHelper.convert(action)).build();
try {
- return PBHelper.convert(
- rpcProxy.distributedUpgradeProgress(null, req).getReport());
+ DistributedUpgradeProgressResponseProto res = rpcProxy
+ .distributedUpgradeProgress(null, req);
+ return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -653,7 +663,8 @@ public class ClientNamenodeProtocolTrans
GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
.setSrc(src).build();
try {
- return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
+ GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
+ return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
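The hasX() probes added above (and in the DatanodeProtocol translators below) guard against a proto2 quirk: for an unset optional message field the getter returns the type's default instance, never null. Without the probe, a missing file would come back as a status decoded from that empty default rather than the null the ClientProtocol contract promises. A hedged fragment:

    GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
    // getFs() on an unset field returns HdfsFileStatusProto.getDefaultInstance()
    return res.hasFs() ? PBHelper.convert(res.getFs()) : null;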
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Tue Dec 13 00:21:48 2011
@@ -204,7 +204,7 @@ public class DatanodeProtocolClientSideT
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
- return PBHelper.convert(resp.getCmd());
+ return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
@Override
@@ -262,7 +262,7 @@ public class DatanodeProtocolClientSideT
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
- return PBHelper.convert(resp.getCmd());
+ return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
@Override
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Tue Dec 13 00:21:48 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -108,7 +109,9 @@ public class DatanodeProtocolServerSideT
.newBuilder();
if (cmds != null) {
for (int i = 0; i < cmds.length; i++) {
- builder.addCmds(i, PBHelper.convert(cmds[i]));
+ if (cmds[i] != null) {
+ builder.addCmds(PBHelper.convert(cmds[i]));
+ }
}
}
return builder.build();
@@ -129,8 +132,12 @@ public class DatanodeProtocolServerSideT
} catch (IOException e) {
throw new ServiceException(e);
}
- return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
- .build();
+ BlockReportResponseProto.Builder builder =
+ BlockReportResponseProto.newBuilder();
+ if (cmd != null) {
+ builder.setCmd(PBHelper.convert(cmd));
+ }
+ return builder.build();
}
@Override
@@ -180,14 +187,20 @@ public class DatanodeProtocolServerSideT
@Override
public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
ProcessUpgradeRequestProto request) throws ServiceException {
- UpgradeCommand cmd;
+ UpgradeCommand ret;
try {
- cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
+ UpgradeCommand cmd = request.hasCmd() ? PBHelper
+ .convert(request.getCmd()) : null;
+ ret = impl.processUpgradeCommand(cmd);
} catch (IOException e) {
throw new ServiceException(e);
}
- return ProcessUpgradeResponseProto.newBuilder()
- .setCmd(PBHelper.convert(cmd)).build();
+ ProcessUpgradeResponseProto.Builder builder =
+ ProcessUpgradeResponseProto.newBuilder();
+ if (ret != null) {
+ builder.setCmd(PBHelper.convert(ret));
+ }
+ return builder.build();
}
@Override
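The heartbeat hunk above fixes two things at once: repeated protobuf fields cannot hold null elements, so null commands are now skipped, and the append form addCmds(value) replaces the insert-at-index form addCmds(i, value), which can throw IndexOutOfBoundsException once skipped entries make i run past the list size. A hedged fragment (cmds is the DatanodeCommand[] from the namenode, as in the diff):

    HeartbeatResponseProto.Builder builder = HeartbeatResponseProto.newBuilder();
    if (cmds != null) {
      for (DatanodeCommand cmd : cmds) {
        if (cmd != null) {                        // nulls are simply omitted
          builder.addCmds(PBHelper.convert(cmd));
        }
      }
    }
    return builder.build();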
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Tue Dec 13 00:21:48 2011
@@ -127,6 +127,10 @@ import com.google.protobuf.ByteString;
/**
* Utilities for converting protobuf classes to and from implementation classes.
+ *
+ * Note that when converting from an internal type to a protobuf type, the
+ * converters never return null for the protobuf type. The check for the
+ * internal type being null must be done before calling the convert() method.
*/
public class PBHelper {
private static final RegisterCommandProto REG_CMD_PROTO =
@@ -367,6 +371,7 @@ public class PBHelper {
}
public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+ if (cmd == null) return null;
switch (cmd.getType()) {
case CheckPointCommand:
CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -423,7 +428,8 @@ public class PBHelper {
if (di == null) return null;
return new DatanodeInfo(
PBHelper.convert(di.getId()),
- di.getLocation(), di.getHostName(),
+ di.hasLocation() ? di.getLocation() : null,
+ di.hasHostName() ? di.getHostName() : null,
di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() ,
PBHelper.convert(di.getAdminState()));
@@ -431,10 +437,16 @@ public class PBHelper {
static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
if (di == null) return null;
- return DatanodeInfoProto.newBuilder().
+ DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+ if (di.getHostName() != null) {
+ builder.setHostName(di.getHostName());
+ }
+ if (di.getNetworkLocation() != null) {
+ builder.setLocation(di.getNetworkLocation());
+ }
+
+ return builder.
setId(PBHelper.convert((DatanodeID) di)).
- setLocation(di.getNetworkLocation()).
- setHostName(di.getHostName()).
setCapacity(di.getCapacity()).
setDfsUsed(di.getDfsUsed()).
setRemaining(di.getRemaining()).
@@ -774,9 +786,14 @@ public class PBHelper {
public static ReceivedDeletedBlockInfoProto convert(
ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
- return ReceivedDeletedBlockInfoProto.newBuilder()
- .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
- .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+ ReceivedDeletedBlockInfoProto.Builder builder =
+ ReceivedDeletedBlockInfoProto.newBuilder();
+
+ if (receivedDeletedBlockInfo.getDelHints() != null) {
+ builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
+ }
+ return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+ .build();
}
public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -800,7 +817,7 @@ public class PBHelper {
public static ReceivedDeletedBlockInfo convert(
ReceivedDeletedBlockInfoProto proto) {
return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
- proto.getDeleteHint());
+ proto.hasDeleteHint() ? proto.getDeleteHint() : null);
}
public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -860,13 +877,10 @@ public class PBHelper {
// LocatedBlocks
public static LocatedBlocks convert(LocatedBlocksProto lb) {
- if (lb == null) {
- return null;
- }
return new LocatedBlocks(
lb.getFileLength(), lb.getUnderConstruction(),
PBHelper.convertLocatedBlock(lb.getBlocksList()),
- PBHelper.convert(lb.getLastBlock()),
+ lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
lb.getIsLastBlockComplete());
}
@@ -874,11 +888,15 @@ public class PBHelper {
if (lb == null) {
return null;
}
- return LocatedBlocksProto.newBuilder().
- setFileLength(lb.getFileLength()).
- setUnderConstruction(lb.isUnderConstruction()).
- addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
- setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+ LocatedBlocksProto.Builder builder =
+ LocatedBlocksProto.newBuilder();
+ if (lb.getLastLocatedBlock() != null) {
+ builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
+ }
+ return builder.setFileLength(lb.getFileLength())
+ .setUnderConstruction(lb.isUnderConstruction())
+ .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
+ .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -979,11 +997,16 @@ public class PBHelper {
setPermission(PBHelper.convert(fs.getPermission())).
setOwner(fs.getOwner()).
setGroup(fs.getGroup()).
- setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
- LocatedBlocks locations = null;
+
+ if (fs.getSymlink() != null) {
+ builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+ }
if (fs instanceof HdfsLocatedFileStatus) {
- builder.setLocations(PBHelper.convert(locations));
+ LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
+ if (locations != null) {
+ builder.setLocations(PBHelper.convert(locations));
+ }
}
return builder.build();
}
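Net effect of the PBHelper changes: nullable internal fields round-trip as absent optional fields. A hedged sketch, assuming the generated ReceivedDeletedBlockInfoProto, the constructor order (block, delete hint) used in the diff, and a placeholder Block instance blk:

    ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(blk, null);
    ReceivedDeletedBlockInfoProto proto = PBHelper.convert(info);
    assert !proto.hasDeleteHint();        // null hint was never set
    ReceivedDeletedBlockInfo back = PBHelper.convert(proto);
    assert back.getDelHints() == null;    // and comes back as null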
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Tue Dec 13 00:21:48 2011
@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
}
message GetBlockLocationsResponseProto {
- required LocatedBlocksProto locations = 1;
+ optional LocatedBlocksProto locations = 1;
}
message GetServerDefaultsRequestProto { // No parameters
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // v
message AddBlockRequestProto {
required string src = 1;
required string clientName = 2;
- required ExtendedBlockProto previous = 3;
+ optional ExtendedBlockProto previous = 3;
repeated DatanodeInfoProto excludeNodes = 4;
}
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressReques
required UpgradeActionProto action = 1;
}
message DistributedUpgradeProgressResponseProto {
- required UpgradeStatusReportProto report = 1;
+ optional UpgradeStatusReportProto report = 1;
}
message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
}
message GetFileInfoResponseProto {
- required HdfsFileStatusProto fs = 1;
+ optional HdfsFileStatusProto fs = 1;
}
message GetFileLinkInfoRequestProto {
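Relaxing these fields from required to optional changes validation, not encoding: a field that is set is serialized identically, but an absent field now passes the build/parse check instead of failing it. A hedged illustration with the generated Java class:

    // Legal once 'locations' is optional; with 'required' this build()
    // would have thrown UninitializedMessageException.
    GetBlockLocationsResponseProto empty =
        GetBlockLocationsResponseProto.newBuilder().build();
    assert !empty.hasLocations();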
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Tue Dec 13 00:21:48 2011
@@ -170,7 +170,7 @@ message HeartbeatRequestProto {
* cmds - Commands from namenode to datanode.
*/
message HeartbeatResponseProto {
- repeated DatanodeCommandProto cmds = 1;
+ repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
}
/**
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Tue Dec 13 00:21:48 2011
@@ -30,7 +30,8 @@ message ExtendedBlockProto {
required string poolId = 1; // Block pool id - globally unique across clusters
required uint64 blockId = 2; // the local id within a pool
required uint64 generationStamp = 3;
- optional uint64 numBytes = 4; // block len does not belong in ebid - here for historical reasons
+ optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid
+ // here for historical reasons
}
/**
@@ -65,12 +66,12 @@ message DatanodeInfosProto {
*/
message DatanodeInfoProto {
required DatanodeIDProto id = 1;
- optional uint64 capacity = 2;
- optional uint64 dfsUsed = 3;
- optional uint64 remaining = 4;
- optional uint64 blockPoolUsed = 5;
- optional uint64 lastUpdate = 6;
- optional uint32 xceiverCount = 7;
+ optional uint64 capacity = 2 [default = 0];
+ optional uint64 dfsUsed = 3 [default = 0];
+ optional uint64 remaining = 4 [default = 0];
+ optional uint64 blockPoolUsed = 5 [default = 0];
+ optional uint64 lastUpdate = 6 [default = 0];
+ optional uint32 xceiverCount = 7 [default = 0];
optional string location = 8;
optional string hostName = 9;
enum AdminState {
@@ -79,7 +80,7 @@ message DatanodeInfoProto {
DECOMMISSIONED = 2;
}
- optional AdminState adminState = 10;
+ optional AdminState adminState = 10 [default = NORMAL];
}
/**
@@ -162,8 +163,8 @@ message HdfsFileStatusProto {
optional bytes symlink = 9; // if symlink, target encoded java UTF8
// Optional fields for file
- optional uint32 block_replication = 10; // Actually a short - only 16bits used
- optional uint64 blocksize = 11;
+ optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+ optional uint64 blocksize = 11 [default = 0];
optional LocatedBlocksProto locations = 12; // supplied only if asked by client
}
@@ -218,7 +219,7 @@ message NamenodeRegistrationProto {
CHECKPOINT = 3;
}
required StorageInfoProto storageInfo = 3; // Node information
- optional NamenodeRoleProto role = 4; // Namenode role
+ optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
}
/**
@@ -264,7 +265,7 @@ message CheckpointCommandProto {
message BlockProto {
required uint64 blockId = 1;
required uint64 genStamp = 2;
- optional uint64 numBytes = 3;
+ optional uint64 numBytes = 3 [default = 0];
}
/**
@@ -313,7 +314,7 @@ message NamespaceInfoProto {
message BlockKeyProto {
required uint32 keyId = 1; // Key identifier
required uint64 expiryDate = 2; // Expiry time in milliseconds
- required bytes keyBytes = 3; // Key secret
+ optional bytes keyBytes = 3; // Key secret
}
/**
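The [default = ...] clauses make the reader's view of an omitted scalar explicit: proto2 getters return the declared default for an unset optional field. A hedged fragment using the generated ExtendedBlockProto:

    ExtendedBlockProto eb = ExtendedBlockProto.newBuilder()
        .setPoolId("bp-1").setBlockId(42L).setGenerationStamp(7L)
        .build();                      // numBytes deliberately left unset
    long len = eb.getNumBytes();       // 0, from [default = 0]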
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Dec 13 00:21:48 2011
@@ -507,6 +507,11 @@ public class MiniDFSCluster {
this.waitSafeMode = waitSafeMode;
// use alternate RPC engine if spec'd
+ /*
+ Turned off - see HDFS-2647 and HDFS-2660 for related comments.
+ This can be turned back on when Avro RPC is enabled using a mechanism
+ similar to the protobuf one.
+
String rpcEngineName = System.getProperty("hdfs.rpc.engine");
if (rpcEngineName != null && !"".equals(rpcEngineName)) {
@@ -530,6 +535,7 @@ public class MiniDFSCluster {
conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
false);
}
+ */
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java?rev=1213512&r1=1213511&r2=1213512&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java Tue Dec 13 00:21:48 2011
@@ -28,9 +28,16 @@ public class TestDfsOverAvroRpc extends
@Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
+ /*
+ Test turned off - see HDFS-2647 and HDFS-2660 for related comments.
+ This test can be turned on when Avro RPC is enabled using a mechanism
+ similar to the protobuf one.
+ */
+ /*
System.setProperty("hdfs.rpc.engine",
"org.apache.hadoop.ipc.AvroRpcEngine");
super.testWorkingDirectory();
+ */
}
}