Posted to common-commits@hadoop.apache.org by um...@apache.org on 2014/11/28 16:44:26 UTC
hadoop git commit: HDFS-7210. Avoid two separate RPC's namenode.append() and namenode.getFileInfo() for an append call from DFSClient. (Vinayakumar B via umamahesh) (cherry picked from commit 1556f86a31a54733d6550363aa0e027acca7823b)
Repository: hadoop
Updated Branches:
refs/heads/branch-2 8f9822541 -> f4ab30634
HDFS-7210. Avoid two separate RPC's namenode.append() and namenode.getFileInfo() for an append call from DFSClient. (Vinayakumar B via umamahesh)
(cherry picked from commit 1556f86a31a54733d6550363aa0e027acca7823b)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4ab3063
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4ab3063
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4ab3063
Branch: refs/heads/branch-2
Commit: f4ab30634715b975c059561df27185662a8e910a
Parents: 8f98225
Author: Uma Maheswara Rao G <um...@apache.org>
Authored: Fri Nov 28 21:09:16 2014 +0530
Committer: Uma Maheswara Rao G <um...@apache.org>
Committed: Fri Nov 28 21:13:17 2014 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../java/org/apache/hadoop/hdfs/DFSClient.java | 10 +++--
.../hadoop/hdfs/protocol/ClientProtocol.java | 5 ++-
.../hdfs/protocol/LastBlockWithStatus.java | 46 ++++++++++++++++++++
...tNamenodeProtocolServerSideTranslatorPB.java | 20 +++++----
.../ClientNamenodeProtocolTranslatorPB.java | 11 +++--
.../hdfs/server/namenode/FSEditLogLoader.java | 7 ++-
.../hdfs/server/namenode/FSNamesystem.java | 10 +++--
.../hdfs/server/namenode/NameNodeRpcServer.java | 8 ++--
.../src/main/proto/ClientNamenodeProtocol.proto | 1 +
.../server/namenode/TestNamenodeRetryCache.java | 4 +-
.../namenode/ha/TestRetryCacheWithHA.java | 3 +-
12 files changed, 100 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
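Before the per-file diffs, the substance of the change: DFSClient.callAppend() used to cost two NameNode round trips per append call (namenode.append() for the last partial block, then namenode.getFileInfo() for the file status); the patch folds both results into the single append() RPC. A minimal sketch of the two call patterns, with simplified stand-in types rather than the real Hadoop classes:

// Simplified stand-ins; Block, FileStatus and Namenode here are
// illustrative, not the real Hadoop classes.
class AppendRpcSketch {
  record Block(long id) {}
  record FileStatus(long length) {}
  record BlockWithStatus(Block lastBlock, FileStatus stat) {}

  interface Namenode {
    Block append(String src, String clientName);       // pre-HDFS-7210 shape
    FileStatus getFileInfo(String src);
    BlockWithStatus appendV2(String src, String clientName); // post-patch shape
  }

  // Before: two round trips to the NameNode for every append call.
  static BlockWithStatus appendOld(Namenode nn, String src) {
    Block last = nn.append(src, "client");             // RPC #1
    FileStatus stat = nn.getFileInfo(src);             // RPC #2
    return new BlockWithStatus(last, stat);
  }

  // After: one round trip returns both pieces of state together.
  static BlockWithStatus appendNew(Namenode nn, String src) {
    return nn.appendV2(src, "client");                 // single RPC
  }
}
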
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0e353fd..d909497 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -147,6 +147,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7310. Mover can give first priority to local DN if it has target storage type
available in local DN. (Vinayakumar B via umamahesh)
+ HDFS-7210. Avoid two separate RPC's namenode.append() and namenode.getFileInfo()
+ for an append call from DFSClient. (Vinayakumar B via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6bf90c8..bd9622d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -164,6 +164,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -1777,9 +1778,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src,
int buffersize, Progressable progress) throws IOException {
- LocatedBlock lastBlock = null;
+ LastBlockWithStatus lastBlockWithStatus = null;
try {
- lastBlock = namenode.append(src, clientName);
+ lastBlockWithStatus = namenode.append(src, clientName);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
@@ -1789,9 +1790,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
- HdfsFileStatus newStat = getFileInfo(src);
+ HdfsFileStatus newStat = lastBlockWithStatus.getFileStatus();
return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
- lastBlock, newStat, dfsClientConf.createChecksum());
+ lastBlockWithStatus.getLastBlock(), newStat,
+ dfsClientConf.createChecksum());
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 6506bef..5f8bf30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -203,7 +203,8 @@ public interface ClientProtocol {
* Append to the end of the file.
* @param src path of the file being created.
* @param clientName name of the current client.
- * @return information about the last partial block if any.
+ * @return wrapper with information about the last partial block and file
+ * status if any
* @throws AccessControlException if permission to append file is
* denied by the system. As usually on the client side the exception will
* be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
@@ -224,7 +225,7 @@ public interface ClientProtocol {
* @throws UnsupportedOperationException if append is not supported
*/
@AtMostOnce
- public LocatedBlock append(String src, String clientName)
+ public LastBlockWithStatus append(String src, String clientName)
throws AccessControlException, DSQuotaExceededException,
FileNotFoundException, SafeModeException, UnresolvedLinkException,
SnapshotAccessControlException, IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
new file mode 100644
index 0000000..1cd80f9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Class to contain Lastblock and HdfsFileStatus for the Append operation
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LastBlockWithStatus {
+
+ private final LocatedBlock lastBlock;
+
+ private final HdfsFileStatus fileStatus;
+
+ public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
+ this.lastBlock = lastBlock;
+ this.fileStatus = fileStatus;
+ }
+
+ public LocatedBlock getLastBlock() {
+ return lastBlock;
+ }
+
+ public HdfsFileStatus getFileStatus() {
+ return fileStatus;
+ }
+}
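A minimal sketch of how the new wrapper is consumed, written in terms of the DFSClient hunk above (variable names are illustrative):

// One append() RPC now yields both values.
LastBlockWithStatus res = namenode.append(src, clientName);
LocatedBlock lastBlock = res.getLastBlock();  // null if the file has no
                                              // partial last block
HdfsFileStatus stat = res.getFileStatus();    // replaces the getFileInfo RPC
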
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index a92d455..5b6609b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -247,9 +248,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
private static final CreateResponseProto VOID_CREATE_RESPONSE =
CreateResponseProto.newBuilder().build();
- private static final AppendResponseProto VOID_APPEND_RESPONSE =
- AppendResponseProto.newBuilder().build();
-
private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE =
SetPermissionResponseProto.newBuilder().build();
@@ -407,17 +405,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
-
+
@Override
public AppendResponseProto append(RpcController controller,
AppendRequestProto req) throws ServiceException {
try {
- LocatedBlock result = server.append(req.getSrc(), req.getClientName());
- if (result != null) {
- return AppendResponseProto.newBuilder()
- .setBlock(PBHelper.convert(result)).build();
+ LastBlockWithStatus result = server.append(req.getSrc(),
+ req.getClientName());
+ AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
+ if (result.getLastBlock() != null) {
+ builder.setBlock(PBHelper.convert(result.getLastBlock()));
+ }
+ if (result.getFileStatus() != null) {
+ builder.setStat(PBHelper.convert(result.getFileStatus()));
}
- return VOID_APPEND_RESPONSE;
+ return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 077a3e9..ab14cd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -25,6 +25,7 @@ import java.util.EnumSet;
import java.util.List;
import com.google.common.collect.Lists;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CipherSuite;
@@ -63,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -188,7 +190,6 @@ import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
-
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
.EncryptionZoneProto;
@@ -301,7 +302,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
- public LocatedBlock append(String src, String clientName)
+ public LastBlockWithStatus append(String src, String clientName)
throws AccessControlException, DSQuotaExceededException,
FileNotFoundException, SafeModeException, UnresolvedLinkException,
IOException {
@@ -311,7 +312,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
.build();
try {
AppendResponseProto res = rpcProxy.append(null, req);
- return res.hasBlock() ? PBHelper.convert(res.getBlock()) : null;
+ LocatedBlock lastBlock = res.hasBlock() ? PBHelper
+ .convert(res.getBlock()) : null;
+ HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
+ : null;
+ return new LastBlockWithStatus(lastBlock, stat);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
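Both translators follow the usual protobuf pattern for optional fields: the server side calls a setter only when the value is non-null, and the client side probes with has*() before converting, so an unset field maps back to null. A condensed view of the round trip, assembled from the two hunks above:

// Server side: populate only the fields that exist.
AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
if (result.getLastBlock() != null) {
  builder.setBlock(PBHelper.convert(result.getLastBlock()));
}
if (result.getFileStatus() != null) {
  builder.setStat(PBHelper.convert(result.getFileStatus()));
}

// Client side: probe before converting.
LocatedBlock lastBlock = res.hasBlock() ? PBHelper.convert(res.getBlock()) : null;
HdfsFileStatus stat = res.hasStat() ? PBHelper.convert(res.getStat()) : null;
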
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 8df7d48..b22b156 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -393,8 +394,12 @@ public class FSEditLogLoader {
// add the op into retry cache is necessary
if (toAddRetryCache) {
+ HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
+ HdfsFileStatus.EMPTY_NAME, newFile,
+ BlockStoragePolicySuite.ID_UNSPECIFIED,
+ Snapshot.CURRENT_STATE_ID, false, iip);
fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
- addCloseOp.rpcCallId, lb);
+ addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
}
}
}
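FSEditLogLoader is involved because the retry cache must survive a restart or failover: replaying a logged append re-inserts a cache entry keyed by (rpcClientId, rpcCallId), and that payload now has to carry a reconstructed file status as well, otherwise a client retrying after failover would receive a wrapper with a null status. A self-contained sketch of the keying idea (the types here are illustrative, not the real RetryCache API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ReplaySketch {
  // One cache entry per (client, call) pair.
  record CallKey(String rpcClientId, int rpcCallId) {}
  // Stand-in for LastBlockWithStatus; both halves must be present.
  record Payload(Object lastBlock, Object fileStatus) {}

  final Map<CallKey, Payload> retryCache = new ConcurrentHashMap<>();

  void onReplayAppend(String rpcClientId, int rpcCallId,
                      Object lb, Object stat) {
    // Cache the complete response payload, file status included.
    retryCache.put(new CallKey(rpcClientId, rpcCallId),
                   new Payload(lb, stat));
  }
}
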
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f4eeb18..e877fc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -189,6 +189,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -2882,7 +2883,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Append to an existing file in the namespace.
*/
- LocatedBlock appendFile(
+ LastBlockWithStatus appendFile(
String src, String holder, String clientMachine, boolean logRetryCache)
throws IOException {
try {
@@ -2893,7 +2894,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
}
- private LocatedBlock appendFileInt(final String srcArg, String holder,
+ private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
String clientMachine, boolean logRetryCache)
throws AccessControlException, SafeModeException,
FileAlreadyExistsException, FileNotFoundException,
@@ -2912,6 +2913,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
LocatedBlock lb = null;
+ HdfsFileStatus stat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -2921,6 +2923,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkNameNodeSafeMode("Cannot append to file" + src);
src = dir.resolvePath(pc, src, pathComponents);
lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
+ stat = dir.getFileInfo(src, false, FSDirectory.isReservedRawName(srcArg),
+ true);
} catch (StandbyException se) {
skipSync = true;
throw se;
@@ -2941,7 +2945,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
}
logAuditEvent(true, "append", srcArg);
- return lb;
+ return new LastBlockWithStatus(lb, stat);
}
ExtendedBlock getExtendedBlock(Block blk) {
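Beyond saving a round trip, there is a consistency benefit: dir.getFileInfo() now runs in the same write-locked section that reopened the file, so the returned block and status describe one namespace state, whereas the old separate getFileInfo() RPC left a window for another writer between the two calls. A condensed sketch of the locking shape in appendFileInt (error handling, edit logging, and auditing omitted):

writeLock();
try {
  // Reopen the file and fetch its status under the same lock, so the
  // two values cannot straddle a concurrent namespace change.
  lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
  stat = dir.getFileInfo(src, false,
      FSDirectory.isReservedRawName(srcArg), true);
} finally {
  writeUnlock();
}
return new LastBlockWithStatus(lb, stat);
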
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 7ba0921..c269444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -35,6 +35,7 @@ import java.util.List;
import java.util.Set;
import com.google.common.collect.Lists;
+
import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -613,7 +615,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // ClientProtocol
- public LocatedBlock append(String src, String clientName)
+ public LastBlockWithStatus append(String src, String clientName)
throws IOException {
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
@@ -622,10 +624,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
- return (LocatedBlock) cacheEntry.getPayload();
+ return (LastBlockWithStatus) cacheEntry.getPayload();
}
- LocatedBlock info = null;
+ LastBlockWithStatus info = null;
boolean success = false;
try {
info = namesystem.appendFile(src, clientName, clientMachine,
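append() is annotated @AtMostOnce in ClientProtocol, so NameNodeRpcServer must be able to answer a retried call from the cache without re-executing the append; the only change here is that the cached payload type widens to LastBlockWithStatus. A self-contained sketch of the at-most-once pattern (names are illustrative, not the real RetryCache API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

class AtMostOnceSketch {
  private final Map<Long, Object> cache = new ConcurrentHashMap<>();

  Object call(long callId, Supplier<Object> op) {
    Object prior = cache.get(callId);
    if (prior != null) {
      return prior;            // retried RPC: replay the recorded result
    }
    Object result = op.get();  // first attempt: execute exactly once
    cache.put(callId, result); // keep the full payload for later retries
    return result;
  }
}
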
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index ad75865..2c1d3cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -90,6 +90,7 @@ message AppendRequestProto {
message AppendResponseProto {
optional LocatedBlockProto block = 1;
+ optional HdfsFileStatusProto stat = 2;
}
message SetReplicationRequestProto {
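Adding "optional HdfsFileStatusProto stat = 2;" keeps AppendResponseProto wire-compatible: an old client ignores the unknown field and an old NameNode never sets it, so a new client still has to be prepared for hasStat() to return false. One defensive pattern a caller could use against an older server (illustrative only; the DFSClient change in this commit does not add such a fallback):

// Mixed-version fallback: if the server predates HDFS-7210 and did not
// populate stat, fetch the status with a separate RPC as before.
HdfsFileStatus stat = res.hasStat()
    ? PBHelper.convert(res.getStat())
    : getFileInfo(src);   // extra round trip, old-server path only
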
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index e9a4914..b9e62e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.ClientId;
@@ -232,7 +232,7 @@ public class TestNamenodeRetryCache {
// Retried append requests succeed
newCall();
- LocatedBlock b = nnRpc.append(src, "holder");
+ LastBlockWithStatus b = nnRpc.append(src, "holder");
Assert.assertEquals(b, nnRpc.append(src, "holder"));
Assert.assertEquals(b, nnRpc.append(src, "holder"));
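A note on the retained assertions: LastBlockWithStatus does not override equals(), so assertEquals() passes only because the retry cache hands back the very same payload instance for the retried calls, which is exactly the at-most-once behavior under test:

// Identity, not value equality, is what the test exercises.
LastBlockWithStatus b1 = nnRpc.append(src, "holder"); // first call, cached
LastBlockWithStatus b2 = nnRpc.append(src, "holder"); // retry, from cache
assert b1 == b2;  // same instance, so assertEquals(b1, b2) holds
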
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ab3063/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index bf889f3..3739bd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -420,7 +421,7 @@ public class TestRetryCacheWithHA {
/** append operation */
class AppendOp extends AtMostOnceOp {
private final String fileName;
- private LocatedBlock lbk;
+ private LastBlockWithStatus lbk;
AppendOp(DFSClient client, String fileName) {
super("append", client);