You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ka...@apache.org on 2016/01/29 17:29:10 UTC
[07/35] hadoop git commit: HDFS-9690. ClientProtocol.addBlock is not
idempotent after HDFS-8071.
HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45c763ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45c763ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45c763ad
Branch: refs/heads/YARN-1011
Commit: 45c763ad6171bc7808c2ddcb9099a4215113da2a
Parents: bd909ed
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue Jan 26 11:20:13 2016 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Tue Jan 26 11:20:13 2016 +0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../hdfs/server/namenode/FSDirWriteFileOp.java | 11 +++---
.../hadoop/hdfs/TestDFSClientRetries.java | 36 +++++++++++++++-----
3 files changed, 35 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a14a1d8..56a85f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2696,6 +2696,9 @@ Release 2.7.3 - UNRELEASED
HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang Liu
via jitendra)
+ HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
+ (szetszwo)
+
Release 2.7.2 - 2016-01-25
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 914fbd9..6ba8e1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -184,17 +184,16 @@ class FSDirWriteFileOp {
src = fsn.dir.resolvePath(pc, src, pathComponents);
FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
previous, onRetryBlock);
- final INodeFile pendingFile = fileState.inode;
- // Check if the penultimate block is minimally replicated
- if (!fsn.checkFileProgress(src, pendingFile, false)) {
- throw new NotReplicatedYetException("Not replicated yet: " + src);
- }
-
if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. No need to generate new locations.
// Use the last block if it has locations.
return null;
}
+
+ final INodeFile pendingFile = fileState.inode;
+ if (!fsn.checkFileProgress(src, pendingFile, false)) {
+ throw new NotReplicatedYetException("Not replicated yet: " + src);
+ }
if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
throw new IOException("File has reached the limit on maximum number of"
+ " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index e41c06a..1f783f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -432,19 +432,37 @@ public class TestDFSClientRetries {
// Make the call to addBlock() get called twice, as if it were retried
// due to an IPC issue.
doAnswer(new Answer<LocatedBlock>() {
- @Override
- public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
- LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
+ private int getBlockCount(LocatedBlock ret) throws IOException {
LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
- int blockCount = lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
-
+ return lb.getLocatedBlocks().size();
+ }
+
+ @Override
+ public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
+ LOG.info("Called addBlock: "
+ + Arrays.toString(invocation.getArguments()));
+
+ // call first time
+ // wrap NotReplicatedYetException with RemoteException as rpc does.
+ final LocatedBlock ret;
+ try {
+ ret = (LocatedBlock) invocation.callRealMethod();
+ } catch(NotReplicatedYetException e) {
+ throw new RemoteException(e.getClass().getName(), e.getMessage());
+ }
+ final int blockCount = getBlockCount(ret);
+
// Retrying should result in a new block at the end of the file.
// (abandoning the old one)
- LocatedBlock ret2 = (LocatedBlock) invocation.callRealMethod();
- lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
- int blockCount2 = lb.getLocatedBlocks().size();
- assertEquals(lb.getLastLocatedBlock().getBlock(), ret2.getBlock());
+ // It should not have NotReplicatedYetException.
+ final LocatedBlock ret2;
+ try {
+ ret2 = (LocatedBlock) invocation.callRealMethod();
+ } catch(NotReplicatedYetException e) {
+ throw new AssertionError("Unexpected exception", e);
+ }
+ final int blockCount2 = getBlockCount(ret2);
// We shouldn't have gained an extra block by the RPC.
assertEquals(blockCount, blockCount2);