You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/12/11 02:26:34 UTC
svn commit: r1550013 - in
/hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/test/java/org/apache/hadoop/hdfs/server/namenode/
Author: jing9
Date: Wed Dec 11 01:26:33 2013
New Revision: 1550013
URL: http://svn.apache.org/r1550013
Log:
HDFS-5257. Merge change r1535813 from branch-2.
Modified:
hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
Modified: hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1550013&r1=1550012&r2=1550013&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Dec 11 01:26:33 2013
@@ -142,6 +142,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is
enabled. (Colin Patrick McCabe via jing9)
+ HDFS-5257. addBlock() retry should return LocatedBlock with locations else client
+ will get AIOBE (ArrayIndexOutOfBoundsException). (Vinay via jing9)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1550013&r1=1550012&r2=1550013&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Wed Dec 11 01:26:33 2013
@@ -1107,6 +1107,11 @@ public class DFSOutputStream extends FSO
//
private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
boolean recoveryFlag) {
+ if (nodes.length == 0) {
+ DFSClient.LOG.info("nodes are empty for write pipeline of block "
+ + block);
+ return false;
+ }
Status pipelineStatus = SUCCESS;
String firstBadLink = "";
if (DFSClient.LOG.isDebugEnabled()) {
Modified: hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1550013&r1=1550012&r2=1550013&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Dec 11 01:26:33 2013
@@ -2508,8 +2508,8 @@ public class FSNamesystem implements Nam
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
- if(onRetryBlock[0] != null) {
- // This is a retry. Just return the last block.
+ if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
+ // This is a retry. Just return the last block if having locations.
return onRetryBlock[0];
}
if (pendingFile.getBlocks().length >= maxBlocksPerFile) {
@@ -2546,9 +2546,18 @@ public class FSNamesystem implements Nam
final INodeFileUnderConstruction pendingFile =
(INodeFileUnderConstruction) inodes[inodes.length - 1];
- if(onRetryBlock[0] != null) {
- // This is a retry. Just return the last block.
- return onRetryBlock[0];
+ if (onRetryBlock[0] != null) {
+ if (onRetryBlock[0].getLocations().length > 0) {
+ // This is a retry. Just return the last block if having locations.
+ return onRetryBlock[0];
+ } else {
+ // add new chosen targets to already allocated block and return
+ BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+ ((BlockInfoUnderConstruction) lastBlockInFile)
+ .setExpectedLocations(targets);
+ offset = pendingFile.computeFileSize();
+ return makeLocatedBlock(lastBlockInFile, targets, offset);
+ }
}
// commit the last block and complete it if it has minimum replicas
Modified: hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java?rev=1550013&r1=1550012&r2=1550013&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java (original)
+++ hadoop/common/branches/branch-2.3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java Wed Dec 11 01:26:33 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
@@ -139,4 +140,33 @@ public class TestAddBlockRetry {
assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
+
+ /*
+ * Since the NameNode does not persist any locations of the block, an addBlock()
+ * retry call after an NN restart should re-select the locations and return them
+ * to the client. Refer to HDFS-5257.
+ */
+ @Test
+ public void testAddBlockRetryShouldReturnBlockWithLocations()
+ throws Exception {
+ final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
+ NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
+ // create file
+ nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
+ new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+ (short) 3, 1024);
+ // start first addBlock()
+ LOG.info("Starting first addBlock for " + src);
+ LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
+ INodeId.GRANDFATHER_INODE_ID, null);
+ assertTrue("Block locations should be present",
+ lb1.getLocations().length > 0);
+
+ cluster.restartNameNode();
+ nameNodeRpc = cluster.getNameNodeRpc();
+ LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
+ INodeId.GRANDFATHER_INODE_ID, null);
+ assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
+ assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
+ }
}