Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/10/25 20:46:58 UTC

svn commit: r1535813 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/server/namenode/

Author: jing9
Date: Fri Oct 25 18:46:57 2013
New Revision: 1535813

URL: http://svn.apache.org/r1535813
Log:
HDFS-5257. Merge change r1535811 from trunk.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1535813&r1=1535812&r2=1535813&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Oct 25 18:46:57 2013
@@ -140,6 +140,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5400.  DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set
     to the wrong value.  (Colin Patrick McCabe)
 
+    HDFS-5257. addBlock() retry should return a LocatedBlock with locations,
+    otherwise the client will get an AIOBE. (Vinay via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

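For readers skimming the changelog entry above, here is a minimal, self-contained sketch of the failure mode HDFS-5257 describes. This is hypothetical illustration code with placeholder names, not the real DFSOutputStream or LocatedBlock classes: if a retried addBlock() hands back a block whose locations array is empty, client code that indexes the first datanode unconditionally throws an ArrayIndexOutOfBoundsException, which is what the guarded variant below avoids.

// Hypothetical illustration of the AIOBE described in HDFS-5257; the names
// below are placeholders, not Hadoop classes.
public class EmptyLocationsSketch {

  // Stand-in for reading the first datanode of a block's locations array.
  static String firstTargetUnguarded(String[] locations) {
    // Throws ArrayIndexOutOfBoundsException when a retried addBlock()
    // returned zero locations.
    return locations[0];
  }

  // Guarded variant in the spirit of the createBlockOutputStream() change.
  static String firstTargetGuarded(String[] locations) {
    if (locations.length == 0) {
      return null; // caller treats this as a failed pipeline setup
    }
    return locations[0];
  }

  public static void main(String[] args) {
    System.out.println(firstTargetGuarded(new String[0])); // null, no AIOBE
    try {
      firstTargetUnguarded(new String[0]);
    } catch (ArrayIndexOutOfBoundsException e) {
      System.out.println("unguarded access fails: " + e);
    }
  }
}
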
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1535813&r1=1535812&r2=1535813&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Oct 25 18:46:57 2013
@@ -1102,6 +1102,11 @@ public class DFSOutputStream extends FSO
     //
     private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
         boolean recoveryFlag) {
+      if (nodes.length == 0) {
+        DFSClient.LOG.info("nodes are empty for write pipeline of block "
+            + block);
+        return false;
+      }
       Status pipelineStatus = SUCCESS;
       String firstBadLink = "";
       if (DFSClient.LOG.isDebugEnabled()) {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1535813&r1=1535812&r2=1535813&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Oct 25 18:46:57 2013
@@ -2490,8 +2490,8 @@ public class FSNamesystem implements Nam
       final INodeFileUnderConstruction pendingFile =
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
 
-      if(onRetryBlock[0] != null) {
-        // This is a retry. Just return the last block.
+      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
+        // This is a retry. Just return the last block if it has locations.
         return onRetryBlock[0];
       }
       if (pendingFile.getBlocks().length >= maxBlocksPerFile) {
@@ -2528,9 +2528,18 @@ public class FSNamesystem implements Nam
       final INodeFileUnderConstruction pendingFile =
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
 
-      if(onRetryBlock[0] != null) {
-        // This is a retry. Just return the last block.
-        return onRetryBlock[0];
+      if (onRetryBlock[0] != null) {
+        if (onRetryBlock[0].getLocations().length > 0) {
+          // This is a retry. Just return the last block if it has locations.
+          return onRetryBlock[0];
+        } else {
+          // Add the newly chosen targets to the already allocated block and return it.
+          BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+          ((BlockInfoUnderConstruction) lastBlockInFile)
+              .setExpectedLocations(targets);
+          offset = pendingFile.computeFileSize();
+          return makeLocatedBlock(lastBlockInFile, targets, offset);
+        }
       }
 
       // commit the last block and complete it if it has minimum replicas

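For context, a hypothetical, self-contained sketch of the decision the FSNamesystem hunk above introduces; the types are simplified placeholders, not the real BlockInfoUnderConstruction or LocatedBlock classes. The idea: on a retried addBlock(), reuse the cached last block if it still carries locations; after a NameNode restart the locations are not persisted, so the freshly chosen targets are attached to the existing block before it is returned, and the client never sees an empty locations array.

// Hypothetical sketch of the retry handling added above; the types are
// simplified placeholders, not Hadoop classes.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class AddBlockRetrySketch {

  static final class Block {
    final long id;
    List<String> locations; // datanode names; empty after a NameNode restart
    Block(long id, List<String> locations) {
      this.id = id;
      this.locations = locations;
    }
  }

  static final class Located {
    final Block block;
    final List<String> targets;
    Located(Block block, List<String> targets) {
      this.block = block;
      this.targets = targets;
    }
  }

  // Mirrors the new branch: return the cached block as-is when it has
  // locations; otherwise attach the newly chosen targets first.
  static Located handleRetry(Block lastAllocated, List<String> newTargets) {
    if (!lastAllocated.locations.isEmpty()) {
      return new Located(lastAllocated, lastAllocated.locations);
    }
    lastAllocated.locations = newTargets; // re-selected expected locations
    return new Located(lastAllocated, newTargets);
  }

  public static void main(String[] args) {
    Block afterRestart = new Block(42L, new ArrayList<String>());
    Located lb = handleRetry(afterRestart, Arrays.asList("dn1", "dn2", "dn3"));
    System.out.println(lb.targets); // [dn1, dn2, dn3] -- never empty
  }
}
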
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java?rev=1535813&r1=1535812&r2=1535813&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java Fri Oct 25 18:46:57 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
 
@@ -139,4 +140,33 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
   }
+
+  /*
+   * Since the NameNode does not persist block locations, an addBlock()
+   * retry after a NameNode restart should re-select the locations and
+   * return them to the client. See HDFS-5257.
+   */
+  @Test
+  public void testAddBlockRetryShouldReturnBlockWithLocations()
+      throws Exception {
+    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
+    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
+    // create file
+    nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 3, 1024);
+    // start first addBlock()
+    LOG.info("Starting first addBlock for " + src);
+    LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertTrue("Block locations should be present",
+        lb1.getLocations().length > 0);
+
+    cluster.restartNameNode();
+    nameNodeRpc = cluster.getNameNodeRpc();
+    LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
+    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
+  }
 }