You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2013/08/15 23:24:16 UTC
svn commit: r1514501 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
Author: shv
Date: Thu Aug 15 21:24:16 2013
New Revision: 1514501
URL: http://svn.apache.org/r1514501
Log:
HDFS-2994. If lease soft limit is recovered successfully, the append can fail. Contributed by Tao Luo.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1514501&r1=1514500&r2=1514501&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Aug 15 21:24:16 2013
@@ -116,6 +116,9 @@ Release 2.1.1-beta - UNRELEASED
HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close
EditLogInputStreams upon finishing. (Chuan Liu via cnauroth)
+ HDFS-2994. If lease soft limit is recovered successfully
+ the append can fail. (Tao Luo via shv)
+
Release 2.1.0-beta - 2013-08-22
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1514501&r1=1514500&r2=1514501&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Aug 15 21:24:16 2013
@@ -2136,10 +2136,15 @@ public class FSNamesystem implements Nam
throw new FileNotFoundException("failed to append to non-existent file "
+ src + " on client " + clientMachine);
}
- final INodeFile myFile = INodeFile.valueOf(inode, src, true);
+ INodeFile myFile = INodeFile.valueOf(inode, src, true);
// Opening an existing file for write - may need to recover lease.
recoverLeaseInternal(myFile, src, holder, clientMachine, false);
-
+
+ // recoverLeaseInternal may create a new INodeFile via
+ // finalizeINodeFileUnderConstruction, so we need to refresh
+ // the referenced file.
+ myFile = INodeFile.valueOf(dir.getINode(src), src, true);
+
final DatanodeDescriptor clientNode =
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1514501&r1=1514500&r2=1514501&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Thu Aug 15 21:24:16 2013
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
@@ -337,4 +338,47 @@ public class TestFileAppend{
cluster.shutdown();
}
}
+
+ /** Tests appending after soft-limit expires. */
+ @Test
+ public void testAppendAfterSoftLimit()
+ throws IOException, InterruptedException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+ conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+ //Set small soft-limit for lease
+ final long softLimit = 1L;
+ final long hardLimit = 9999999L;
+
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .build();
+ cluster.setLeasePeriod(softLimit, hardLimit);
+ cluster.waitActive();
+
+ FileSystem fs = cluster.getFileSystem();
+ FileSystem fs2 = new DistributedFileSystem();
+ fs2.initialize(fs.getUri(), conf);
+
+ final Path testPath = new Path("/testAppendAfterSoftLimit");
+ final byte[] fileContents = AppendTestUtil.initBuffer(32);
+
+ // create a new file without closing
+ FSDataOutputStream out = fs.create(testPath);
+ out.write(fileContents);
+
+ //Wait for > soft-limit
+ Thread.sleep(250);
+
+ try {
+ FSDataOutputStream appendStream2 = fs2.append(testPath);
+ appendStream2.write(fileContents);
+ appendStream2.close();
+ assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
+ } finally {
+ fs.close();
+ fs2.close();
+ cluster.shutdown();
+ }
+ }
+
}