You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2010/01/20 02:13:08 UTC
svn commit: r901026 - in /hadoop/hdfs/trunk: CHANGES.txt
src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
Author: hairong
Date: Wed Jan 20 01:13:07 2010
New Revision: 901026
URL: http://svn.apache.org/viewvc?rev=901026&view=rev
Log:
HDFS-822. Appends to already-finalized blocks can rename across volumes. Contributed by Hairong Kuang.
Modified:
hadoop/hdfs/trunk/CHANGES.txt
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=901026&r1=901025&r2=901026&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Jan 20 01:13:07 2010
@@ -631,6 +631,9 @@
HDFS-101. DFS write pipeline: DFSClient sometimes does not detect second
datanode failure. (hairong)
+ HDFS-822. Appends to already-finalized blocks can rename across volumes.
+ (hairong)
+
Release 0.20.2 - Unreleased
IMPROVEMENTS
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=901026&r1=901025&r2=901026&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Jan 20 01:13:07 2010
@@ -1101,7 +1101,11 @@
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
- FSVolume v = volumes.getNextVolume(estimateBlockLen);
+ FSVolume v = replicaInfo.getVolume();
+ if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
+ throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ + replicaInfo);
+ }
File newBlkFile = new File(v.rbwDir, replicaInfo.getBlockName());
File oldmeta = replicaInfo.getMetaFile();
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=901026&r1=901025&r2=901026&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Wed Jan 20 01:13:07 2010
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.junit.Assert;
import org.junit.Test;
@@ -147,10 +148,26 @@
private void testAppend(FSDataset dataSet) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
+ FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED]).getVolume();
+ long available = v.getCapacity()-v.getDfsUsed();
+ long expectedLen = blocks[FINALIZED].getNumBytes();
+ try {
+ v.decDfsUsed(-available);
+ blocks[FINALIZED].setNumBytes(expectedLen+100);
+ dataSet.append(blocks[FINALIZED], newGS, expectedLen);
+ Assert.fail("Should not have space to append to a FINALIZED replica" + blocks[FINALIZED]);
+ } catch (DiskOutOfSpaceException e) {
+ Assert.assertTrue(e.getMessage().startsWith(
+ "Insufficient space for appending to "));
+ }
+ v.decDfsUsed(available);
+ blocks[FINALIZED].setNumBytes(expectedLen);
+
+ newGS = blocks[RBW].getGenerationStamp()+1;
dataSet.append(blocks[FINALIZED], newGS,
blocks[FINALIZED].getNumBytes()); // successful
blocks[FINALIZED].setGenerationStamp(newGS);
-
+
try {
dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1,
blocks[TEMPORARY].getNumBytes());