You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by el...@apache.org on 2010/12/10 08:14:22 UTC
svn commit: r1044225 - in /hadoop/common/branches/branch-0.20: CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
src/test/org/apache/hadoop/hdfs/TestQuota.java
Author: eli
Date: Fri Dec 10 07:14:21 2010
New Revision: 1044225
URL: http://svn.apache.org/viewvc?rev=1044225&view=rev
Log:
HDFS-1377. Quota bug for partial blocks allows quotas to be violated. Contributed by Eli Collins
Modified:
hadoop/common/branches/branch-0.20/CHANGES.txt
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java
Modified: hadoop/common/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/CHANGES.txt?rev=1044225&r1=1044224&r2=1044225&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20/CHANGES.txt Fri Dec 10 07:14:21 2010
@@ -70,6 +70,8 @@ Release 0.20.3 - Unreleased
HDFS-908. TestDistributedFileSystem fails with Wrong FS on weird hosts.
(Todd Lipcon via eli)
+ HDFS-1377. Quota bug for partial blocks allows quotas to be violated. (eli)
+
IMPROVEMENTS
MAPREDUCE-1407. Update javadoc in mapreduce.{Mapper,Reducer} to match
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1044225&r1=1044224&r2=1044225&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Dec 10 07:14:21 2010
@@ -639,19 +639,9 @@ class FSDirectory implements FSConstants
/**
* Replaces the specified inode with the specified one.
*/
- void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
- throws IOException {
- replaceNode(path, oldnode, newnode, true);
- }
-
- /**
- * @see #replaceNode(String, INodeFile, INodeFile)
- */
- private void replaceNode(String path, INodeFile oldnode, INodeFile newnode,
- boolean updateDiskspace) throws IOException {
+ void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
+ throws IOException {
synchronized (rootDir) {
- long dsOld = oldnode.diskspaceConsumed();
-
//
// Remove the node from the namespace
//
@@ -668,18 +658,6 @@ class FSDirectory implements FSConstants
rootDir.addNode(path, newnode);
- //check if disk space needs to be updated.
- long dsNew = 0;
- if (updateDiskspace && (dsNew = newnode.diskspaceConsumed()) != dsOld) {
- try {
- updateSpaceConsumed(path, 0, dsNew-dsOld);
- } catch (QuotaExceededException e) {
- // undo
- replaceNode(path, newnode, oldnode, false);
- throw e;
- }
- }
-
int index = 0;
for (Block b : newnode.getBlocks()) {
BlockInfo info = namesystem.blocksMap.addINode(b, newnode);
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1044225&r1=1044224&r2=1044225&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Fri Dec 10 07:14:21 2010
@@ -320,6 +320,15 @@ class INodeDirectory extends INode {
child.computeContentSummary(summary);
}
}
+ if (this instanceof INodeDirectoryWithQuota) {
+ // Warn if the cached and computed diskspace values differ
+ INodeDirectoryWithQuota node = (INodeDirectoryWithQuota)this;
+ long space = node.diskspaceConsumed();
+ if (-1 != node.getDsQuota() && space != summary[3]) {
+ NameNode.LOG.warn("Inconsistent diskspace for directory "
+ +getLocalName()+". Cached: "+space+" Computed: "+summary[3]);
+ }
+ }
summary[2]++;
return summary;
}
Modified: hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=1044225&r1=1044224&r2=1044225&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java Fri Dec 10 07:14:21 2010
@@ -626,4 +626,124 @@ public class TestQuota extends TestCase
cluster.shutdown();
}
}
+
+ /**
+ * Violate a space quota using files of size < 1 block. Test that
+ * block allocation conservatively assumes that for quota checking
+ * the entire space of the block is used.
+ */
+ public void testBlockAllocationAdjustUsageConservatively() throws Exception {
+ Configuration conf = new Configuration();
+ final int BLOCK_SIZE = 6 * 1024;
+ conf.set("dfs.block.size", Integer.toString(BLOCK_SIZE));
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+ DFSAdmin admin = new DFSAdmin(conf);
+
+ try {
+ Path dir = new Path("/test");
+ Path file1 = new Path("/test/test1");
+ Path file2 = new Path("/test/test2");
+ boolean exceededQuota = false;
+ final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total usage including repl.
+ final int FILE_SIZE = BLOCK_SIZE / 2;
+ ContentSummary c;
+
+ // Create the directory and set the quota
+ assertTrue(fs.mkdirs(dir));
+ runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
+ dir.toString());
+
+ // Creating one file should use half the quota
+ DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short)3, 1L);
+ DFSTestUtil.waitReplication(fs, file1, (short)3);
+ c = fs.getContentSummary(dir);
+ assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
+ c.getSpaceConsumed());
+
+      // We cannot create the 2nd file because even though the total
+      // space used by two files (2 * 3 * BLOCK_SIZE/2) would fit within
+      // the quota (3 * BLOCK_SIZE), when a block for a file is allocated
+      // the space used is adjusted conservatively (3 * BLOCK_SIZE, ie it
+      // assumes a full block is written), which will violate the quota
+      // since we've already used half the quota for the first file.
+ try {
+ DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short)3, 1L);
+ } catch (QuotaExceededException e) {
+ exceededQuota = true;
+ }
+ assertTrue("Quota not exceeded", exceededQuota);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Like the previous test but create many files. This covers bugs
+ * where the quota adjustment is incorrect but it takes many files
+ * to accrue a big enough accounting error to violate the quota.
+ */
+ public void testMultipleFilesSmallerThanOneBlock() throws Exception {
+ Configuration conf = new Configuration();
+ final int BLOCK_SIZE = 6 * 1024;
+ conf.set("dfs.block.size", Integer.toString(BLOCK_SIZE));
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+ DFSAdmin admin = new DFSAdmin(conf);
+
+ try {
+ Path dir = new Path("/test");
+ boolean exceededQuota = false;
+ ContentSummary c;
+ // 1kb file
+ // 6kb block
+ // 192kb quota
+ final int FILE_SIZE = 1024;
+ final int QUOTA_SIZE = 32 * (int)fs.getDefaultBlockSize();
+ assertEquals(6 * 1024, fs.getDefaultBlockSize());
+ assertEquals(192 * 1024, QUOTA_SIZE);
+
+      // Create the dir and set the quota. We need to enable the quota before
+      // writing the files as setting the quota afterwards will overwrite
+      // the cached disk space used for quota verification with the actual
+      // amount used as calculated by INode#spaceConsumedInTree.
+ assertTrue(fs.mkdirs(dir));
+ runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
+ dir.toString());
+
+ // We can create at most 59 files because block allocation is
+ // conservative and initially assumes a full block is used, so we
+ // need to leave at least 3 * BLOCK_SIZE free space when allocating
+ // the last block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
+ for (int i = 0; i < 59; i++) {
+ Path file = new Path("/test/test" + i);
+ DFSTestUtil.createFile(fs, file, FILE_SIZE, (short)3, 1L);
+ DFSTestUtil.waitReplication(fs, file, (short)3);
+ }
+
+ // Should account for all 59 files (almost QUOTA_SIZE)
+ c = fs.getContentSummary(dir);
+ assertEquals("Invalid space consumed",
+ 59 * FILE_SIZE * 3,
+ c.getSpaceConsumed());
+ assertEquals("Invalid space consumed",
+ QUOTA_SIZE - (59 * FILE_SIZE * 3),
+ 3 * (fs.getDefaultBlockSize() - FILE_SIZE));
+
+ // Now check that trying to create another file violates the quota
+ try {
+ Path file = new Path("/test/test59");
+ DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
+ DFSTestUtil.waitReplication(fs, file, (short) 3);
+ } catch (QuotaExceededException e) {
+ exceededQuota = true;
+ }
+ assertTrue("Quota not exceeded", exceededQuota);
+ } finally {
+ cluster.shutdown();
+ }
+ }
}