You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by el...@apache.org on 2011/08/02 01:59:57 UTC
svn commit: r1152972 - in /hadoop/common/branches/branch-0.20-security:
CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
src/test/org/apache/hadoop/hdfs/TestQuota.java
Author: eli
Date: Mon Aug 1 23:59:56 2011
New Revision: 1152972
URL: http://svn.apache.org/viewvc?rev=1152972&view=rev
Log:
HDFS-2053. Bug in INodeDirectory#computeContentSummary warning. Contributed by Michael Noll
Modified:
hadoop/common/branches/branch-0.20-security/CHANGES.txt
hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestQuota.java
Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1152972&r1=1152971&r2=1152972&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Mon Aug 1 23:59:56 2011
@@ -21,6 +21,9 @@ Release 0.20.205.0 - unreleased
MAPREDUCE-2650. back-port MAPREDUCE-2238 to 0.20-security.
(Sherry Chen via mahadev)
+ HDFS-2053. Bug in INodeDirectory#computeContentSummary warning
+ (Michael Noll via eli)
+
IMPROVEMENTS
HADOOP-7343. Make the number of warnings accepted by test-patch
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1152972&r1=1152971&r2=1152972&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Mon Aug 1 23:59:56 2011
@@ -333,20 +333,31 @@ class INodeDirectory extends INode {
/** {@inheritDoc} */
long[] computeContentSummary(long[] summary) {
+ // Walk through the children of this node, using a new summary array
+ // for the (sub)tree rooted at this node
+ assert 4 == summary.length;
+ long[] subtreeSummary = new long[]{0,0,0,0};
if (children != null) {
for (INode child : children) {
- child.computeContentSummary(summary);
+ child.computeContentSummary(subtreeSummary);
}
}
if (this instanceof INodeDirectoryWithQuota) {
// Warn if the cached and computed diskspace values differ
INodeDirectoryWithQuota node = (INodeDirectoryWithQuota)this;
long space = node.diskspaceConsumed();
- if (-1 != node.getDsQuota() && space != summary[3]) {
+ assert -1 == node.getDsQuota() || space == subtreeSummary[3];
+ if (-1 != node.getDsQuota() && space != subtreeSummary[3]) {
NameNode.LOG.warn("Inconsistent diskspace for directory "
- +getLocalName()+". Cached: "+space+" Computed: "+summary[3]);
+ +getLocalName()+". Cached: "+space+" Computed: "+subtreeSummary[3]);
}
}
+
+ // update the passed summary array with the values for this node's subtree
+ for (int i = 0; i < summary.length; i++) {
+ summary[i] += subtreeSummary[i];
+ }
+
summary[2]++;
return summary;
}
Modified: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=1152972&r1=1152971&r2=1152972&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestQuota.java Mon Aug 1 23:59:56 2011
@@ -690,7 +690,57 @@ public class TestQuota extends TestCase
// verify increase in space
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
-
+
+ // Test HDFS-2053 :
+
+ // Create directory /hdfs-2053
+ final Path quotaDir2053 = new Path("/hdfs-2053");
+ assertTrue(dfs.mkdirs(quotaDir2053));
+
+ // Create subdirectories /hdfs-2053/{A,B,C}
+ final Path quotaDir2053_A = new Path(quotaDir2053, "A");
+ assertTrue(dfs.mkdirs(quotaDir2053_A));
+ final Path quotaDir2053_B = new Path(quotaDir2053, "B");
+ assertTrue(dfs.mkdirs(quotaDir2053_B));
+ final Path quotaDir2053_C = new Path(quotaDir2053, "C");
+ assertTrue(dfs.mkdirs(quotaDir2053_C));
+
+ // Factors to vary the sizes of test files created in each subdir.
+ // The actual factors are not really important but they allow us to create
+ // identifiable file sizes per subdir, which helps during debugging.
+ int sizeFactorA = 1;
+ int sizeFactorB = 2;
+ int sizeFactorC = 4;
+
+ // Set space quota for subdirectory C
+ dfs.setQuota(quotaDir2053_C, FSConstants.QUOTA_DONT_SET,
+ (sizeFactorC + 1) * fileSpace);
+ c = dfs.getContentSummary(quotaDir2053_C);
+ assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
+
+ // Create a file under subdirectory A
+ DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"),
+ sizeFactorA * fileLen, replication, 0);
+ c = dfs.getContentSummary(quotaDir2053_A);
+ assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
+
+ // Create a file under subdirectory B
+ DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"),
+ sizeFactorB * fileLen, replication, 0);
+ c = dfs.getContentSummary(quotaDir2053_B);
+ assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
+
+ // Create a file under subdirectory C (which has a space quota)
+ DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"),
+ sizeFactorC * fileLen, replication, 0);
+ c = dfs.getContentSummary(quotaDir2053_C);
+ assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
+
+ // Check space consumed for /hdfs-2053
+ c = dfs.getContentSummary(quotaDir2053);
+ assertEquals(c.getSpaceConsumed(),
+ (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
+
} finally {
cluster.shutdown();
}