Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2014/05/22 10:07:17 UTC

svn commit: r1596774 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java

Author: jing9
Date: Thu May 22 08:07:16 2014
New Revision: 1596774

URL: http://svn.apache.org/r1596774
Log:
HDFS-6423. Diskspace quota usage should be updated when appending data to partial block. Contributed by Jing Zhao.
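
For context, the effect of this fix is visible from the client side: while a file's
partial last block is reopened for append, that block is reserved at the preferred
block size and counted against the directory's diskspace quota. The sketch below is
illustrative only and not part of the patch; it assumes fs.defaultFS points at a
running cluster, that /quotaDir already has a diskspace quota set, and that the file
/quotaDir/data already exists (all of these names are made up).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QuotaDuringAppend {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS is configured to point at the target HDFS cluster.
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/quotaDir");   // assumed: diskspace quota already set
        Path file = new Path(dir, "data");  // assumed: file already exists

        // Diskspace consumed before the append (all replicas counted).
        ContentSummary before = fs.getContentSummary(dir);
        System.out.println("consumed before append: " + before.getSpaceConsumed());

        // Reopen for append; with this change the partial last block is charged
        // at the preferred block size while it is under construction.
        FSDataOutputStream out = fs.append(file);
        out.write(new byte[512]);
        out.hflush();

        ContentSummary during = fs.getContentSummary(dir);
        System.out.println("consumed during append: " + during.getSpaceConsumed());

        out.close();
      }
    }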

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1596774&r1=1596773&r2=1596774&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu May 22 08:07:16 2014
@@ -580,6 +580,9 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6438. DeleteSnapshot should be a DELETE request in WebHdfs. (jing9)
 
+    HDFS-6423. Diskspace quota usage should be updated when appending data to
+    partial block. (jing9)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1596774&r1=1596773&r2=1596774&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu May 22 08:07:16 2014
@@ -2424,6 +2424,12 @@ public class FSNamesystem implements Nam
         .getClientName(), src);
     
     LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
+    if (ret != null) {
+      // update the quota: use the preferred block size for UC block
+      final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
+      dir.updateSpaceConsumed(src, 0, diff * file.getBlockReplication());
+    }
+
     if (writeToEditLog) {
       getEditLog().logOpenFile(src, cons, logRetryCache);
     }
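
The delta charged above is the gap between the file's preferred block size and the
current length of the reopened last block, scaled by the replication factor so that
every replica is accounted for; the matching decrement happens later when the block
is committed at its final length. A small self-contained sketch of that arithmetic,
with illustrative sizes that are not taken from the patch:

    public class AppendQuotaDelta {
      /**
       * Extra diskspace charged against the quota when a file's partial last
       * block is reopened for append: the block is reserved at the preferred
       * block size on every replica.
       */
      static long deltaOnReopen(long preferredBlockSize, long lastBlockSize,
          short replication) {
        return (preferredBlockSize - lastBlockSize) * replication;
      }

      public static void main(String[] args) {
        long preferred = 128L * 1024 * 1024; // 128 MB preferred block size
        long lastBlock = 64L * 1024 * 1024;  // the last block currently holds 64 MB
        short replication = 3;
        // (128 MB - 64 MB) * 3 replicas = 192 MB reserved while the block is
        // under construction; released again when the block is committed.
        System.out.println(deltaOnReopen(preferred, lastBlock, replication));
      }
    }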

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java?rev=1596774&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java Thu May 22 08:07:16 2014
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestDiskspaceQuotaUpdate {
+  private static final int BLOCKSIZE = 1024;
+  private static final short REPLICATION = 1;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private FSDirectory fsdir;
+  private DistributedFileSystem dfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+        .build();
+    cluster.waitActive();
+
+    fsdir = cluster.getNamesystem().getFSDirectory();
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test if the quota can be correctly updated for append
+   */
+  @Test
+  public void testUpdateQuotaForAppend() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
+    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
+
+    // append half of the block data
+    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
+
+    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+    Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
+        .getSpaceConsumed();
+    long ns = quota.get(Quota.NAMESPACE);
+    long ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
+
+    // append another block
+    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
+
+    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+    ns = quota.get(Quota.NAMESPACE);
+    ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
+  }
+
+  /**
+   * Test if the quota can be correctly updated when file length is updated
+   * through fsync
+   */
+  @Test
+  public void testUpdateQuotaForFSync() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
+    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
+
+    FSDataOutputStream out = dfs.append(bar);
+    out.write(new byte[BLOCKSIZE / 4]);
+    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
+        .of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
+
+    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+    Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
+        .getSpaceConsumed();
+    long ns = quota.get(Quota.NAMESPACE);
+    long ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction
+
+    out.write(new byte[BLOCKSIZE / 4]);
+    out.close();
+
+    fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+    ns = quota.get(Quota.NAMESPACE);
+    ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns);
+    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
+
+    // append another block
+    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
+
+    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+    ns = quota.get(Quota.NAMESPACE);
+    ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
+  }
+}
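
The new test lives in the hadoop-hdfs module; assuming a standard build setup it
should be runnable on its own with the usual surefire selector, for example
"mvn test -Dtest=TestDiskspaceQuotaUpdate" from hadoop-hdfs-project/hadoop-hdfs.
Both cases use a single replica and a 1 KB block size so the expected namespace and
diskspace counts stay easy to verify by hand.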