Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2019/09/03 16:30:43 UTC

[hadoop] branch trunk updated: HDFS-14633. The StorageType quota and consume in QuotaFeature is not handled for rename. Contributed by Jinglun.

This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 3c11716  HDFS-14633. The StorageType quota and consume in QuotaFeature is not handled for rename. Contributed by Jinglun.
3c11716 is described below

commit 3c117163a343d7da7ac958e22789b461c24efa5f
Author: Xiaoyu Yao <xy...@apache.org>
AuthorDate: Fri Aug 30 16:46:04 2019 -0700

    HDFS-14633. The StorageType quota and consume in QuotaFeature is not handled for rename. Contributed by Jinglun.
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  7 +++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  5 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java     | 49 ++++++++++++++++++++++
 3 files changed, 59 insertions(+), 2 deletions(-)

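Note on the change: before this patch, the rename quota check computed the usage of the renamed subtree with the source's effective storage policy, so a per-StorageType quota on the destination directory was neither verified nor charged correctly when the destination carried a different policy. The new test below exercises exactly that scenario; for readers skimming the patch, here is a condensed client-level sketch of the same steps. It is only a sketch: it assumes an already initialized DistributedFileSystem handle named dfs, that both directories and a file named file1 under the source directory already exist, and that the paths are illustrative.

    // Source keeps the default HOT policy; the destination is ALL_SSD.
    Path srcDir = new Path("/quota/src-dir");
    Path dstDir = new Path("/quota/dst-dir");
    dfs.setStoragePolicy(srcDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    dfs.setStoragePolicy(dstDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);

    // A tight SSD quota on the destination must reject the rename, since
    // under ALL_SSD the moved file will consume SSD space at the destination.
    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, 10);
    try {
      dfs.rename(new Path(srcDir, "file1"), new Path(dstDir, "file1"));
      // Before this patch the rename could slip through the SSD quota.
    } catch (QuotaExceededException e) {
      // Expected once the destination parent's policy is used for the check.
    }
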
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 68cf3e7..6ca6568 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -76,7 +76,12 @@ class FSDirRenameOp {
     while(src.getINode(i) == dst.getINode(i)) { i++; }
     // src[i - 1] is the last common ancestor.
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
-    final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);
+    // Assume dstParent existence check done by callers.
+    INode dstParent = dst.getINode(-2);
+    // Verify the quota delta using the destination parent's storage policy.
+    final QuotaCounts delta = src.getLastINode()
+        .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
+            Snapshot.CURRENT_STATE_ID);
 
     // Reduce the required quota by dst that is being removed
     final INode dstINode = dst.getLastINode();
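
A rough worked example of why the policy used for the delta matters, using the parameters of the new test below (a 1024-byte file with replication 3; the figures ignore under-construction rounding):

    HOT     : typeConsumed(DISK) = 3 x 1024 = 3072, typeConsumed(SSD) = 0
    ALL_SSD : typeConsumed(DISK) = 0,               typeConsumed(SSD) = 3 x 1024 = 3072

Computed with the source's HOT policy, the file never counts against an SSD quota, so the 10-byte SSD quota set on the destination in the test would not trip; computed with the destination parent's ALL_SSD policy, the same file accounts for 3072 bytes of SSD, which is what the destination actually has to admit after the rename.
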
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 45f859c..b2b42e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1296,7 +1297,9 @@ public class FSDirectory implements Closeable {
     // always verify inode name
     verifyINodeName(inode.getLocalNameBytes());
 
-    final QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite());
+    final QuotaCounts counts = inode
+        .computeQuotaUsage(getBlockStoragePolicySuite(),
+            parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index f5d232c..d117b1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -1579,6 +1579,44 @@ public class TestQuota {
   }
 
   @Test
+  public void testRename() throws Exception {
+    int fileLen = 1024;
+    short replication = 3;
+
+    final Path parent = new Path(PathUtils.getTestDir(getClass()).getPath(),
+        GenericTestUtils.getMethodName());
+    assertTrue(dfs.mkdirs(parent));
+
+    final Path srcDir = new Path(parent, "src-dir");
+    Path file = new Path(srcDir, "file1");
+    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0);
+    dfs.setStoragePolicy(srcDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+
+    final Path dstDir = new Path(parent, "dst-dir");
+    assertTrue(dfs.mkdirs(dstDir));
+    dfs.setStoragePolicy(dstDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
+
+    dfs.setQuota(srcDir, 100000, 100000);
+    dfs.setQuota(dstDir, 100000, 100000);
+
+    Path dstFile = new Path(dstDir, "file1");
+    // Test the quota check on rename; expect a QuotaExceededException.
+    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, 10);
+    try {
+      dfs.rename(file, dstFile);
+      fail("Expect QuotaExceededException.");
+    } catch (QuotaExceededException qe) {
+    }
+
+    // Set enough quota, expect a successful rename.
+    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, fileLen * replication);
+    dfs.rename(file, dstFile);
+    // Verify the storage type usage is properly updated on source and dst.
+    checkQuotaAndCount(dfs, srcDir);
+    checkQuotaAndCount(dfs, dstDir);
+  }
+
+  @Test
   public void testSpaceQuotaExceptionOnAppend() throws Exception {
     GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.TRACE);
@@ -1648,4 +1686,15 @@ public class TestQuota {
     }
     scanner.close();
   }
+
+  // The quota usage and the content summary should match for every type.
+  private void checkQuotaAndCount(DistributedFileSystem fs, Path path)
+      throws IOException {
+    QuotaUsage qu = fs.getQuotaUsage(path);
+    ContentSummary cs = fs.getContentSummary(path);
+    for (StorageType st : StorageType.values()) {
+      // Without the fix this fails: type consumption is mishandled on rename.
+      assertEquals(qu.getTypeConsumed(st), cs.getTypeConsumed(st));
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org