You are viewing a plain text version of this content; the canonical HTML version is available from the Apache mailing list archive.
Posted to common-commits@hadoop.apache.org by st...@apache.org on 2022/04/13 16:32:56 UTC
[hadoop] 09/16: HDFS-16428. Source path with storagePolicy cause wrong typeConsumed while rename (#3898). Contributed by lei w.
This is an automated email from the ASF dual-hosted git repository.
stevel pushed a commit to branch branch-3.3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit f7f630b7132d70a13ffbc69a7a3dd01ccfa91471
Author: Thinker313 <47...@users.noreply.github.com>
AuthorDate: Tue Jan 25 15:26:18 2022 +0800
HDFS-16428. Source path with storagePolicy cause wrong typeConsumed while rename (#3898). Contributed by lei w.
Signed-off-by: Ayush Saxena <ay...@apache.org>
Signed-off-by: He Xiaoqiao <he...@apache.org>
---
.../hadoop/hdfs/server/namenode/FSDirRenameOp.java | 6 +++-
.../hadoop/hdfs/server/namenode/FSDirectory.java | 6 +++-
.../apache/hadoop/hdfs/server/namenode/INode.java | 10 ++++++
.../java/org/apache/hadoop/hdfs/TestQuota.java | 39 ++++++++++++++++++++++
4 files changed, 59 insertions(+), 2 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c60acaa0031..ee0bf8a5fb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -80,8 +80,12 @@ class FSDirRenameOp {
// Assume dstParent existence check done by callers.
INode dstParent = dst.getINode(-2);
// Use the destination parent's storage policy for quota delta verify.
+ final boolean isSrcSetSp = src.getLastINode().isSetStoragePolicy();
+ final byte storagePolicyID = isSrcSetSp ?
+ src.getLastINode().getLocalStoragePolicyID() :
+ dstParent.getStoragePolicyID();
final QuotaCounts delta = src.getLastINode()
- .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
+ .computeQuotaUsage(bsps, storagePolicyID, false,
Snapshot.CURRENT_STATE_ID);
// Reduce the required quota by dst that is being removed
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7b902d5ff1b..fc17eaebf7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1363,9 +1363,13 @@ public class FSDirectory implements Closeable {
// always verify inode name
verifyINodeName(inode.getLocalNameBytes());
+ final boolean isSrcSetSp = inode.isSetStoragePolicy();
+ final byte storagePolicyID = isSrcSetSp ?
+ inode.getLocalStoragePolicyID() :
+ parent.getStoragePolicyID();
final QuotaCounts counts = inode
.computeQuotaUsage(getBlockStoragePolicySuite(),
- parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
+ storagePolicyID, false, Snapshot.CURRENT_STATE_ID);
updateCount(existing, pos, counts, checkQuota);
boolean isRename = (inode.getParent() != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 03f01eb32ee..8e417fe43aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -340,6 +340,16 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
return false;
}
+ /**
+ * Check if this inode itself has a storage policy explicitly set (symlinks never carry one).
+ */
+ public boolean isSetStoragePolicy() {
+ if (isSymlink()) {
+ return false;
+ }
+ return getLocalStoragePolicyID() != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+ }
+
/** Cast this inode to an {@link INodeFile}. */
public INodeFile asFile() {
throw new IllegalStateException("Current inode is not a file: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 79088d3be85..e14ea4dc265 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -958,6 +959,44 @@ public class TestQuota {
6 * fileSpace);
}
+ @Test
+ public void testRenameInodeWithStorageType() throws IOException {
+ final int size = 64;
+ final short repl = 1;
+ final Path foo = new Path("/foo");
+ final Path bs1 = new Path(foo, "bs1");
+ final Path wow = new Path(bs1, "wow");
+ final Path bs2 = new Path(foo, "bs2");
+ final Path wow2 = new Path(bs2, "wow2");
+ final Path wow3 = new Path(bs2, "wow3");
+
+ dfs.mkdirs(bs1, FsPermission.getDirDefault());
+ dfs.mkdirs(bs2, FsPermission.getDirDefault());
+ dfs.setQuota(bs1, 1000, 434217728);
+ dfs.setQuota(bs2, 1000, 434217728);
+ // file wow3 without storage policy
+ DFSTestUtil.createFile(dfs, wow3, size, repl, 0);
+
+ dfs.setStoragePolicy(bs2, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+
+ DFSTestUtil.createFile(dfs, wow, size, repl, 0);
+ DFSTestUtil.createFile(dfs, wow2, size, repl, 0);
+ assertTrue("Without storage policy, typeConsumed should be 0.",
+ dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0);
+ assertTrue("With storage policy, typeConsumed should not be 0.",
+ dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) != 0);
+ // wow3 has no storage policy, so renaming it into bs1 must not change dst typeConsumed
+ dfs.rename(wow3, bs1);
+ assertTrue("Rename src without storagePolicy, dst typeConsumed should not be changed.",
+ dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0);
+
+ long srcTypeQuota = dfs.getQuotaUsage(bs2).getTypeQuota(StorageType.SSD);
+ dfs.rename(bs2, bs1);
+ long dstTypeQuota = dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD);
+ assertTrue("Rename with storage policy, typeConsumed should not be 0.",
+ dstTypeQuota != srcTypeQuota);
+ }
+
private static void checkContentSummary(final ContentSummary expected,
final ContentSummary computed) {
assertEquals(expected.toString(), computed.toString());
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org