You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2022/05/27 03:36:16 UTC
[hadoop] branch trunk updated: HDFS-15225. RBF: Add snapshot counts to content summary in router. (#4356). Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 96985f4c452 HDFS-15225. RBF: Add snapshot counts to content summary in router. (#4356). Contributed by Ayush Saxena.
96985f4c452 is described below
commit 96985f4c452405cd71cdd0b7601729554f370a81
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Fri May 27 09:06:07 2022 +0530
HDFS-15225. RBF: Add snapshot counts to content summary in router. (#4356). Contributed by Ayush Saxena.
Reviewed-by: Inigo Goiri <in...@apache.org>
---
.../federation/router/RouterClientProtocol.java | 6 +++
.../server/federation/router/TestRouterRpc.java | 43 ++++++++++++++++++++++
2 files changed, 49 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 1bd7d65836d..469b16178a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1903,6 +1903,8 @@ public class RouterClientProtocol implements ClientProtocol {
long quota = 0;
long spaceConsumed = 0;
long spaceQuota = 0;
+ long snapshotDirectoryCount = 0;
+ long snapshotFileCount = 0;
String ecPolicy = "";
for (ContentSummary summary : summaries) {
@@ -1912,6 +1914,8 @@ public class RouterClientProtocol implements ClientProtocol {
quota = summary.getQuota();
spaceConsumed += summary.getSpaceConsumed();
spaceQuota = summary.getSpaceQuota();
+ snapshotDirectoryCount += summary.getSnapshotDirectoryCount();
+ snapshotFileCount += summary.getSnapshotFileCount();
// We return from the first response as we assume that the EC policy
// of each sub-cluster is the same.
if (ecPolicy.isEmpty()) {
@@ -1927,6 +1931,8 @@ public class RouterClientProtocol implements ClientProtocol {
.spaceConsumed(spaceConsumed)
.spaceQuota(spaceQuota)
.erasureCodingPolicy(ecPolicy)
+ .snapshotDirectoryCount(snapshotDirectoryCount)
+ .snapshotFileCount(snapshotFileCount)
.build();
return ret;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 96e77d58007..4aeb2ec9b8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -56,6 +56,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
@@ -2004,4 +2005,46 @@ public class TestRouterRpc {
assertFalse(auditLog.getOutput().contains("clientIp:1.1.1.1"));
assertFalse(auditLog.getOutput().contains("clientPort:1234"));
}
+
+ @Test
+ public void testContentSummaryWithSnapshot() throws Exception {
+ DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
+ Path dirPath = new Path("/testdir");
+ Path subdirPath = new Path(dirPath, "subdir");
+ Path filePath1 = new Path(dirPath, "file");
+ Path filePath2 = new Path(subdirPath, "file2");
+
+ // Create directories.
+ routerDFS.mkdirs(dirPath);
+ routerDFS.mkdirs(subdirPath);
+
+ // Create files.
+ createFile(routerDFS, filePath1.toString(), 32);
+ createFile(routerDFS, filePath2.toString(), 16);
+
+ // Allow & Create snapshot.
+ routerDFS.allowSnapshot(dirPath);
+ routerDFS.createSnapshot(dirPath, "s1");
+
+ try {
+ // Check content summary, snapshot count should be 0
+ ContentSummary contentSummary = routerDFS.getContentSummary(dirPath);
+ assertEquals(0, contentSummary.getSnapshotDirectoryCount());
+ assertEquals(0, contentSummary.getSnapshotFileCount());
+
+ // Delete the file & subdir (total: 2 files and 1 directory deleted)
+ routerDFS.delete(filePath1, true);
+ routerDFS.delete(subdirPath, true);
+
+ // Get the Content Summary
+ contentSummary = routerDFS.getContentSummary(dirPath);
+ assertEquals(1, contentSummary.getSnapshotDirectoryCount());
+ assertEquals(2, contentSummary.getSnapshotFileCount());
+ } finally {
+ // Cleanup
+ routerDFS.deleteSnapshot(dirPath, "s1");
+ routerDFS.disallowSnapshot(dirPath);
+ routerDFS.delete(dirPath, true);
+ }
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org