Posted to common-commits@hadoop.apache.org by we...@apache.org on 2020/08/27 06:13:47 UTC
[hadoop] 02/02: HDFS-15540. Directories protected from delete can
still be moved to the trash. Contributed by Stephen O'Donnell.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit a9ce6001eaf3b94ca15ccdb0330784a078b9761d
Author: Stephen O'Donnell <so...@apache.org>
AuthorDate: Wed Aug 26 23:04:56 2020 -0700
HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.
Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
(cherry picked from commit 2ffe00fc46aa74929e722dc1804fb0b3d48ee7a9)
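For context on the change: fs.protected.directories already blocks a direct
delete of a non-empty protected directory, but a trash-enabled removal is a
rename into the user's .Trash directory, and the rename path did not run the
same check. The FSDirRenameOp hunk below adds that check for non-empty source
directories. A minimal sketch of the behaviour the new tests exercise follows;
the class name, paths and MiniDFSCluster wiring are illustrative only and are
not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.AccessControlException;

public class ProtectedDirTrashSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Protect /protected and enable trash (interval is in minutes).
    conf.set(CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/protected");
    conf.setInt("fs.trash.interval", 60);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/protected/child")); // non-empty protected dir

      // A direct delete of a non-empty protected directory is rejected.
      try {
        fs.delete(new Path("/protected"), true);
      } catch (AccessControlException expected) {
        System.out.println("delete rejected: " + expected.getMessage());
      }

      // Before HDFS-15540 this rename into the trash succeeded; with the
      // patch the rename also checks for protected descendants.
      try {
        Trash.moveToAppropriateTrash(fs, new Path("/protected"), conf);
      } catch (AccessControlException expected) {
        System.out.println("trash move rejected: " + expected.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}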
---
.../hadoop/hdfs/server/namenode/FSDirRenameOp.java | 5 ++
.../server/namenode/TestProtectedDirectories.java | 70 ++++++++++++++++++++++
2 files changed, 75 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 602f996..423f3a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -262,6 +262,11 @@ class FSDirRenameOp {
throws IOException {
final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+ if(fsd.isNonEmptyDirectory(srcIIP)) {
+ DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+ }
+
if (fsd.isPermissionEnabled()) {
boolean renameToTrash = false;
if (null != options &&
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index c15af55..e5f2631 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@ import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
@@ -284,6 +287,31 @@ public class TestProtectedDirectories {
}
}
+ @Test
+ public void testMoveToTrash() throws Throwable {
+ for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+ MiniDFSCluster cluster = setupTestCase(
+ conf, testMatrixEntry.getProtectedPaths(),
+ testMatrixEntry.getUnprotectedPaths());
+
+ try {
+ LOG.info("Running {}", testMatrixEntry);
+ FileSystem fs = cluster.getFileSystem();
+ for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+ assertThat(
+ testMatrixEntry + ": Testing whether " + path +
+ " can be moved to trash",
+ moveToTrash(fs, path, conf),
+ is(testMatrixEntry.canPathBeDeleted(path)));
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+ }
+
/*
* Verify that protected directories could not be renamed.
*/
@@ -340,6 +368,33 @@ public class TestProtectedDirectories {
}
@Test
+ public void testMoveProtectedSubDirsToTrash() throws Throwable {
+ for (TestMatrixEntry testMatrixEntry :
+ createTestMatrixForProtectSubDirs()) {
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+ conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+ MiniDFSCluster cluster = setupTestCase(
+ conf, testMatrixEntry.getProtectedPaths(),
+ testMatrixEntry.getUnprotectedPaths());
+
+ try {
+ LOG.info("Running {}", testMatrixEntry);
+ FileSystem fs = cluster.getFileSystem();
+ for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+ assertThat(
+ testMatrixEntry + ": Testing whether "
+ + srcPath + " can be moved to trash",
+ moveToTrash(fs, srcPath, conf),
+ is(testMatrixEntry.canPathBeRenamed(srcPath)));
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
public void testDeleteProtectSubDirs() throws Throwable {
for (TestMatrixEntry testMatrixEntry :
createTestMatrixForProtectSubDirs()) {
@@ -465,6 +520,21 @@ public class TestProtectedDirectories {
}
}
+ private boolean moveToTrash(FileSystem fs, Path path, Configuration conf) {
+ try {
+ return Trash.moveToAppropriateTrash(fs, path, conf);
+ } catch (FileNotFoundException fnf) {
+ // fs.delete(...) does not throw an exception if the file does not exist.
+ // The deletePath method in this class will therefore return true if
+ // there is an attempt to delete a file which does not exist. We catch
+ // this exception and return true here to keep the behaviour consistent
+ // and allow tests to work with the same test matrix.
+ return true;
+ } catch (IOException ace) {
+ return false;
+ }
+ }
+
/**
* Return true if the path was successfully renamed. False if it
* failed with AccessControlException. Any other exceptions are