Posted to common-commits@hadoop.apache.org by we...@apache.org on 2020/08/27 06:13:45 UTC

[hadoop] branch branch-3.3 updated (ee7d214 -> a9ce600)

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from ee7d214  Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"
     new 9eaa352  HDFS-15243. Add an option to prevent sub-directories of protected directories from deletion. Contributed by liuyanyu.
     new a9ce600  HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  |  12 ++
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |   5 +
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  10 ++
 .../src/main/resources/hdfs-default.xml            |   8 ++
 .../server/namenode/TestProtectedDirectories.java  | 151 +++++++++++++++++++++
 6 files changed, 196 insertions(+)
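
Taken together, the two commits let an administrator extend the existing
fs.protected.directories setting so that non-empty subdirectories of a
protected directory can no longer be deleted or renamed (including renames
into the trash). A minimal sketch of how the new flag would be enabled,
assuming a standard HdfsConfiguration; the paths below are invented for
illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ProtectedSubdirsExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Existing setting: comma-separated directories that cannot be
        // deleted or renamed while non-empty, even by an administrator.
        conf.set("fs.protected.directories", "/data/warehouse,/data/landing");
        // New flag from HDFS-15243: extend that protection to non-empty
        // subdirectories of the directories listed above.
        conf.setBoolean("dfs.protected.subdirectories.enable", true);
      }
    }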




[hadoop] 01/02: HDFS-15243. Add an option to prevent sub-directories of protected directories from deletion. Contributed by liuyanyu.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9eaa3520e6b38f337743b2f6da15f7e12be3d637
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Tue May 12 13:11:31 2020 +0530

    HDFS-15243. Add an option to prevent sub-directories of protected directories from deletion. Contributed by liuyanyu.
    
    (cherry picked from commit 0fe49036e557f210a390e07276f5732bc212ae32)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 10 +++
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  | 12 ++++
 .../hadoop/hdfs/server/namenode/FSDirectory.java   | 10 +++
 .../src/main/resources/hdfs-default.xml            |  8 +++
 .../server/namenode/TestProtectedDirectories.java  | 81 ++++++++++++++++++++++
 5 files changed, 121 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e4a710f..4b8c27b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1471,6 +1471,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.state.context.enabled";
   public static final boolean DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT = false;
 
+  /**
+   * Whether to protect the subdirectories of directories
+   * configured in fs.protected.directories.
+   */
+  public static final String DFS_PROTECTED_SUBDIRECTORIES_ENABLE =
+      "dfs.protected.subdirectories.enable";
+  // Default value for DFS_PROTECTED_SUBDIRECTORIES_ENABLE.
+  public static final boolean DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT =
+      false;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 0facfd9..00f14cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1809,6 +1809,18 @@ public class DFSUtil {
             + descendant);
       }
     }
+
+    if (fsd.isProtectedSubDirectoriesEnable()) {
+      while (!src.isEmpty()) {
+        int index = src.lastIndexOf(Path.SEPARATOR_CHAR);
+        src = src.substring(0, index);
+        if (protectedDirs.contains(src)) {
+          throw new AccessControlException(
+              "Cannot delete/rename subdirectory under protected subdirectory "
+              + src);
+        }
+      }
+    }
   }
 
   /**
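
The hunk above is the heart of HDFS-15243: when the new flag is on, DFSUtil
walks from the path being deleted or renamed up through each of its
ancestors and fails fast if any ancestor is itself a protected directory.
A standalone sketch of that walk (the class and method names here are
invented; the real check lives inside DFSUtil.checkProtectedDescendants):

    import java.util.SortedSet;
    import java.util.TreeSet;
    import org.apache.hadoop.security.AccessControlException;

    class AncestorCheckSketch {
      static void checkAncestors(String src, SortedSet<String> protectedDirs)
          throws AccessControlException {
        // Drop one path component per iteration:
        // "/1/2/3" -> "/1/2" -> "/1" -> "".
        while (!src.isEmpty()) {
          src = src.substring(0, src.lastIndexOf('/'));
          if (protectedDirs.contains(src)) {
            throw new AccessControlException(
                "Cannot delete/rename subdirectory under protected directory "
                + src);
          }
        }
      }

      public static void main(String[] args) throws Exception {
        SortedSet<String> protectedDirs = new TreeSet<>();
        protectedDirs.add("/1");
        checkAncestors("/1/2/3", protectedDirs); // throws: /1 is an ancestor
      }
    }

Note that the callers only reach this check for non-empty directories, which
is why the test matrix below still expects the empty leaf /1/2/3/4 to be
deletable.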
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ac976d3..817844f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -89,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECI
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
@@ -169,6 +171,7 @@ public class FSDirectory implements Closeable {
   //
   // Each entry in this set must be a normalized path.
   private volatile SortedSet<String> protectedDirectories;
+  private final boolean isProtectedSubDirectoriesEnable;
 
   private final boolean isPermissionEnabled;
   private final boolean isPermissionContentSummarySubAccess;
@@ -381,6 +384,9 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
 
     this.protectedDirectories = parseProtectedDirectories(conf);
+    this.isProtectedSubDirectoriesEnable = conf.getBoolean(
+        DFS_PROTECTED_SUBDIRECTORIES_ENABLE,
+        DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT);
 
     Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
         "Cannot set a negative limit on the number of xattrs per inode (%s).",
@@ -541,6 +547,10 @@ public class FSDirectory implements Closeable {
     return protectedDirectories;
   }
 
+  public boolean isProtectedSubDirectoriesEnable() {
+    return isProtectedSubDirectoriesEnable;
+  }
+
   /**
    * Set directories that cannot be removed unless empty, even by an
    * administrator.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9c31c18..00d5dfd 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5913,4 +5913,12 @@
       directories when permissions is enabled. Default value is false;
     </description>
   </property>
+
+  <property>
+    <name>dfs.protected.subdirectories.enable</name>
+    <value>false</value>
+    <description>Whether to protect the subdirectories of directories
+      configured in fs.protected.directories.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index 67d2ffb..c15af55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.*;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -193,6 +194,25 @@ public class TestProtectedDirectories {
     return matrix;
   }
 
+  private Collection<TestMatrixEntry> createTestMatrixForProtectSubDirs() {
+    Collection<TestMatrixEntry> matrix = new ArrayList<TestMatrixEntry>();
+
+    // Nested unprotected dirs.
+    matrix.add(TestMatrixEntry.get()
+            .addUnprotectedDir("/1", true)
+            .addUnprotectedDir("/1/2", true)
+            .addUnprotectedDir("/1/2/3", true)
+            .addUnprotectedDir("/1/2/3/4", true));
+
+    // Non-empty protected dir.
+    matrix.add(TestMatrixEntry.get()
+            .addProtectedDir("/1", false)
+            .addUnprotectedDir("/1/2", false)
+            .addUnprotectedDir("/1/2/3", false)
+            .addUnprotectedDir("/1/2/3/4", true));
+    return matrix;
+  }
+
   @Test
   public void testReconfigureProtectedPaths() throws Throwable {
     Configuration conf = new HdfsConfiguration();
@@ -292,6 +312,67 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testRenameProtectSubDirs() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+            createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      MiniDFSCluster cluster = setupTestCase(
+              conf, testMatrixEntry.getProtectedPaths(),
+              testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+                  testMatrixEntry + ": Testing whether "
+                          + srcPath + " can be renamed",
+                  renamePath(fs, srcPath,
+                          new Path(srcPath.toString() + "_renamed")),
+                  is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
+  public void testDeleteProtectSubDirs() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+            createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      MiniDFSCluster cluster = setupTestCase(
+              conf, testMatrixEntry.getProtectedPaths(),
+              testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          final long countBefore = cluster.getNamesystem().getFilesTotal();
+          assertThat(
+                  testMatrixEntry + ": Testing whether "
+                          + path + " can be deleted",
+                  deletePath(fs, path),
+                  is(testMatrixEntry.canPathBeDeleted(path)));
+          final long countAfter = cluster.getNamesystem().getFilesTotal();
+
+          if (!testMatrixEntry.canPathBeDeleted(path)) {
+            assertThat(
+                    "Either all paths should be deleted or none",
+                    countAfter, is(countBefore));
+          }
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Verify that configured paths are normalized by removing
    * redundant separators.
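
To make the second matrix entry concrete: with /1 protected and the new flag
enabled, deletes and renames of /1, /1/2 and /1/2/3 are expected to fail,
while the empty leaf /1/2/3/4 still succeeds, because the protected-descendant
check only applies to non-empty directories. A hedged sketch of that
expectation, using the deletePath and renamePath helpers from this test class
against a cluster set up as in the tests above:

    // Assumes a MiniDFSCluster configured with fs.protected.directories=/1
    // and dfs.protected.subdirectories.enable=true.
    FileSystem fs = cluster.getFileSystem();
    assertFalse(deletePath(fs, new Path("/1/2")));      // blocked: under /1
    assertFalse(renamePath(fs, new Path("/1/2/3"),
        new Path("/1/2/3_renamed")));                   // blocked: under /1
    assertTrue(deletePath(fs, new Path("/1/2/3/4")));   // empty leaf: allowed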




[hadoop] 02/02: HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a9ce6001eaf3b94ca15ccdb0330784a078b9761d
Author: Stephen O'Donnell <so...@apache.org>
AuthorDate: Wed Aug 26 23:04:56 2020 -0700

    HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.
    
    Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
    (cherry picked from commit 2ffe00fc46aa74929e722dc1804fb0b3d48ee7a9)
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  5 ++
 .../server/namenode/TestProtectedDirectories.java  | 70 ++++++++++++++++++++++
 2 files changed, 75 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 602f996..423f3a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -262,6 +262,11 @@ class FSDirRenameOp {
           throws IOException {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+    if (fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
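
The fix itself is small because, in HDFS, moving a path to the trash is just
a rename into the user's .Trash directory; routing every rename of a
non-empty source through DFSUtil.checkProtectedDescendants therefore closes
the loophole. A hedged sketch of the client-side call this change affects
(Trash.moveToAppropriateTrash is the same API the tests below exercise; the
wrapper class and method are invented for illustration):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    class TrashMoveSketch {
      static boolean tryTrash(FileSystem fs, Path path, Configuration conf)
          throws IOException {
        // Moving to trash renames the path into /user/<user>/.Trash under
        // the hood; after HDFS-15540 a protected, non-empty source fails
        // here with AccessControlException, exactly as a direct delete would.
        return Trash.moveToAppropriateTrash(fs, path, conf);
      }
    }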
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index c15af55..e5f2631 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.*;
 
@@ -284,6 +287,31 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testMoveToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path +
+                  " can be moved to trash",
+              moveToTrash(fs, path, conf),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /*
    * Verify that protected directories could not be renamed.
    */
@@ -340,6 +368,33 @@ public class TestProtectedDirectories {
   }
 
   @Test
+  public void testMoveProtectedSubDirsToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+        createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be moved to trash",
+              moveToTrash(fs, srcPath, conf),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testDeleteProtectSubDirs() throws Throwable {
     for (TestMatrixEntry testMatrixEntry :
             createTestMatrixForProtectSubDirs()) {
@@ -465,6 +520,21 @@ public class TestProtectedDirectories {
     }
   }
 
+  private boolean moveToTrash(FileSystem fs, Path path, Configuration conf) {
+    try {
+      return Trash.moveToAppropriateTrash(fs, path, conf);
+    } catch (FileNotFoundException fnf) {
+      // fs.delete(...) does not throw an exception if the file does not exist.
+      // The deletePath method in this class will therefore return true when
+      // asked to delete a file that does not exist. This exception is
+      // therefore caught and true returned, keeping the behaviour consistent
+      // and letting the tests work with the same test matrix.
+      return true;
+    } catch (IOException ace) {
+      return false;
+    }
+  }
+
   /**
    * Return true if the path was successfully renamed. False if it
    * failed with AccessControlException. Any other exceptions are
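
One subtlety in the moveToTrash helper above: Hadoop's
org.apache.hadoop.security.AccessControlException extends IOException, so a
trash move blocked by the protected-directory check is swallowed by the
catch (IOException) clause and surfaces to the assertions as a plain false
rather than as a test error. A sketch of that mapping (invented path,
assuming the fs and conf from the test setup above):

    try {
      Trash.moveToAppropriateTrash(fs, new Path("/1/2"), conf);
    } catch (org.apache.hadoop.security.AccessControlException ace) {
      // Expected when /1 is protected and subdirectory protection is on;
      // in the helper above this is the case that returns false.
    } catch (IOException ioe) {
      // Any other I/O failure.
    }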


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org