Posted to common-commits@hadoop.apache.org by so...@apache.org on 2021/07/28 14:28:34 UTC

[hadoop] branch branch-3.3 updated: HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer see attribute provider permissions). Contributed by Stephen O'Donnell

This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new d661afc  HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer see attribute provider permissions). Contributed by Stephen O'Donnell
d661afc is described below

commit d661afc06f40008296afb71012bcb33977e23dce
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Wed Jul 28 14:49:23 2021 +0100

    HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer see attribute provider permissions). Contributed by Stephen O'Donnell
    
    (cherry picked from commit 4eae284827c040cd9fd0eab2e7fd8aaae721326e)
---
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  18 +--
 .../hdfs/server/namenode/FSPermissionChecker.java  |  44 +++---
 .../hadoop/hdfs/server/namenode/INodesInPath.java  |  21 ---
 .../namenode/TestINodeAttributeProvider.java       | 155 +++++++--------------
 4 files changed, 71 insertions(+), 167 deletions(-)
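
For context: an INodeAttributeProvider lets an external authorizer (Apache
Ranger, for example) substitute its own owner, group, permissions and ACLs
for the attributes the NameNode has stored for an inode. Below is a minimal
sketch of the plugin surface this revert touches, assuming only the methods
exercised by the diff that follows; the class name and the logging are
illustrative, not part of this commit:

    import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
    import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

    // Hypothetical provider: logs the path components the NameNode hands it
    // and returns the stored attributes unchanged. A real provider would
    // substitute its own permissions here.
    public class LoggingAttributeProvider extends INodeAttributeProvider {
      @Override
      public void start() { }

      @Override
      public void stop() { }

      @Override
      public INodeAttributes getAttributes(String[] pathElements,
          INodeAttributes inode) {
        // Under HDFS-15372, a request for /d/.snapshot/sn/data arrived here
        // resolved to /d/data; after this revert it arrives with its literal
        // components again.
        System.out.println("getAttributes: /" + String.join("/", pathElements));
        return inode;
      }
    }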

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index bd2ff52..7b902d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2052,23 +2052,7 @@ public class FSDirectory implements Closeable {
       // first empty component for the root. However, file status
       // related calls are expected to strip out the root component according
       // to TestINodeAttributeProvider.
-      // Due to HDFS-15372 the attribute provider should receive the resolved
-      // snapshot path. That is, rather than seeing /d/.snapshot/sn/data it
-      // should see /d/data. However, for the path /d/.snapshot/sn it should
-      // see this full path. If the current inode is the snapshot name, it
-      // always has the same ID as its parent inode, so we can use that to
-      // check if it is the path which needs to be handled specially.
-      byte[][] components;
-      INodeDirectory parent = node.getParent();
-      if (iip.isSnapshot()
-          && parent != null && parent.getId() != node.getId()) {
-        // For snapshot paths, we always use node.getPathComponents so the
-        // snapshot path is resolved to the real path, unless the last component
-        // is the snapshot name root directory.
-        components = node.getPathComponents();
-      } else {
-        components = iip.getPathComponents();
-      }
+      byte[][] components = iip.getPathComponents();
       components = Arrays.copyOfRange(components, 1, components.length);
       nodeAttrs = ap.getAttributes(components, nodeAttrs);
     }
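
The effect of this hunk as a worked example, with values assumed from the
deleted comment (a directory /d containing a snapshot sn): HDFS-15372 handed
the provider the resolved live path, while the reverted code hands it the
literal components of whatever path the caller used.

    // Illustrative values only:
    //   with HDFS-15372:  node.getPathComponents() -> [null, d, data]
    //                     provider saw [d, data]
    //   after the revert: iip.getPathComponents()  -> [null, d, .snapshot, sn, data]
    //                     provider saw [d, .snapshot, sn, data]
    byte[][] components = iip.getPathComponents();
    // Strip the leading empty root component before calling the provider.
    components = Arrays.copyOfRange(components, 1, components.length);
    nodeAttrs = ap.getAttributes(components, nodeAttrs);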
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index a539bf6..92e5858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Stack;
@@ -208,7 +207,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length];
     final byte[][] components = inodesInPath.getPathComponents();
     for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
-      inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId);
+      inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId);
     }
 
     String path = inodesInPath.getPath();
@@ -258,7 +257,8 @@ public class FSPermissionChecker implements AccessControlEnforcer {
   void checkPermission(INode inode, int snapshotId, FsAction access)
       throws AccessControlException {
     byte[][] pathComponents = inode.getPathComponents();
-    INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId);
+    INodeAttributes nodeAttributes = getINodeAttrs(pathComponents,
+        pathComponents.length - 1, inode, snapshotId);
     try {
       INodeAttributes[] iNodeAttr = {nodeAttributes};
       AccessControlEnforcer enforcer = getAccessControlEnforcer();
@@ -367,31 +367,23 @@ public class FSPermissionChecker implements AccessControlEnforcer {
         authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir());
   }
 
-  private INodeAttributes getINodeAttrs(INode inode, int snapshotId) {
+  private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
+      INode inode, int snapshotId) {
     INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
-    /**
-     * This logic is similar to {@link FSDirectory#getAttributes()} and it
-     * ensures that the attribute provider sees snapshot paths resolved to their
-     * original location. This means the attributeProvider can apply permissions
-     * to the snapshot paths in the same way as the live paths. See HDFS-15372.
-     */
     if (getAttributesProvider() != null) {
+      String[] elements = new String[pathIdx + 1];
       /**
-       * If we have an inode representing a path like /d/.snapshot/snap1
-       * then calling inode.getPathComponents returns [null, d, snap1]. If we
-       * call inode.getFullPathName() it will return /d/.snapshot/snap1. For
-       * this special path (snapshot root) the attribute provider should see:
-       *
-       * [null, d, .snapshot/snap1]
-       *
-       * Using IIP.resolveFromRoot, it will take the inode fullPathName and
-       * construct an IIP object that gives the correct components as above.
+       * {@link INode#getPathComponents(String)} returns a null component
+       * for the root only path "/". Assign an empty string if so.
        */
-      INodesInPath iip = INodesInPath.resolveFromRoot(inode);
-      byte[][] components = iip.getPathComponents();
-      components = Arrays.copyOfRange(components, 1, components.length);
-      inodeAttrs = getAttributesProvider()
-          .getAttributes(components, inodeAttrs);
+      if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
+        elements[0] = "";
+      } else {
+        for (int i = 0; i < elements.length; i++) {
+          elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+        }
+      }
+      inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
     }
     return inodeAttrs;
   }
@@ -447,7 +439,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
       if (!(cList.isEmpty() && ignoreEmptyDir)) {
         //TODO have to figure this out with inodeattribute provider
         INodeAttributes inodeAttr =
-            getINodeAttrs(d, snapshotId);
+            getINodeAttrs(components, pathIdx, d, snapshotId);
         if (!hasPermission(inodeAttr, access)) {
           throw new AccessControlException(
               toAccessControlString(inodeAttr, d.getFullPathName(), access));
@@ -465,7 +457,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
         if (inodeAttr.getFsPermission().getStickyBit()) {
           for (INode child : cList) {
             INodeAttributes childInodeAttr =
-                getINodeAttrs(child, snapshotId);
+                getINodeAttrs(components, pathIdx, child, snapshotId);
             if (isStickyBitViolated(inodeAttr, childInodeAttr)) {
               List<byte[]> allComponentList = new ArrayList<>();
               for (int i = 0; i <= pathIdx; ++i) {
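
The restored getINodeAttrs converts the first pathIdx + 1 components from
byte[] to String before calling the provider, with a single special case:
for the root-only path "/" the component array is [null], per the javadoc
added above, so an empty string is assigned instead. A standalone sketch of
that conversion (the helper name is hypothetical; DFSUtil is
org.apache.hadoop.hdfs.DFSUtil):

    // Hypothetical helper mirroring the restored conversion logic.
    static String[] prefixElements(byte[][] pathByNameArr, int pathIdx) {
      String[] elements = new String[pathIdx + 1];
      if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
        // Root-only path "/": the single null component becomes "".
        elements[0] = "";
      } else {
        for (int i = 0; i < elements.length; i++) {
          elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
        }
      }
      return elements;
    }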
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 8a150f0..c2cdd48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -135,27 +135,6 @@ public class INodesInPath {
     return resolve(startingDir, components, false);
   }
 
-  /**
-   * Retrieves the existing INodes from a path, starting at the root directory.
-   * The root directory is located by following the parent link in the inode
-   * recursively until the final root inode is found.
-   * The inodes returned will depend upon the output of inode.getFullPathName().
-   * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to:
-   *     [null, data, .snapshot/snap1]
-   * For a file in the snapshot, as inode.getFullPathName resolves the snapshot
-   * information, the returned inodes for a path like /data/.snapshot/snap1/d1
-   * would be:
-   *     [null, data, d1]
-   * @param inode the {@link INode} to be resolved
-   * @return INodesInPath
-   */
-  static INodesInPath resolveFromRoot(INode inode) {
-    INode[] inodes = getINodes(inode);
-    byte[][] paths = INode.getPathComponents(inode.getFullPathName());
-    INodeDirectory rootDir = inodes[0].asDirectory();
-    return resolve(rootDir, paths);
-  }
-
   static INodesInPath resolve(final INodeDirectory startingDir,
       byte[][] components, final boolean isRaw) {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
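
For reference, the deleted resolveFromRoot derived its components from
inode.getFullPathName(), so the result depended on how that method renders
snapshot paths. Illustrative values below follow the deleted javadoc
(inodeFor is a hypothetical lookup, not a real API):

    // Snapshot root: getFullPathName() keeps the snapshot marker, so the
    // last component is the combined ".snapshot/snap1":
    //   resolveFromRoot(inodeFor("/data/.snapshot/snap1"))
    //       .getPathComponents() -> [null, data, .snapshot/snap1]
    // File in a snapshot: getFullPathName() resolves to the live location:
    //   resolveFromRoot(inodeFor("/data/.snapshot/snap1/d1"))
    //       .getPathComponents() -> [null, data, d1]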
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index f4c5763..699ac17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +34,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
@@ -81,7 +82,6 @@ public class TestINodeAttributeProvider {
               ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
         }
         CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
-        CALLED.add("checkPermission|" + path);
       }
 
       @Override
@@ -89,13 +89,13 @@ public class TestINodeAttributeProvider {
           AuthorizationContext authzContext) throws AccessControlException {
         if (authzContext.getAncestorIndex() > 1
             && authzContext.getInodes()[1].getLocalName().equals("user")
-            && authzContext.getInodes()[2].getLocalName().equals("acl")) {
+            && authzContext.getInodes()[2].getLocalName().equals("acl")
+            || runPermissionCheck) {
           this.ace.checkPermissionWithContext(authzContext);
         }
         CALLED.add("checkPermission|" + authzContext.getAncestorAccess()
             + "|" + authzContext.getParentAccess() + "|" + authzContext
             .getAccess());
-        CALLED.add("checkPermission|" + authzContext.getPath());
       }
     }
 
@@ -112,12 +112,7 @@ public class TestINodeAttributeProvider {
     @Override
     public INodeAttributes getAttributes(String[] pathElements,
         final INodeAttributes inode) {
-      String fullPath = String.join("/", pathElements);
-      if (!fullPath.startsWith("/")) {
-        fullPath = "/" + fullPath;
-      }
       CALLED.add("getAttributes");
-      CALLED.add("getAttributes|"+fullPath);
       final boolean useDefault = useDefault(pathElements);
       final boolean useNullAcl = useNullAclFeature(pathElements);
       return new INodeAttributes() {
@@ -495,109 +490,63 @@ public class TestINodeAttributeProvider {
   }
 
   @Test
-  // HDFS-15372 - Attribute provider should not see the snapshot path as it
-  // should be resolved into the original path name before it hits the provider.
-  public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception {
+  // See HDFS-16132 where an issue was reported after HDFS-15372. The sequence
+  // of operations here causes that change to break and the test fails with:
+  // org.apache.hadoop.ipc.RemoteException(java.lang.AssertionError):
+  //     Absolute path required, but got 'foo'
+  //  at org.apache.hadoop.hdfs.server.namenode.INode.checkAbsolutePath
+  //    (INode.java:838)
+  //  at org.apache.hadoop.hdfs.server.namenode.INode.getPathComponents
+  //    (INode.java:813)
+  // After reverting HDFS-15372 the test passes, so including this test in the
+  // revert for future reference.
+  public void testAttrProviderWorksCorrectlyOnRenamedSnapshotPaths()
+      throws Exception {
+    runPermissionCheck = true;
     FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
     DistributedFileSystem hdfs = miniDFS.getFileSystem();
-    final Path userPath = new Path("/user");
-    final Path authz = new Path("/user/authz");
-    final Path authzChild = new Path("/user/authz/child2");
-
-    fs.mkdirs(userPath);
-    fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authz);
-    hdfs.allowSnapshot(userPath);
-    fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authzChild);
-    fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
-    fs.createSnapshot(userPath, "snapshot_1");
-    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
-        new String[]{"g1"});
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-        final Path snapChild =
-            new Path("/user/.snapshot/snapshot_1/authz/child2");
-        // Run various methods on the path to access the attributes etc.
-        fs.getAclStatus(snapChild);
-        fs.getContentSummary(snapChild);
-        fs.getFileStatus(snapChild);
-        Assert.assertFalse(CALLED.contains("getAttributes|" +
-            snapChild.toString()));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2"));
-        // The snapshot path should be seen by the permission checker, but when
-        // it checks access, the paths will be resolved so the attributeProvider
-        // only sees the resolved path.
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|" + snapChild.toString()));
-        CALLED.clear();
-        fs.getAclStatus(new Path("/"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/authz"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-        return null;
-      }
-    });
-    // Delete the files / folders covered by the snapshot, then re-check they
-    // are all readable correctly.
-    fs.delete(authz, true);
+    final Path parent = new Path("/user");
+    hdfs.mkdirs(parent);
+    fs.setPermission(parent, new FsPermission(HDFS_PERMISSION));
+    final Path sub1 = new Path(parent, "sub1");
+    final Path sub1foo = new Path(sub1, "foo");
+    hdfs.mkdirs(sub1);
+    hdfs.mkdirs(sub1foo);
+    Path f = new Path(sub1foo, "file0");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    hdfs.allowSnapshot(parent);
+    hdfs.createSnapshot(parent, "s0");
+
+    f = new Path(sub1foo, "file1");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    f = new Path(sub1foo, "file2");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+
+    final Path sub2 = new Path(parent, "sub2");
+    hdfs.mkdirs(sub2);
+    final Path sub2foo = new Path(sub2, "foo");
+    // mv /parent/sub1/foo to /parent/sub2/foo
+    hdfs.rename(sub1foo, sub2foo);
+
+    hdfs.createSnapshot(parent, "s1");
+    hdfs.createSnapshot(parent, "s2");
+
+    final Path sub3 = new Path(parent, "sub3");
+    hdfs.mkdirs(sub3);
+    // mv /parent/sub2/foo to /parent/sub3/foo
+    hdfs.rename(sub2foo, sub3);
+
+    hdfs.delete(sub3, true);
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting("u1", new String[] {"g1"});
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
         FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-
+        ((DistributedFileSystem)fs).getSnapshotDiffReport(parent, "s1", "s2");
         CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
         return null;
       }
     });
-
   }
 }
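
Why the rename-and-delete sequence in the test above broke HDFS-15372, as a
sketch inferred from the stack trace quoted in the test comment (the exact
mechanism is an assumption, not stated in this commit): resolveFromRoot
called inode.getFullPathName(), and for an inode that survives only inside a
snapshot after the renames and the delete, that call can return a bare local
name such as "foo", which INode.getPathComponents then rejects.

    // Illustrative failing call chain, per the AssertionError above:
    String full = inode.getFullPathName();           // can be just "foo"
    byte[][] comps = INode.getPathComponents(full);  // throws AssertionError:
        // "Absolute path required, but got 'foo'"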
