Posted to common-commits@hadoop.apache.org by we...@apache.org on 2020/04/09 16:19:57 UTC

[hadoop] branch trunk updated: HDFS-15269. NameNode should check the authorization API version only … (#1945)

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 061afcd  HDFS-15269. NameNode should check the authorization API version only … (#1945)
061afcd is described below

commit 061afcdf30ce10d04986672a0583d925d3f8f741
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Thu Apr 9 09:19:35 2020 -0700

    HDFS-15269. NameNode should check the authorization API version only … (#1945)
    
    Reviewed-by: Takanobu Asanuma <ta...@apache.org>
    Reviewed-by: Akira Ajisaka <aa...@apache.org>
---
 .../hadoop/hdfs/server/namenode/FSDirectory.java   | 35 +++++++++++++++++++--
 .../hdfs/server/namenode/FSPermissionChecker.java  | 36 ++++++----------------
 .../server/namenode/TestAuthorizationContext.java  |  9 ++----
 3 files changed, 46 insertions(+), 34 deletions(-)
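
For context on the diff below: the patch moves the reflection probe that
detects support for the new authorization API out of the FSPermissionChecker
constructor and into FSDirectory#setINodeAttributeProvider, so the check runs
once at NameNode initialization instead of on every permission-checker
construction. A minimal, self-contained sketch of that probe technique
follows; CapabilityProbe, LegacyProvider, and ModernProvider are hypothetical
stand-ins for illustration, not Hadoop classes.

  // Sketch of the probe this patch performs: detect whether a provider
  // class itself declares an optional method, and fall back to the old
  // API when it does not. Note that getDeclaredMethod() only sees methods
  // declared directly on the class, not inherited ones.
  public class CapabilityProbe {
    interface AuthorizationContext {}      // stand-in for the real context type

    static class LegacyProvider {          // old-style plugin: no context API
      void checkPermission() {}
    }

    static class ModernProvider {          // new-style plugin: declares the method
      void checkPermissionWithContext(AuthorizationContext ctx) {}
    }

    static boolean supportsContextApi(Class<?> providerClass) {
      try {
        providerClass.getDeclaredMethod(
            "checkPermissionWithContext", AuthorizationContext.class);
        return true;
      } catch (NoSuchMethodException e) {
        return false;
      }
    }

    public static void main(String[] args) {
      System.out.println(supportsContextApi(LegacyProvider.class)); // false
      System.out.println(supportsContextApi(ModernProvider.class)); // true
    }
  }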

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c06b59f..15389d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.annotation.Nullable;
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -203,8 +204,37 @@ public class FSDirectory implements Closeable {
   // will be bypassed
   private HashSet<String> usersToBypassExtAttrProvider = null;
 
-  public void setINodeAttributeProvider(INodeAttributeProvider provider) {
+  // If external inode attribute provider is configured, use the new
+  // authorizeWithContext() API or not.
+  private boolean useAuthorizationWithContextAPI = false;
+
+  public void setINodeAttributeProvider(
+      @Nullable INodeAttributeProvider provider) {
     attributeProvider = provider;
+
+    if (attributeProvider == null) {
+      // attributeProvider is set to null during NN shutdown.
+      return;
+    }
+
+    // if the runtime external authorization provider doesn't support
+    // checkPermissionWithContext(), fall back to the old API
+    // checkPermission().
+    // This check is done only once during NameNode initialization to reduce
+    // runtime overhead.
+    Class[] cArg = new Class[1];
+    cArg[0] = INodeAttributeProvider.AuthorizationContext.class;
+
+    try {
+      Class<?> clazz = attributeProvider.getClass();
+      clazz.getDeclaredMethod("checkPermissionWithContext", cArg);
+      useAuthorizationWithContextAPI = true;
+      LOG.info("Use the new authorization provider API");
+    } catch (NoSuchMethodException e) {
+      useAuthorizationWithContextAPI = false;
+      LOG.info("Fallback to the old authorization provider API because " +
+          "the expected method is not found.");
+    }
   }
 
   /**
@@ -1784,7 +1814,8 @@ public class FSDirectory implements Closeable {
   FSPermissionChecker getPermissionChecker(String fsOwner, String superGroup,
       UserGroupInformation ugi) throws AccessControlException {
     return new FSPermissionChecker(
-        fsOwner, superGroup, ugi, getUserFilteredAttributeProvider(ugi));
+        fsOwner, superGroup, ugi, getUserFilteredAttributeProvider(ugi),
+        useAuthorizationWithContextAPI);
   }
 
   void checkOwner(FSPermissionChecker pc, INodesInPath iip)
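
A note on the design choice visible above: because setINodeAttributeProvider()
runs once during NameNode startup (and again with null at shutdown, which the
early return handles), the reflection probe executes a single time. Its result
is cached in the useAuthorizationWithContextAPI field and handed to each
FSPermissionChecker through the new constructor argument, so individual
permission checks pay no reflection cost at runtime.
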
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index c4fd6a6..c697ead7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -89,11 +89,16 @@ public class FSPermissionChecker implements AccessControlEnforcer {
 
   private static ThreadLocal<String> operationType = new ThreadLocal<>();
 
-
   protected FSPermissionChecker(String fsOwner, String supergroup,
       UserGroupInformation callerUgi,
       INodeAttributeProvider attributeProvider) {
-    boolean useNewAuthorizationWithContextAPI;
+    this(fsOwner, supergroup, callerUgi, attributeProvider, false);
+  }
+
+  protected FSPermissionChecker(String fsOwner, String supergroup,
+      UserGroupInformation callerUgi,
+      INodeAttributeProvider attributeProvider,
+      boolean useAuthorizationWithContextAPI) {
     this.fsOwner = fsOwner;
     this.supergroup = supergroup;
     this.callerUgi = callerUgi;
@@ -102,36 +107,15 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     isSuper = user.equals(fsOwner) || groups.contains(supergroup);
     this.attributeProvider = attributeProvider;
 
-    // If the AccessControlEnforcer supports context enrichment, call
-    // the new API. Otherwise choose the old API.
-    Class[] cArg = new Class[1];
-    cArg[0] = INodeAttributeProvider.AuthorizationContext.class;
-
-    AccessControlEnforcer ace;
     if (attributeProvider == null) {
       // If attribute provider is null, use FSPermissionChecker default
       // implementation to authorize, which supports authorization with context.
-      useNewAuthorizationWithContextAPI = true;
-      LOG.info("Default authorization provider supports the new authorization" +
+      authorizeWithContext = true;
+      LOG.debug("Default authorization provider supports the new authorization" +
           " provider API");
     } else {
-      ace = attributeProvider.getExternalAccessControlEnforcer(this);
-      // if the runtime external authorization provider doesn't support
-      // checkPermissionWithContext(), fall back to the old API
-      // checkPermission().
-      try {
-        Class<?> clazz = ace.getClass();
-        clazz.getDeclaredMethod("checkPermissionWithContext", cArg);
-        useNewAuthorizationWithContextAPI = true;
-        LOG.info("Use the new authorization provider API");
-      } catch (NoSuchMethodException e) {
-        useNewAuthorizationWithContextAPI = false;
-        LOG.info("Fallback to the old authorization provider API because " +
-            "the expected method is not found.");
-      }
+      authorizeWithContext = useAuthorizationWithContextAPI;
     }
-
-    authorizeWithContext = useNewAuthorizationWithContextAPI;
   }
 
   public static void setOperationType(String opType) {
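
The constructor change above follows the usual overload-delegation pattern:
the original four-argument constructor is kept for source compatibility and
simply defaults the new flag to false (the legacy checkPermission() path),
while FSDirectory passes the flag it computed at startup. A schematic sketch
of the pattern, using a hypothetical Checker class rather than the Hadoop one:

  class Checker {
    private final boolean useContextApi;

    Checker() {            // legacy signature, preserved for existing callers
      this(false);         // default to the old API path
    }

    Checker(boolean useContextApi) {
      this.useContextApi = useContextApi;
    }
  }

Note also that when attributeProvider is null, the checker still forces the
flag to true, since the built-in default enforcer supports the context API.
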
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
index eeeea76..1f52cf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
@@ -103,10 +103,7 @@ public class TestAuthorizationContext {
         thenReturn(mockEnforcer);
 
     FSPermissionChecker checker = new FSPermissionChecker(
-        fsOwner, superGroup, ugi, mockINodeAttributeProvider);
-
-    // set operation type to null to force using the legacy API.
-    FSPermissionChecker.setOperationType(null);
+        fsOwner, superGroup, ugi, mockINodeAttributeProvider, false);
 
     when(iip.getPathSnapshotId()).thenReturn(snapshotId);
     when(iip.getINodesArray()).thenReturn(inodes);
@@ -129,10 +126,10 @@ public class TestAuthorizationContext {
     when(mockINodeAttributeProvider.getExternalAccessControlEnforcer(any())).
         thenReturn(mockEnforcer);
 
+    // force it to use the new, checkPermissionWithContext API.
     FSPermissionChecker checker = new FSPermissionChecker(
-        fsOwner, superGroup, ugi, mockINodeAttributeProvider);
+        fsOwner, superGroup, ugi, mockINodeAttributeProvider, true);
 
-    // force it to use the new, checkPermissionWithContext API.
     String operationName = "abc";
     FSPermissionChecker.setOperationType(operationName);
 
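The test changes above stop relying on setOperationType(null) to force the
legacy API and instead pin the API choice explicitly through the new
constructor flag. A hedged, self-contained Mockito sketch of the same testing
idea, using hypothetical Enforcer and Checker types rather than the Hadoop
classes:

  import static org.mockito.Mockito.*;

  interface Enforcer {
    void checkPermission();
    void checkPermissionWithContext(Object ctx);
  }

  class Checker {
    private final Enforcer enforcer;
    private final boolean useContextApi;

    Checker(Enforcer enforcer, boolean useContextApi) {
      this.enforcer = enforcer;
      this.useContextApi = useContextApi;
    }

    void authorize(Object ctx) {
      if (useContextApi) {
        enforcer.checkPermissionWithContext(ctx);  // new API path
      } else {
        enforcer.checkPermission();                // legacy path
      }
    }
  }

  public class CheckerTest {
    public static void main(String[] args) {
      Enforcer mockEnforcer = mock(Enforcer.class);

      new Checker(mockEnforcer, true).authorize("ctx");
      verify(mockEnforcer).checkPermissionWithContext("ctx");  // new API used
      verify(mockEnforcer, never()).checkPermission();         // old API skipped
    }
  }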

