Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2017/09/07 21:36:44 UTC
[31/37] hadoop git commit: HDFS-12357. Let NameNode bypass external attribute provider for configured users. Contributed by Yongjun Zhang, Arun Suresh.
HDFS-12357. Let NameNode bypass external attribute provider for configured users. Contributed by Yongjun Zhang, Arun Suresh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d77ed238
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d77ed238
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d77ed238
Branch: refs/heads/HDFS-7240
Commit: d77ed238a911fc85d6f4bbce606cac7ec44f557f
Parents: 5ff7416
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Thu Sep 7 09:45:45 2017 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Thu Sep 7 09:50:36 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +
.../hdfs/server/namenode/FSDirectory.java | 58 +++++++++-
.../src/main/resources/hdfs-default.xml | 12 +++
.../namenode/TestINodeAttributeProvider.java | 105 +++++++++++++++++--
4 files changed, 167 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
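At a high level, the patch adds a new NameNode setting, dfs.namenode.inode.attributes.provider.bypass.users (defined in DFSConfigKeys.java below), naming the users whose requests skip the external INodeAttributeProvider and instead use the attributes stored in HDFS itself. A minimal sketch of supplying the setting programmatically, for example when assembling a test configuration; the user names "hive" and "hdfs" are placeholders, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BypassUsersConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Placeholder user names; in a secure cluster these would be the
    // users' Kerberos principals.
    conf.set(
        DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_KEY,
        "hive,hdfs");
    System.out.println(conf.get(
        DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_KEY));
  }
}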
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d77ed238/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bc7b716..d06e378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -641,6 +641,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
public static final String DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = "dfs.namenode.inode.attributes.provider.class";
+ public static final String DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_KEY = "dfs.namenode.inode.attributes.provider.bypass.users";
+ public static final String DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_DEFAULT = "";
+
public static final String DFS_DATANODE_BP_READY_TIMEOUT_KEY = "dfs.datanode.bp-ready.timeout";
public static final long DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT = 20;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d77ed238/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e6aa533..6604b5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -74,6 +74,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
@@ -202,6 +203,10 @@ public class FSDirectory implements Closeable {
private INodeAttributeProvider attributeProvider;
+ // A HashSet of user principals for whom the external attribute provider
+ // will be bypassed.
+ private HashSet<String> usersToBypassExtAttrProvider = null;
+
public void setINodeAttributeProvider(INodeAttributeProvider provider) {
attributeProvider = provider;
}
@@ -357,6 +362,49 @@ public class FSDirectory implements Closeable {
this.quotaInitThreads = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
+
+ initUsersToBypassExtProvider(conf);
+ }
+
+ private void initUsersToBypassExtProvider(Configuration conf) {
+ String[] bypassUsers = conf.getTrimmedStrings(
+ DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_DEFAULT);
+ for(int i = 0; i < bypassUsers.length; i++) {
+ String tmp = bypassUsers[i].trim();
+ if (!tmp.isEmpty()) {
+ if (usersToBypassExtAttrProvider == null) {
+ usersToBypassExtAttrProvider = new HashSet<String>();
+ }
+ LOG.info("Add user " + tmp + " to the list that will bypass external"
+ + " attribute provider.");
+ usersToBypassExtAttrProvider.add(tmp);
+ }
+ }
+ }
+
+ /**
+ * Check if a given user is configured to bypass the external attribute provider.
+ * @param user user principal
+ * @return true if the user is to bypass the external attribute provider
+ */
+ private boolean isUserBypassingExtAttrProvider(final String user) {
+ return (usersToBypassExtAttrProvider != null) &&
+ usersToBypassExtAttrProvider.contains(user);
+ }
+
+ /**
+ * Return attributeProvider, or null if ugi is configured to bypass it.
+ * @param ugi the user making the request
+ * @return the configured attributeProvider, or null
+ */
+ private INodeAttributeProvider getUserFilteredAttributeProvider(
+ UserGroupInformation ugi) {
+ if (attributeProvider == null ||
+ (ugi != null && isUserBypassingExtAttrProvider(ugi.getUserName()))) {
+ return null;
+ }
+ return attributeProvider;
}
/**
@@ -1711,7 +1759,7 @@ public class FSDirectory implements Closeable {
FSPermissionChecker getPermissionChecker(String fsOwner, String superGroup,
UserGroupInformation ugi) throws AccessControlException {
return new FSPermissionChecker(
- fsOwner, superGroup, ugi, attributeProvider);
+ fsOwner, superGroup, ugi, getUserFilteredAttributeProvider(ugi));
}
void checkOwner(FSPermissionChecker pc, INodesInPath iip)
@@ -1896,18 +1944,20 @@ public class FSDirectory implements Closeable {
}
INodeAttributes getAttributes(INodesInPath iip)
- throws FileNotFoundException {
+ throws IOException {
INode node = FSDirectory.resolveLastINode(iip);
int snapshot = iip.getPathSnapshotId();
INodeAttributes nodeAttrs = node.getSnapshotINode(snapshot);
- if (attributeProvider != null) {
+ UserGroupInformation ugi = NameNode.getRemoteUser();
+ INodeAttributeProvider ap = this.getUserFilteredAttributeProvider(ugi);
+ if (ap != null) {
// permission checking sends the full components array including the
// first empty component for the root. however file status
// related calls are expected to strip out the root component according
// to TestINodeAttributeProvider.
byte[][] components = iip.getPathComponents();
components = Arrays.copyOfRange(components, 1, components.length);
- nodeAttrs = attributeProvider.getAttributes(components, nodeAttrs);
+ nodeAttrs = ap.getAttributes(components, nodeAttrs);
}
return nodeAttrs;
}
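For readers skimming the FSDirectory hunk above: the bypass decision reduces to a set lookup. The configured value is split on commas, each token is trimmed, empty tokens are dropped, and getPermissionChecker() and getAttributes() then treat the provider as absent (null) when the calling user is in the resulting set. A standalone sketch of the same parsing and lookup; the class and method names (BypassFilter, isBypassing) are illustrative only and do not appear in the patch:

import java.util.HashSet;
import java.util.Set;

// Illustrative stand-in for the logic in FSDirectory#initUsersToBypassExtProvider
// and FSDirectory#getUserFilteredAttributeProvider.
public class BypassFilter {
  private final Set<String> bypassUsers = new HashSet<>();

  public BypassFilter(String configuredValue) {
    // Mirror the patch's parsing: split on commas, trim each entry, and
    // ignore empty entries.
    for (String token : configuredValue.split(",")) {
      String user = token.trim();
      if (!user.isEmpty()) {
        bypassUsers.add(user);
      }
    }
  }

  // True if the external attribute provider should be skipped for this user.
  public boolean isBypassing(String userName) {
    return bypassUsers.contains(userName);
  }

  public static void main(String[] args) {
    // Same value the test below feeds into the real configuration key.
    BypassFilter filter = new BypassFilter(" u2,, ,u3, ");
    System.out.println(filter.isBypassing("u2")); // true
    System.out.println(filter.isBypassing("u1")); // false
  }
}

With the value " u2,, ,u3, " from the test setup below, the sketch ends up with the set {u2, u3}, which is why u2 and u3 bypass the provider in testAuthzBypassingProvider while u1 still goes through it in testAuthzDelegationToProvider.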
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d77ed238/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d58e54e..36c74f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4160,6 +4160,18 @@
</property>
<property>
+ <name>dfs.namenode.inode.attributes.provider.bypass.users</name>
+ <value></value>
+ <description>
+ A comma-separated list of user principals (in a secure cluster) or user
+ names (in an insecure cluster) for whom the external attribute provider
+ will be bypassed for all operations. For these users, the file attributes
+ stored in HDFS, rather than those supplied by the external provider, are
+ used for permission checking and are returned when requested.
+ </description>
+</property>
+
+<property>
<name>dfs.namenode.max-num-blocks-to-log</name>
<value>1000</value>
<description>
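Operationally, the property would typically be set in hdfs-site.xml and takes effect when the NameNode starts, since FSDirectory reads it in its constructor. A hypothetical entry; the user names are placeholders, and in a secure cluster they would be the users' Kerberos principals as seen by the NameNode:

<property>
  <name>dfs.namenode.inode.attributes.provider.bypass.users</name>
  <value>hive,hdfs</value>
</property>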
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d77ed238/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index ffdc535..bbc5fa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -24,6 +24,7 @@ import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableList;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -33,19 +34,25 @@ import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
public class TestINodeAttributeProvider {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestINodeAttributeProvider.class);
+
private MiniDFSCluster miniDFS;
private static final Set<String> CALLED = new HashSet<String>();
+ private static final short HDFS_PERMISSION = 0777;
+ private static final short PROVIDER_PERMISSION = 0770;
public static class MyAuthorizationProvider extends INodeAttributeProvider {
@@ -112,7 +119,8 @@ public class TestINodeAttributeProvider {
@Override
public long getPermissionLong() {
- return (useDefault) ? inode.getPermissionLong() : 0770;
+ return (useDefault) ? inode.getPermissionLong() :
+ (long)PROVIDER_PERMISSION;
}
@Override
@@ -177,6 +185,9 @@ public class TestINodeAttributeProvider {
conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
MyAuthorizationProvider.class.getName());
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ conf.set(
+ DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_BYPASS_USERS_KEY,
+ " u2,, ,u3, ");
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
miniDFS = new MiniDFSCluster.Builder(conf).build();
}
@@ -195,8 +206,11 @@ public class TestINodeAttributeProvider {
public void testDelegationToProvider() throws Exception {
Assert.assertTrue(CALLED.contains("start"));
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
- fs.mkdirs(new Path("/tmp"));
- fs.setPermission(new Path("/tmp"), new FsPermission((short) 0777));
+ final Path tmpPath = new Path("/tmp");
+ final Path fooPath = new Path("/tmp/foo");
+
+ fs.mkdirs(tmpPath);
+ fs.setPermission(tmpPath, new FsPermission(HDFS_PERMISSION));
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
new String[]{"g1"});
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@@ -204,17 +218,19 @@ public class TestINodeAttributeProvider {
public Void run() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
CALLED.clear();
- fs.mkdirs(new Path("/tmp/foo"));
+ fs.mkdirs(fooPath);
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
Assert.assertTrue(CALLED.contains("checkPermission|WRITE|null|null"));
+
CALLED.clear();
- fs.listStatus(new Path("/tmp/foo"));
+ fs.listStatus(fooPath);
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(
CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
+
CALLED.clear();
- fs.getAclStatus(new Path("/tmp/foo"));
+ fs.getAclStatus(fooPath);
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
return null;
@@ -222,6 +238,81 @@ public class TestINodeAttributeProvider {
});
}
+ private class AssertHelper {
+ private boolean bypass = true;
+ AssertHelper(boolean bp) {
+ bypass = bp;
+ }
+ public void doAssert(boolean x) {
+ if (bypass) {
+ Assert.assertFalse(x);
+ } else {
+ Assert.assertTrue(x);
+ }
+ }
+ }
+
+ private void testBypassProviderHelper(final String[] users,
+ final short expectedPermission, final boolean bypass) throws Exception {
+ final AssertHelper asserter = new AssertHelper(bypass);
+
+ Assert.assertTrue(CALLED.contains("start"));
+
+ FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+ final Path userPath = new Path("/user");
+ final Path authz = new Path("/user/authz");
+ final Path authzChild = new Path("/user/authz/child2");
+
+ fs.mkdirs(userPath);
+ fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+ fs.mkdirs(authz);
+ fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+ fs.mkdirs(authzChild);
+ fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+ for(String user : users) {
+ UserGroupInformation ugiBypass =
+ UserGroupInformation.createUserForTesting(user,
+ new String[]{"g1"});
+ ugiBypass.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+ Assert.assertEquals(expectedPermission,
+ fs.getFileStatus(authzChild).getPermission().toShort());
+ asserter.doAssert(CALLED.contains("getAttributes"));
+ asserter.doAssert(CALLED.contains("checkPermission|null|null|null"));
+
+ CALLED.clear();
+ Assert.assertEquals(expectedPermission,
+ fs.listStatus(userPath)[0].getPermission().toShort());
+ asserter.doAssert(CALLED.contains("getAttributes"));
+ asserter.doAssert(
+ CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
+
+ CALLED.clear();
+ fs.getAclStatus(authzChild);
+ asserter.doAssert(CALLED.contains("getAttributes"));
+ asserter.doAssert(CALLED.contains("checkPermission|null|null|null"));
+ return null;
+ }
+ });
+ }
+ }
+
+ @Test
+ public void testAuthzDelegationToProvider() throws Exception {
+ LOG.info("Test not bypassing provider");
+ String[] users = {"u1"};
+ testBypassProviderHelper(users, PROVIDER_PERMISSION, false);
+ }
+
+ @Test
+ public void testAuthzBypassingProvider() throws Exception {
+ LOG.info("Test bypassing provider");
+ String[] users = {"u2", "u3"};
+ testBypassProviderHelper(users, HDFS_PERMISSION, true);
+ }
+
@Test
public void testCustomProvider() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));