Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2019/10/24 07:08:49 UTC
[hadoop] branch branch-3.2 updated: HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.2 by this push:
new e1e3d9c HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.
e1e3d9c is described below
commit e1e3d9c81de60c110f711933b41e1dab011c10ab
Author: Vinayakumar B <vi...@apache.org>
AuthorDate: Thu Oct 24 12:14:09 2019 +0530
HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.
---
.../server/namenode/FSDirStatAndListingOp.java | 18 +++++--------
.../hadoop/hdfs/TestDistributedFileSystem.java | 30 ++++++++++++++++++++++
2 files changed, 37 insertions(+), 11 deletions(-)
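In effect, this change makes directory listings report each entry's real storage policy ID to every caller; previously, non-superusers received HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED for all entries. A minimal client-side sketch of the new behavior (not part of this commit; the path "/dir" is hypothetical and fs.defaultFS is assumed to point at an HDFS cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ListingPolicyDemo {
      public static void main(String[] args) throws Exception {
        // Connects with the caller's own credentials; no superuser needed.
        DistributedFileSystem fs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // List "/dir" through the client protocol, as the new test below does.
        DirectoryListing listing =
            fs.getClient().listPaths("/dir", HdfsFileStatus.EMPTY_NAME);
        for (HdfsFileStatus status : listing.getPartialListing()) {
          // With this patch every caller sees the real policy ID here; before,
          // non-superusers always got BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
          System.out.println(status.getLocalName() + " -> "
              + status.getStoragePolicy());
        }
      }
    }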
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 38acfe9..c7f43e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -73,14 +73,12 @@ class FSDirStatAndListingOp {
       }
     }
 
-    boolean isSuperUser = true;
     if (fsd.isPermissionEnabled()) {
       if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
       }
-      isSuperUser = pc.isSuperUser();
     }
-    return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
+    return getListing(fsd, iip, startAfter, needLocation);
   }
 
   /**
@@ -210,11 +208,10 @@ class FSDirStatAndListingOp {
    *            path
    * @param startAfter the name to start listing after
    * @param needLocation if block locations are returned
-   * @param includeStoragePolicy if storage policy is returned
    * @return a partial listing starting after startAfter
    */
   private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
-      byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
+      byte[] startAfter, boolean needLocation)
       throws IOException {
     if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
       return getReservedListing(fsd);
@@ -231,9 +228,7 @@ class FSDirStatAndListingOp {
         return null;
       }
 
-      byte parentStoragePolicy = includeStoragePolicy
-          ? targetNode.getStoragePolicyID()
-          : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+      byte parentStoragePolicy = targetNode.getStoragePolicyID();
 
       if (!targetNode.isDirectory()) {
         // return the file's status. note that the iip already includes the
@@ -255,9 +250,10 @@ class FSDirStatAndListingOp {
       HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
       for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
         INode child = contents.get(startChild+i);
-        byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
-            ? getStoragePolicyID(child.getLocalStoragePolicyID(),
-                parentStoragePolicy)
+        byte childStoragePolicy =
+            !child.isSymlink()
+                ? getStoragePolicyID(child.getLocalStoragePolicyID(),
+                    parentStoragePolicy)
             : parentStoragePolicy;
         listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
             needLocation, false);
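With the superuser check removed, the child policy resolution in the loop above reduces to a plain inheritance rule: a child with its own policy keeps it, anything else inherits the parent's, and symlinks always inherit because they never carry a policy of their own. A simplified stand-in for getStoragePolicyID sketching that rule (the constant value 0 mirrors HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; this is an illustration, not the production helper):

    class StoragePolicyRule {
      // Mirrors HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
      static final byte UNSPECIFIED = 0;

      // A child keeps an explicitly set policy; otherwise the parent's applies.
      static byte resolve(byte childPolicy, byte parentPolicy,
          boolean isSymlink) {
        if (isSymlink) {
          return parentPolicy; // symlinks never carry their own policy
        }
        return childPolicy != UNSPECIFIED ? childPolicy : parentPolicy;
      }
    }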
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 57e65a5..8bef655 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem.HdfsDataOutputStreamBuilder;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
@@ -1841,6 +1843,34 @@ public class TestDistributedFileSystem {
   }
 
   @Test
+  public void testListingStoragePolicyNonSuperUser() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      Path dir = new Path("/dir");
+      dfs.mkdirs(dir);
+      dfs.setPermission(dir,
+          new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+
+      // Create a non-super user.
+      UserGroupInformation user = UserGroupInformation.createUserForTesting(
+          "Non_SuperUser", new String[] {"Non_SuperGroup"});
+
+      DistributedFileSystem userfs = (DistributedFileSystem) user.doAs(
+          (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(conf));
+      Path sDir = new Path("/dir/sPolicy");
+      userfs.mkdirs(sDir);
+      userfs.setStoragePolicy(sDir, "COLD");
+      HdfsFileStatus[] list = userfs.getClient()
+          .listPaths(dir.toString(), HdfsFileStatus.EMPTY_NAME)
+          .getPartialListing();
+      assertEquals(HdfsConstants.COLD_STORAGE_POLICY_ID,
+          list[0].getStoragePolicy());
+    }
+  }
+
+  @Test
   public void testRemoveErasureCodingPolicy() throws Exception {
     Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = null;
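For context around the new test: the effective policy of a single path can also be read back through the public FileSystem API, which does not require superuser rights. A small sketch in the same vein (the path and policy mirror the test above; fs.defaultFS is assumed to point at the cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockStoragePolicySpi;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PolicyReadBack {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/dir/sPolicy"); // same path the test creates
        // Prints "COLD" once a setStoragePolicy(dir, "COLD") call has applied.
        BlockStoragePolicySpi policy = fs.getStoragePolicy(dir);
        System.out.println(dir + " -> " + policy.getName());
      }
    }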