You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/10/21 16:44:28 UTC
[12/50] [abbrv] hadoop git commit: HDFS-10922. Adding additional unit
tests for Trash (II). Contributed by Weiwei Yang.
HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd4c37c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd4c37c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd4c37c
Branch: refs/heads/HDFS-7240
Commit: 8fd4c37c45585d761d279f2f6032ff9c6c049895
Parents: b671ee6
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Oct 17 14:21:36 2016 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 40 +++++
.../apache/hadoop/hdfs/TestDFSPermission.java | 30 ++--
.../org/apache/hadoop/hdfs/TestHDFSTrash.java | 145 ++++++++++++++++++-
3 files changed, 189 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f80cd78..963aaa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -70,6 +70,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -2014,4 +2015,43 @@ public class DFSTestUtil {
}
}, 1000, 60000);
}
+
+ /**
+ * Close the current file system and create a new instance as the given
+ * {@link UserGroupInformation}.
+ */
+ public static FileSystem login(final FileSystem fs,
+ final Configuration conf, final UserGroupInformation ugi)
+ throws IOException, InterruptedException {
+ if (fs != null) {
+ fs.close();
+ }
+ return DFSTestUtil.getFileSystemAs(ugi, conf);
+ }
+
+ /**
+ * Verify that the given {@link FileStatus}'s owner, group and permission
+ * match the expected values; throws {@link AssertionError} on any mismatch.
+ */
+ public static void verifyFilePermission(FileStatus stat, String owner,
+ String group, FsAction u, FsAction g, FsAction o) {
+ if(stat != null) {
+ if(!Strings.isNullOrEmpty(owner)) {
+ assertEquals(owner, stat.getOwner());
+ }
+ if(!Strings.isNullOrEmpty(group)) {
+ assertEquals(group, stat.getGroup());
+ }
+ FsPermission permission = stat.getPermission();
+ if(u != null) {
+ assertEquals(u, permission.getUserAction());
+ }
+ if (g != null) {
+ assertEquals(g, permission.getGroupAction());
+ }
+ if (o != null) {
+ assertEquals(o, permission.getOtherAction());
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index d0d00e5..2705e67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -305,7 +305,7 @@ public class TestDFSPermission {
fs.mkdirs(rootDir);
fs.setPermission(rootDir, new FsPermission((short) 0777));
- login(USER1);
+ fs = DFSTestUtil.login(fs, conf, USER1);
fs.mkdirs(user1Dir);
fs.setPermission(user1Dir, new FsPermission((short) 0755));
fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
@@ -318,7 +318,7 @@ public class TestDFSPermission {
// login as user2, attempt to delete /BSS/user1
// this should fail because user2 has no permission to
// its sub directory.
- login(USER2);
+ fs = DFSTestUtil.login(fs, conf, USER2);
fs.delete(user1Dir, true);
fail("User2 should not be allowed to delete user1's dir.");
} catch (AccessControlException e) {
@@ -331,7 +331,7 @@ public class TestDFSPermission {
assertTrue(fs.exists(user1Dir));
try {
- login(SUPERUSER);
+ fs = DFSTestUtil.login(fs, conf, SUPERUSER);
Trash trash = new Trash(fs, conf);
Path trashRoot = trash.getCurrentTrashDir(user1Dir);
while(true) {
@@ -346,7 +346,7 @@ public class TestDFSPermission {
// login as user2, attempt to move /BSS/user1 to trash
// this should also fail otherwise the directory will be
// removed by trash emptier (emptier is running by superuser)
- login(USER2);
+ fs = DFSTestUtil.login(fs, conf, USER2);
Trash userTrash = new Trash(fs, conf);
assertTrue(userTrash.isEnabled());
userTrash.moveToTrash(user1Dir);
@@ -363,7 +363,7 @@ public class TestDFSPermission {
// ensure /BSS/user1 still exists
assertEquals(fs.exists(user1Dir), true);
} finally {
- login(SUPERUSER);
+ fs = DFSTestUtil.login(fs, conf, SUPERUSER);
fs.delete(rootDir, true);
conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
}
@@ -405,7 +405,7 @@ public class TestDFSPermission {
setOwner(FILE_DIR_PATH, USER1.getShortUserName(), GROUP3_NAME, false);
// case 3: user1 changes FILE_DIR_PATH's owner to be user2
- login(USER1);
+ fs = DFSTestUtil.login(fs, conf, USER1);
setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
// case 4: user1 changes FILE_DIR_PATH's group to be group1 which it belongs
@@ -417,14 +417,14 @@ public class TestDFSPermission {
setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
// case 6: user2 (non-owner) changes FILE_DIR_PATH's group to be group3
- login(USER2);
+ fs = DFSTestUtil.login(fs, conf, USER2);
setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
// case 7: user2 (non-owner) changes FILE_DIR_PATH's user to be user2
setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
// delete the file/directory
- login(SUPERUSER);
+ fs = DFSTestUtil.login(fs, conf, SUPERUSER);
fs.delete(FILE_DIR_PATH, true);
}
@@ -666,7 +666,7 @@ public class TestDFSPermission {
short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
throws Exception {
boolean[] isDirEmpty = new boolean[NUM_TEST_PERMISSIONS];
- login(SUPERUSER);
+ fs = DFSTestUtil.login(fs, conf, SUPERUSER);
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
create(OpType.CREATE, files[i]);
create(OpType.MKDIRS, dirs[i]);
@@ -682,7 +682,7 @@ public class TestDFSPermission {
isDirEmpty[i] = (fs.listStatus(dirs[i]).length == 0);
}
- login(ugi);
+ fs = DFSTestUtil.login(fs, conf, ugi);
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME),
ancestorPermission[i], parentPermission[i]);
@@ -1237,16 +1237,6 @@ public class TestDFSPermission {
ddpv.verifyPermission(ugi);
}
- /* log into dfs as the given user */
- private void login(UserGroupInformation ugi) throws IOException,
- InterruptedException {
- if (fs != null) {
- fs.close();
- }
-
- fs = DFSTestUtil.getFileSystemAs(ugi, conf);
- }
-
/* test non-existent file */
private void checkNonExistentFile() {
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
index ad4d600..b81cdb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
@@ -17,27 +17,79 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
+import java.util.UUID;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestTrash;
-
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* Test trash using HDFS
*/
public class TestHDFSTrash {
+
+ public static final Log LOG = LogFactory.getLog(TestHDFSTrash.class);
+
private static MiniDFSCluster cluster = null;
+ private static FileSystem fs;
+ private static Configuration conf = new HdfsConfiguration();
+
+ private final static Path TEST_ROOT = new Path("/TestHDFSTrash-ROOT");
+ private final static Path TRASH_ROOT = new Path("/TestHDFSTrash-TRASH");
+
+ final private static String GROUP1_NAME = "group1";
+ final private static String GROUP2_NAME = "group2";
+ final private static String GROUP3_NAME = "group3";
+ final private static String USER1_NAME = "user1";
+ final private static String USER2_NAME = "user2";
+
+ private static UserGroupInformation superUser;
+ private static UserGroupInformation user1;
+ private static UserGroupInformation user2;
@BeforeClass
public static void setUp() throws Exception {
- Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ fs = FileSystem.get(conf);
+
+ superUser = UserGroupInformation.getCurrentUser();
+ user1 = UserGroupInformation.createUserForTesting(USER1_NAME,
+ new String[] {GROUP1_NAME, GROUP2_NAME});
+ user2 = UserGroupInformation.createUserForTesting(USER2_NAME,
+ new String[] {GROUP2_NAME, GROUP3_NAME});
+
+ // Init test and trash root dirs in HDFS
+ fs.mkdirs(TEST_ROOT);
+ fs.setPermission(TEST_ROOT, new FsPermission((short) 0777));
+ DFSTestUtil.verifyFilePermission(
+ fs.getFileStatus(TEST_ROOT),
+ superUser.getShortUserName(),
+ null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
+
+ fs.mkdirs(TRASH_ROOT);
+ fs.setPermission(TRASH_ROOT, new FsPermission((short) 0777));
+ DFSTestUtil.verifyFilePermission(
+ fs.getFileStatus(TRASH_ROOT),
+ superUser.getShortUserName(),
+ null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
}
@AfterClass
@@ -52,9 +104,90 @@ public class TestHDFSTrash {
@Test
public void testNonDefaultFS() throws IOException {
- FileSystem fs = cluster.getFileSystem();
- Configuration conf = fs.getConf();
- conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
- TestTrash.trashNonDefaultFS(conf);
+ FileSystem fileSystem = cluster.getFileSystem();
+ Configuration config = fileSystem.getConf();
+ config.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+ fileSystem.getUri().toString());
+ TestTrash.trashNonDefaultFS(config);
+ }
+
+ @Test
+ public void testHDFSTrashPermission() throws IOException {
+ FileSystem fileSystem = cluster.getFileSystem();
+ Configuration config = fileSystem.getConf();
+ config.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0.2");
+ TestTrash.verifyTrashPermission(fileSystem, config);
+ }
+
+ @Test
+ public void testMoveEmptyDirToTrash() throws IOException {
+ FileSystem fileSystem = cluster.getFileSystem();
+ Configuration config = fileSystem.getConf();
+ config.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "1");
+ TestTrash.verifyMoveEmptyDirToTrash(fileSystem, config);
+ }
+
+ @Test
+ public void testDeleteTrash() throws Exception {
+ Configuration testConf = new Configuration(conf);
+ testConf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
+
+ Path user1Tmp = new Path(TEST_ROOT, "test-del-u1");
+ Path user2Tmp = new Path(TEST_ROOT, "test-del-u2");
+
+ // login as user1, move something to trash
+ // verify user1 can remove its own trash dir
+ fs = DFSTestUtil.login(fs, testConf, user1);
+ fs.mkdirs(user1Tmp);
+ Trash u1Trash = getPerUserTrash(user1, fs, testConf);
+ Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
+ assertTrue(String.format("Failed to move %s to trash", user1Tmp),
+ u1Trash.moveToTrash(user1Tmp));
+ assertTrue(
+ String.format(
+ "%s should be allowed to remove its own trash directory %s",
+ user1.getUserName(), u1t),
+ fs.delete(u1t, true));
+ assertFalse(fs.exists(u1t));
+
+ // login as user2, move something to trash
+ fs = DFSTestUtil.login(fs, testConf, user2);
+ fs.mkdirs(user2Tmp);
+ Trash u2Trash = getPerUserTrash(user2, fs, testConf);
+ u2Trash.moveToTrash(user2Tmp);
+ Path u2t = u2Trash.getCurrentTrashDir(user2Tmp);
+
+ try {
+ // user1 should not be able to remove user2's trash dir
+ fs = DFSTestUtil.login(fs, testConf, user1);
+ fs.delete(u2t, true);
+ fail(String.format("%s should not be able to remove %s trash directory",
+ USER1_NAME, USER2_NAME));
+ } catch (AccessControlException e) {
+ assertTrue(e instanceof AccessControlException);
+ assertTrue("Permission denied messages must carry the username",
+ e.getMessage().contains(USER1_NAME));
+ }
+ }
+
+ /**
+ * Return a {@link Trash} instance using the given configuration.
+ * The trash root directory is set to a unique directory under
+ * {@link #TRASH_ROOT}. Use this method to isolate trash
+ * directories for different users.
+ */
+ private Trash getPerUserTrash(UserGroupInformation ugi,
+ FileSystem fileSystem, Configuration config) throws IOException {
+ // generate a unique path per instance
+ UUID trashId = UUID.randomUUID();
+ StringBuffer sb = new StringBuffer()
+ .append(ugi.getUserName())
+ .append("-")
+ .append(trashId.toString());
+ Path userTrashRoot = new Path(TRASH_ROOT, sb.toString());
+ FileSystem spyUserFs = Mockito.spy(fileSystem);
+ Mockito.when(spyUserFs.getTrashRoot(Mockito.any()))
+ .thenReturn(userTrashRoot);
+ return new Trash(spyUserFs, config);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org