Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/20 01:50:25 UTC
svn commit: r1619012 [27/35] - in
/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Tue Aug 19 23:49:39 2014
@@ -30,6 +30,8 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.junit.After;
import org.junit.Test;
@@ -61,11 +63,9 @@ public class TestDFSFinalize {
* Verify that the current directory exists and that the previous directory
* does not exist. Verify that current hasn't been modified by comparing
* the checksums of all the files it contains with their original checksums.
- * Note that we do not check that previous is removed on the DataNode
- * because its removal is asynchronous therefore we have no reliable
- * way to know when it will happen.
*/
- static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
+ static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
+ String bpid) throws Exception {
List<File> dirs = Lists.newArrayList();
for (int i = 0; i < nameNodeDirs.length; i++) {
File curDir = new File(nameNodeDirs[i], "current");
@@ -76,15 +76,31 @@ public class TestDFSFinalize {
FSImageTestUtil.assertParallelFilesAreIdentical(
dirs, Collections.<String>emptySet());
+ File dnCurDirs[] = new File[dataNodeDirs.length];
for (int i = 0; i < dataNodeDirs.length; i++) {
- assertEquals(
- UpgradeUtilities.checksumContents(
- DATA_NODE, new File(dataNodeDirs[i],"current")),
- UpgradeUtilities.checksumMasterDataNodeContents());
+ dnCurDirs[i] = new File(dataNodeDirs[i],"current");
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+ false), UpgradeUtilities.checksumMasterDataNodeContents());
}
for (int i = 0; i < nameNodeDirs.length; i++) {
assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
}
+
+ if (bpid == null) {
+ for (int i = 0; i < dataNodeDirs.length; i++) {
+ assertFalse(new File(dataNodeDirs[i],"previous").isDirectory());
+ }
+ } else {
+ for (int i = 0; i < dataNodeDirs.length; i++) {
+ File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
+ assertFalse(new File(bpRoot,"previous").isDirectory());
+
+ File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ bpCurFinalizeDir, true),
+ UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+ }
+ }
}
/**
@@ -106,7 +122,7 @@ public class TestDFSFinalize {
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
- log("Finalize with existing previous dir", numDirs);
+ log("Finalize NN & DN with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
@@ -118,11 +134,47 @@ public class TestDFSFinalize {
.startupOption(StartupOption.REGULAR)
.build();
cluster.finalizeCluster(conf);
- checkResult(nameNodeDirs, dataNodeDirs);
+ cluster.triggerBlockReports();
+ // 1 second should be enough for asynchronous DN finalize
+ Thread.sleep(1000);
+ checkResult(nameNodeDirs, dataNodeDirs, null);
+
+ log("Finalize NN & DN without existing previous dir", numDirs);
+ cluster.finalizeCluster(conf);
+ cluster.triggerBlockReports();
+ // 1 second should be enough for asynchronous DN finalize
+ Thread.sleep(1000);
+ checkResult(nameNodeDirs, dataNodeDirs, null);
+
+ cluster.shutdown();
+ UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+ UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+ log("Finalize NN & BP with existing previous dir", numDirs);
+ String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
+ UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+ UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
+ UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
+ UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", bpid);
+ UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", bpid);
+ cluster = new MiniDFSCluster.Builder(conf)
+ .format(false)
+ .manageDataDfsDirs(false)
+ .manageNameDfsDirs(false)
+ .startupOption(StartupOption.REGULAR)
+ .build();
+ cluster.finalizeCluster(conf);
+ cluster.triggerBlockReports();
+ // 1 second should be enough for asynchronous BP finalize
+ Thread.sleep(1000);
+ checkResult(nameNodeDirs, dataNodeDirs, bpid);
- log("Finalize without existing previous dir", numDirs);
+ log("Finalize NN & BP without existing previous dir", numDirs);
cluster.finalizeCluster(conf);
- checkResult(nameNodeDirs, dataNodeDirs);
+ cluster.triggerBlockReports();
+ // 1 second should be enough for asynchronous BP finalize
+ Thread.sleep(1000);
+ checkResult(nameNodeDirs, dataNodeDirs, bpid);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
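
The block-pool check added to checkResult() relies on the DataNode layout <storageDir>/current/<bpid>, with finalized blocks under current/finalized inside that slice. A minimal standalone sketch of the same existence checks, assuming that layout (illustration only, not the UpgradeUtilities/BlockPoolSliceStorage code):

    import java.io.File;

    class BpFinalizeCheckSketch {
      /**
       * Returns true if block pool 'bpid' under a DataNode storage directory
       * looks finalized: no "previous" copy remains and the finalized dir exists.
       * Assumes the <storageDir>/current/<bpid>/current/finalized layout used above.
       */
      static boolean isBlockPoolFinalized(File dnStorageDir, String bpid) {
        File bpRoot = new File(new File(dnStorageDir, "current"), bpid);
        File bpPrevious = new File(bpRoot, "previous");
        File bpFinalized = new File(bpRoot, "current/finalized");
        return !bpPrevious.isDirectory() && bpFinalized.isDirectory();
      }
    }
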
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Tue Aug 19 23:49:39 2014
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import java.io.FileNotFoundException;
import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
}
}
+ @Test
+ public void testAccessOwner() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p1 = new Path("/p1");
+ rootFs.mkdirs(p1);
+ rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.setPermission(p1, new FsPermission((short) 0444));
+ fs.access(p1, FsAction.READ);
+ try {
+ fs.access(p1, FsAction.WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
+ Path badPath = new Path("/bad/bad");
+ try {
+ fs.access(badPath, FsAction.READ);
+ fail("The access call should have failed");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testAccessGroupMember() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p2 = new Path("/p2");
+ rootFs.mkdirs(p2);
+ rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+ rootFs.setPermission(p2, new FsPermission((short) 0740));
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.access(p2, FsAction.READ);
+ try {
+ fs.access(p2, FsAction.EXECUTE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testAccessOthers() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p3 = new Path("/p3");
+ rootFs.mkdirs(p3);
+ rootFs.setPermission(p3, new FsPermission((short) 0774));
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.access(p3, FsAction.READ);
+ try {
+ fs.access(p3, FsAction.READ_WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+ }
+
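+ The three tests above exercise FileSystem#access(Path, FsAction) for the owner,
+ group-member and other cases: access() returns normally when the requested action
+ is permitted and throws otherwise. A minimal sketch of that client-side pattern
+ (hypothetical helper, not part of this patch):
+
+     import java.io.FileNotFoundException;
+     import java.io.IOException;
+     import org.apache.hadoop.fs.FileSystem;
+     import org.apache.hadoop.fs.Path;
+     import org.apache.hadoop.fs.permission.FsAction;
+     import org.apache.hadoop.security.AccessControlException;
+
+     class AccessCheckSketch {
+       /** Returns true if the current caller may write to p, false otherwise. */
+       static boolean canWrite(FileSystem fs, Path p) throws IOException {
+         try {
+           fs.access(p, FsAction.WRITE);   // throws if the caller lacks WRITE
+           return true;
+         } catch (AccessControlException ace) {
+           return false;                   // permission denied
+         } catch (FileNotFoundException fnfe) {
+           return false;                   // path does not exist
+         }
+       }
+     }
+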
/* Check if namenode performs permission checking correctly
* for the given user for operations mkdir, open, setReplication,
* getFileInfo, isDirectory, exists, getContentLength, list, rename,
@@ -429,6 +506,7 @@ public class TestDFSPermission {
short[] ancestorPermission, short[] parentPermission,
short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
throws Exception {
+ boolean[] isDirEmpty = new boolean[NUM_TEST_PERMISSIONS];
login(SUPERUSER);
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
create(OpType.CREATE, files[i]);
@@ -441,6 +519,8 @@ public class TestDFSPermission {
FsPermission fsPermission = new FsPermission(filePermission[i]);
fs.setPermission(files[i], fsPermission);
fs.setPermission(dirs[i], fsPermission);
+
+ isDirEmpty[i] = (fs.listStatus(dirs[i]).length == 0);
}
login(ugi);
@@ -461,7 +541,7 @@ public class TestDFSPermission {
parentPermission[i], ancestorPermission[next], parentPermission[next]);
testDeleteFile(ugi, files[i], ancestorPermission[i], parentPermission[i]);
testDeleteDir(ugi, dirs[i], ancestorPermission[i], parentPermission[i],
- filePermission[i], null);
+ filePermission[i], null, isDirEmpty[i]);
}
// test non existent file
@@ -924,7 +1004,8 @@ public class TestDFSPermission {
}
/* A class that verifies the permission checking is correct for
- * directory deletion */
+ * directory deletion
+ */
private class DeleteDirPermissionVerifier extends DeletePermissionVerifier {
private short[] childPermissions;
@@ -958,6 +1039,17 @@ public class TestDFSPermission {
}
}
+ /* A class that verifies the permission checking is correct for
+ * empty-directory deletion
+ */
+ private class DeleteEmptyDirPermissionVerifier extends DeleteDirPermissionVerifier {
+ @Override
+ void setOpPermission() {
+ this.opParentPermission = SEARCH_MASK | WRITE_MASK;
+ this.opPermission = NULL_MASK;
+ }
+ }
+
final DeletePermissionVerifier fileDeletionVerifier =
new DeletePermissionVerifier();
@@ -971,14 +1063,19 @@ public class TestDFSPermission {
final DeleteDirPermissionVerifier dirDeletionVerifier =
new DeleteDirPermissionVerifier();
+ final DeleteEmptyDirPermissionVerifier emptyDirDeletionVerifier =
+ new DeleteEmptyDirPermissionVerifier();
+
/* test if the permission checking of directory deletion is correct */
private void testDeleteDir(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission, short permission,
- short[] childPermissions) throws Exception {
- dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
- permission, childPermissions);
- dirDeletionVerifier.verifyPermission(ugi);
-
+ short[] childPermissions,
+ final boolean isDirEmpty) throws Exception {
+ DeleteDirPermissionVerifier ddpv = isDirEmpty?
+ emptyDirDeletionVerifier : dirDeletionVerifier;
+ ddpv.set(path, ancestorPermission, parentPermission, permission,
+ childPermissions);
+ ddpv.verifyPermission(ugi);
}
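
The new DeleteEmptyDirPermissionVerifier captures the distinction the test now makes: deleting an empty directory requires only write and search permission on the parent and nothing on the directory itself, while a non-empty directory presumably also needs permission on the directory so its children can be removed. A tiny sketch of that rule with hypothetical mask constants (the test class defines its own SEARCH_MASK, WRITE_MASK and NULL_MASK):

    class DeleteRequirementSketch {
      // Hypothetical masks for illustration only.
      static final short SEARCH_MASK = 01, WRITE_MASK = 02, NULL_MASK = 0;

      /** Permission required on the parent directory to delete a child dir. */
      static short requiredOnParent() {
        return (short) (SEARCH_MASK | WRITE_MASK);     // always write + search
      }

      /** Permission required on the directory being deleted. */
      static short requiredOnDir(boolean isEmpty) {
        // An empty directory needs nothing on itself; a non-empty one is
        // assumed to need write + search so its children can be removed.
        return isEmpty ? NULL_MASK : (short) (SEARCH_MASK | WRITE_MASK);
      }
    }
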
/* log into dfs as the given user */
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Aug 19 23:49:39 2014
@@ -81,7 +81,7 @@ public class TestDFSRollback {
break;
case DATA_NODE:
assertEquals(
- UpgradeUtilities.checksumContents(nodeType, curDir),
+ UpgradeUtilities.checksumContents(nodeType, curDir, false),
UpgradeUtilities.checksumMasterDataNodeContents());
break;
}
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Tue Aug 19 23:49:39 2014
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -54,10 +55,17 @@ import org.apache.hadoop.util.ToolRunner
import org.junit.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.*;
+import com.google.common.collect.Lists;
+
/**
* This class tests commands from DFSShell.
*/
@@ -155,7 +163,7 @@ public class TestDFSShell {
}
@Test (timeout = 30000)
- public void testRecrusiveRm() throws IOException {
+ public void testRecursiveRm() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
@@ -1471,7 +1479,8 @@ public class TestDFSShell {
Path root = new Path("/test/get");
final Path remotef = new Path(root, fname);
final Configuration conf = new HdfsConfiguration();
-
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
TestGetRunner runner = new TestGetRunner() {
private int count = 0;
private final FsShell shell = new FsShell(conf);
@@ -1583,6 +1592,7 @@ public class TestDFSShell {
cluster.shutdown();
}
}
+
private static String runLsr(final FsShell shell, String root, int returnvalue
) throws Exception {
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
@@ -1619,6 +1629,400 @@ public class TestDFSShell {
int res = admin.run(new String[] {"-refreshNodes"});
assertEquals("expected to fail -1", res , -1);
}
+
+ // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
+ // ACLs)
+ @Test (timeout = 120000)
+ public void testCopyCommandsWithPreserveOption() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .format(true).build();
+ FsShell shell = null;
+ FileSystem fs = null;
+ final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
+ + counter.getAndIncrement();
+ final Path hdfsTestDir = new Path(testdir);
+ try {
+ fs = cluster.getFileSystem();
+ fs.mkdirs(hdfsTestDir);
+ Path src = new Path(hdfsTestDir, "srcfile");
+ fs.create(src).close();
+
+ fs.setAcl(src, Lists.newArrayList(
+ aclEntry(ACCESS, USER, ALL),
+ aclEntry(ACCESS, USER, "foo", ALL),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE),
+ aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+ aclEntry(ACCESS, OTHER, EXECUTE)));
+
+ FileStatus status = fs.getFileStatus(src);
+ final long mtime = status.getModificationTime();
+ final long atime = status.getAccessTime();
+ final String owner = status.getOwner();
+ final String group = status.getGroup();
+ final FsPermission perm = status.getPermission();
+
+ fs.setXAttr(src, "user.a1", new byte[]{0x31, 0x32, 0x33});
+ fs.setXAttr(src, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+
+ shell = new FsShell(conf);
+
+ // -p
+ Path target1 = new Path(hdfsTestDir, "targetfile1");
+ String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+ target1.toUri().toString() };
+ int ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -p is not working", SUCCESS, ret);
+ FileStatus targetStatus = fs.getFileStatus(target1);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ FsPermission targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ Map<String, byte[]> xattrs = fs.getXAttrs(target1);
+ assertTrue(xattrs.isEmpty());
+ List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptop
+ Path target2 = new Path(hdfsTestDir, "targetfile2");
+ argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
+ target2.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptop is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(target2);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(target2);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(target2).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopx
+ Path target3 = new Path(hdfsTestDir, "targetfile3");
+ argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
+ target3.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopx is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(target3);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(target3);
+ assertEquals(xattrs.size(), 2);
+ assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+ assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+ acls = fs.getAclStatus(target3).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopa
+ Path target4 = new Path(hdfsTestDir, "targetfile4");
+ argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+ target4.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(target4);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(target4);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(target4).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
+
+ // -ptoa (verify -pa option will preserve permissions also)
+ Path target5 = new Path(hdfsTestDir, "targetfile5");
+ argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
+ target5.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptoa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(target5);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(target5);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(target5).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
+ } finally {
+ if (null != shell) {
+ shell.close();
+ }
+
+ if (null != fs) {
+ fs.delete(hdfsTestDir, true);
+ fs.close();
+ }
+ cluster.shutdown();
+ }
+ }
+
+ // verify cp -ptopxa option will preserve directory attributes.
+ @Test (timeout = 120000)
+ public void testCopyCommandsToDirectoryWithPreserveOption()
+ throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .format(true).build();
+ FsShell shell = null;
+ FileSystem fs = null;
+ final String testdir =
+ "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
+ + counter.getAndIncrement();
+ final Path hdfsTestDir = new Path(testdir);
+ try {
+ fs = cluster.getFileSystem();
+ fs.mkdirs(hdfsTestDir);
+ Path srcDir = new Path(hdfsTestDir, "srcDir");
+ fs.mkdirs(srcDir);
+
+ fs.setAcl(srcDir, Lists.newArrayList(
+ aclEntry(ACCESS, USER, ALL),
+ aclEntry(ACCESS, USER, "foo", ALL),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE),
+ aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
+ aclEntry(ACCESS, OTHER, EXECUTE)));
+ // set sticky bit
+ fs.setPermission(srcDir,
+ new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      // Create a file in srcDir to check whether the modification time of
+      // srcDir is preserved after copying the file.
+      // If cp -p preserved the directory's modification time first and then
+      // copied the child (srcFile), the preserved time would be lost again.
+ Path srcFile = new Path(srcDir, "srcFile");
+ fs.create(srcFile).close();
+
+ FileStatus status = fs.getFileStatus(srcDir);
+ final long mtime = status.getModificationTime();
+ final long atime = status.getAccessTime();
+ final String owner = status.getOwner();
+ final String group = status.getGroup();
+ final FsPermission perm = status.getPermission();
+
+ fs.setXAttr(srcDir, "user.a1", new byte[]{0x31, 0x32, 0x33});
+ fs.setXAttr(srcDir, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+
+ shell = new FsShell(conf);
+
+ // -p
+ Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
+ String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
+ targetDir1.toUri().toString() };
+ int ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -p is not working", SUCCESS, ret);
+ FileStatus targetStatus = fs.getFileStatus(targetDir1);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ FsPermission targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ Map<String, byte[]> xattrs = fs.getXAttrs(targetDir1);
+ assertTrue(xattrs.isEmpty());
+ List<AclEntry> acls = fs.getAclStatus(targetDir1).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptop
+ Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
+ argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
+ targetDir2.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptop is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir2);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(targetDir2);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir2).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopx
+ Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
+ argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
+ targetDir3.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopx is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir3);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(targetDir3);
+ assertEquals(xattrs.size(), 2);
+ assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+ assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+ acls = fs.getAclStatus(targetDir3).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopa
+ Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
+ argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
+ targetDir4.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir4);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(targetDir4);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir4).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
+
+ // -ptoa (verify -pa option will preserve permissions also)
+ Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
+ argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
+ targetDir5.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptoa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir5);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ xattrs = fs.getXAttrs(targetDir5);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir5).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
+ } finally {
+ if (shell != null) {
+ shell.close();
+ }
+ if (fs != null) {
+ fs.delete(hdfsTestDir, true);
+ fs.close();
+ }
+ cluster.shutdown();
+ }
+ }
+
+ // Verify cp -pa option will preserve both ACL and sticky bit.
+ @Test (timeout = 120000)
+ public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .format(true).build();
+ FsShell shell = null;
+ FileSystem fs = null;
+ final String testdir =
+ "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+ + counter.getAndIncrement();
+ final Path hdfsTestDir = new Path(testdir);
+ try {
+ fs = cluster.getFileSystem();
+ fs.mkdirs(hdfsTestDir);
+ Path src = new Path(hdfsTestDir, "srcfile");
+ fs.create(src).close();
+
+ fs.setAcl(src, Lists.newArrayList(
+ aclEntry(ACCESS, USER, ALL),
+ aclEntry(ACCESS, USER, "foo", ALL),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE),
+ aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+ aclEntry(ACCESS, OTHER, EXECUTE)));
+ // set sticky bit
+ fs.setPermission(src,
+ new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+ FileStatus status = fs.getFileStatus(src);
+ final long mtime = status.getModificationTime();
+ final long atime = status.getAccessTime();
+ final String owner = status.getOwner();
+ final String group = status.getGroup();
+ final FsPermission perm = status.getPermission();
+
+ shell = new FsShell(conf);
+
+ // -p preserves sticky bit and doesn't preserve ACL
+ Path target1 = new Path(hdfsTestDir, "targetfile1");
+ String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+ target1.toUri().toString() };
+ int ret = ToolRunner.run(shell, argv);
+ assertEquals("cp is not working", SUCCESS, ret);
+ FileStatus targetStatus = fs.getFileStatus(target1);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ FsPermission targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopa preserves both sticky bit and ACL
+ Path target2 = new Path(hdfsTestDir, "targetfile2");
+ argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+ target2.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(target2);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertTrue(perm.equals(targetPerm));
+ acls = fs.getAclStatus(target2).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+ } finally {
+ if (null != shell) {
+ shell.close();
+ }
+ if (null != fs) {
+ fs.delete(hdfsTestDir, true);
+ fs.close();
+ }
+ cluster.shutdown();
+ }
+ }
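+
+ The tests above cover -p, -ptop, -ptopx, -ptopa and -ptoa; per the comment at the
+ top of this block, the full preserve set is -ptopxa (timestamps, ownership,
+ permission, xattrs, ACLs). A minimal sketch of requesting all of them in one
+ FsShell invocation (hypothetical source and destination paths):
+
+     import org.apache.hadoop.conf.Configuration;
+     import org.apache.hadoop.fs.FsShell;
+     import org.apache.hadoop.util.ToolRunner;
+
+     class CpPreserveAllSketch {
+       public static void main(String[] args) throws Exception {
+         Configuration conf = new Configuration();
+         FsShell shell = new FsShell(conf);
+         int ret;
+         try {
+           // Preserve timestamps, ownership, permission, xattrs and ACLs in one go.
+           ret = ToolRunner.run(shell,
+               new String[] { "-cp", "-ptopxa", "/src/file", "/dst/file" });
+         } finally {
+           shell.close();
+         }
+         System.exit(ret);   // 0 on success
+       }
+     }
+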
// force Copy Option is -f
@Test (timeout = 30000)
@@ -1874,6 +2278,449 @@ public class TestDFSShell {
cluster.shutdown();
}
}
+
+ @Test (timeout = 30000)
+ public void testSetXAttrPermission() throws Exception {
+ UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ MiniDFSCluster cluster = null;
+ PrintStream bak = null;
+ try {
+ final Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ FileSystem fs = cluster.getFileSystem();
+ Path p = new Path("/foo");
+ fs.mkdirs(p);
+ bak = System.err;
+
+ final FsShell fshell = new FsShell(conf);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ System.setErr(new PrintStream(out));
+
+ // No permission to write xattr
+ fs.setPermission(p, new FsPermission((short) 0700));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ int ret = ToolRunner.run(fshell, new String[]{
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 1", 1, ret);
+ String str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
+ out.reset();
+ return null;
+ }
+ });
+
+ int ret = ToolRunner.run(fshell, new String[]{
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+
+ // No permission to read and remove
+ fs.setPermission(p, new FsPermission((short) 0750));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // Read
+ int ret = ToolRunner.run(fshell, new String[]{
+ "-getfattr", "-n", "user.a1", "/foo"});
+ assertEquals("Returned should be 1", 1, ret);
+ String str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
+ out.reset();
+ // Remove
+ ret = ToolRunner.run(fshell, new String[]{
+ "-setfattr", "-x", "user.a1", "/foo"});
+ assertEquals("Returned should be 1", 1, ret);
+ str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
+ out.reset();
+ return null;
+ }
+ });
+ } finally {
+ if (bak != null) {
+ System.setErr(bak);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /* HDFS-6413 xattr names erroneously handled as case-insensitive */
+ @Test (timeout = 30000)
+ public void testSetXAttrCaseSensitivity() throws Exception {
+ UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ MiniDFSCluster cluster = null;
+ PrintStream bak = null;
+ try {
+ final Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ FileSystem fs = cluster.getFileSystem();
+ Path p = new Path("/mydir");
+ fs.mkdirs(p);
+ bak = System.err;
+
+ final FsShell fshell = new FsShell(conf);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(out));
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo"},
+ new String[] {});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo", "user.FOO"},
+ new String[] {});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo", "user.FOO", "user.foo"},
+ new String[] {});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
+ new String[] {"user.Foo=", "user.FOO=", "user.foo="});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo", "user.FOO"},
+ new String[] {"foo"});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
+ new String[] {"-getfattr", "-d", "/mydir"},
+ new String[] {"user.Foo"},
+ new String[] {"FOO"});
+
+ doSetXattr(out, fshell,
+ new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
+ new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
+ new String[] {},
+ new String[] {"Foo"});
+
+ } finally {
+ if (bak != null) {
+ System.setOut(bak);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
+ String[] setOp, String[] getOp, String[] expectArr,
+ String[] dontExpectArr) throws Exception {
+ int ret = ToolRunner.run(fshell, setOp);
+ out.reset();
+ ret = ToolRunner.run(fshell, getOp);
+ final String str = out.toString();
+ for (int i = 0; i < expectArr.length; i++) {
+ final String expect = expectArr[i];
+ final StringBuilder sb = new StringBuilder
+ ("Incorrect results from getfattr. Expected: ");
+ sb.append(expect).append(" Full Result: ");
+ sb.append(str);
+ assertTrue(sb.toString(),
+ str.indexOf(expect) != -1);
+ }
+
+ for (int i = 0; i < dontExpectArr.length; i++) {
+ String dontExpect = dontExpectArr[i];
+ final StringBuilder sb = new StringBuilder
+ ("Incorrect results from getfattr. Didn't Expect: ");
+ sb.append(dontExpect).append(" Full Result: ");
+ sb.append(str);
+ assertTrue(sb.toString(),
+ str.indexOf(dontExpect) == -1);
+ }
+ out.reset();
+ }
+
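+ The assertions driven by doSetXattr() above boil down to: the namespace prefix of
+ an xattr name is matched case-insensitively, while the rest of the name keeps its
+ case, so User.Foo, user.FOO and USER.foo become three distinct attributes under
+ the user namespace. A small sketch of the same behaviour through the FileSystem
+ API (hypothetical path and value):
+
+     import java.io.IOException;
+     import org.apache.hadoop.fs.FileSystem;
+     import org.apache.hadoop.fs.Path;
+
+     class XAttrCaseSketch {
+       static void illustrate(FileSystem fs, Path p) throws IOException {
+         byte[] v = new byte[] { 0x31 };
+         fs.setXAttr(p, "User.Foo", v);   // namespace normalized: stored as user.Foo
+         fs.setXAttr(p, "user.FOO", v);   // different name part:  stored as user.FOO
+         fs.setXAttr(p, "USER.foo", v);   // different name part:  stored as user.foo
+         // fs.getXAttrs(p) now reports three entries: user.Foo, user.FOO, user.foo
+       }
+     }
+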
+ /**
+ *
+   * Test to make sure that user-namespace xattrs can be set only if the caller
+   * has path access, and that for sticky directories only the owner or a
+   * privileged user can write. Trusted-namespace xattrs can be set only by
+   * privileged users.
+ *
+ * As user1: Create a directory (/foo) as user1, chown it to user1 (and
+ * user1's group), grant rwx to "other".
+ *
+ * As user2: Set an xattr (should pass with path access).
+ *
+ * As user1: Set an xattr (should pass).
+ *
+ * As user2: Read the xattr (should pass). Remove the xattr (should pass with
+ * path access).
+ *
+ * As user1: Read the xattr (should pass). Remove the xattr (should pass).
+ *
+ * As user1: Change permissions only to owner
+ *
+   * As user2: Set an xattr (should fail with no path access). Remove an
+   * xattr (should fail with no path access).
+   *
+   * As superuser: Set an xattr in the trusted namespace (should pass).
+ */
+ @Test (timeout = 30000)
+ public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
+ final String USER1 = "user1";
+ final String GROUP1 = "supergroup";
+ final UserGroupInformation user1 = UserGroupInformation.
+ createUserForTesting(USER1, new String[] {GROUP1});
+ final UserGroupInformation user2 = UserGroupInformation.
+ createUserForTesting("user2", new String[] {"mygroup2"});
+ final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
+ MiniDFSCluster cluster = null;
+ PrintStream bak = null;
+ try {
+ final Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ final FileSystem fs = cluster.getFileSystem();
+ fs.setOwner(new Path("/"), USER1, GROUP1);
+ bak = System.err;
+
+ final FsShell fshell = new FsShell(conf);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ System.setErr(new PrintStream(out));
+
+ //Test 1. Let user1 be owner for /foo
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-mkdir", "/foo"});
+ assertEquals("Return should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ //Test 2. Give access to others
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // Give access to "other"
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-chmod", "707", "/foo"});
+ assertEquals("Return should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 3. Should be allowed to write xattr if there is a path access to
+ // user (user2).
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ //Test 4. There should be permission to write xattr for
+ // the owning user with write permissions.
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 5. There should be permission to read non-owning user (user2) if
+ // there is path access to that user and also can remove.
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // Read
+ int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
+ "user.a1", "/foo" });
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ // Remove
+ ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+ "user.a1", "/foo" });
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 6. There should be permission to read/remove for
+ // the owning user with path access.
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ return null;
+ }
+ });
+
+ // Test 7. Change permission to have path access only to owner(user1)
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+          // Restrict access to the owner only
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-chmod", "700", "/foo"});
+ assertEquals("Return should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 8. There should be no permissions to set for
+ // the non-owning user with no path access.
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // set
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+ "user.a2", "/foo" });
+ assertEquals("Returned should be 1", 1, ret);
+ final String str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 9. There should be no permissions to remove for
+ // the non-owning user with no path access.
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // set
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+ "user.a2", "/foo" });
+ assertEquals("Returned should be 1", 1, ret);
+ final String str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 10. Superuser should be allowed to set with trusted namespace
+ SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // set
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+ "trusted.a3", "/foo" });
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+ } finally {
+ if (bak != null) {
+ System.setErr(bak);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /*
+ * 1. Test that CLI throws an exception and returns non-0 when user does
+ * not have permission to read an xattr.
+ * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+ * xattr is requested.
+ */
+ @Test (timeout = 120000)
+ public void testGetFAttrErrors() throws Exception {
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ MiniDFSCluster cluster = null;
+ PrintStream bakErr = null;
+ try {
+ final Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ final FileSystem fs = cluster.getFileSystem();
+ final Path p = new Path("/foo");
+ fs.mkdirs(p);
+ bakErr = System.err;
+
+ final FsShell fshell = new FsShell(conf);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ System.setErr(new PrintStream(out));
+
+ // No permission for "other".
+ fs.setPermission(p, new FsPermission((short) 0700));
+
+ {
+ final int ret = ToolRunner.run(fshell, new String[] {
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ }
+
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ int ret = ToolRunner.run(fshell, new String[] {
+ "-getfattr", "-n", "user.a1", "/foo"});
+ String str = out.toString();
+ assertTrue("xattr value was incorrectly returned",
+ str.indexOf("1234") == -1);
+ out.reset();
+ return null;
+ }
+ });
+
+ {
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-getfattr", "-n", "user.nonexistent", "/foo"});
+ String str = out.toString();
+ assertTrue("xattr value was incorrectly returned",
+ str.indexOf(
+ "getfattr: At least one of the attributes provided was not found")
+ >= 0);
+ out.reset();
+ }
+ } finally {
+ if (bakErr != null) {
+ System.setErr(bakErr);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
/**
* Test that the server trash configuration is respected when
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Tue Aug 19 23:49:39 2014
@@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
UpgradeUtilities.checksumContents(
- NAME_NODE, new File(baseDirs[i],"previous")),
+ NAME_NODE, new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
@@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")),
+ UpgradeUtilities.checksumContents(DATA_NODE,
+ new File(baseDirs[i],"current"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
@@ -267,7 +268,8 @@ public class TestDFSStorageStateRecovery
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")),
+ UpgradeUtilities.checksumContents(DATA_NODE,
+ new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
@@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir),
- UpgradeUtilities.checksumMasterBlockPoolContents());
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
+ false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
if (previousShouldExist) {
@@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery
File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
assertTrue(bpPrevDir.isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir),
- UpgradeUtilities.checksumMasterBlockPoolContents());
+ UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
+ false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
}
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Tue Aug 19 23:49:39 2014
@@ -100,7 +100,7 @@ public class TestDFSUpgrade {
File previous = new File(baseDir, "previous");
assertExists(previous);
- assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+ assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
@@ -114,23 +114,25 @@ public class TestDFSUpgrade {
void checkDataNode(String[] baseDirs, String bpid) throws IOException {
for (int i = 0; i < baseDirs.length; i++) {
File current = new File(baseDirs[i], "current/" + bpid + "/current");
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
UpgradeUtilities.checksumMasterDataNodeContents());
// block files are placed under <sd>/current/<bpid>/current/finalized
File currentFinalized =
MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ currentFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
assertTrue(previous.isDirectory());
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
UpgradeUtilities.checksumMasterDataNodeContents());
File previousFinalized =
new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ previousFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
}
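
Throughout this patch UpgradeUtilities.checksumContents() gains a third argument, passed as true only for the finalized directories, which may now contain a nested tree of block files; read as a recursive flag, the check amounts to a directory-tree checksum. A self-contained sketch of such a checksum using CRC32 (illustration only, not the UpgradeUtilities implementation):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.zip.CRC32;

    class DirChecksumSketch {
      /** Checksum every regular file under dir, optionally recursing. */
      static long checksumContents(File dir, boolean recursive) throws IOException {
        CRC32 crc = new CRC32();
        File[] entries = dir.listFiles();
        if (entries == null) {
          throw new IOException(dir + " is not a readable directory");
        }
        Arrays.sort(entries);                       // deterministic order
        for (File f : entries) {
          if (f.isDirectory()) {
            if (recursive) {
              crc.update((int) checksumContents(f, true));
            }
          } else {
            byte[] buf = new byte[4096];
            try (FileInputStream in = new FileInputStream(f)) {
              int n;
              while ((n = in.read(buf)) > 0) {
                crc.update(buf, 0, n);
              }
            }
          }
        }
        return crc.getValue();
      }
    }
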
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Tue Aug 19 23:49:39 2014
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
+import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
@@ -70,6 +71,9 @@ public class TestDFSUpgradeFromImage {
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+ private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
+ private static final String HADOOP023_RESERVED_IMAGE =
+ "hadoop-0.23-reserved.tgz";
private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
private static class ReferenceFileInfo {
@@ -77,7 +81,7 @@ public class TestDFSUpgradeFromImage {
long checksum;
}
- private static final Configuration upgradeConf;
+ static final Configuration upgradeConf;
static {
upgradeConf = new HdfsConfiguration();
@@ -92,7 +96,7 @@ public class TestDFSUpgradeFromImage {
boolean printChecksum = false;
- private void unpackStorage(String tarFileName)
+ void unpackStorage(String tarFileName, String referenceName)
throws IOException {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + tarFileName;
@@ -107,7 +111,7 @@ public class TestDFSUpgradeFromImage {
BufferedReader reader = new BufferedReader(new FileReader(
System.getProperty("test.cache.data", "build/test/cache")
- + "/" + HADOOP_DFS_DIR_TXT));
+ + "/" + referenceName));
String line;
while ( (line = reader.readLine()) != null ) {
@@ -282,7 +286,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel22Image() throws IOException {
- unpackStorage(HADOOP22_IMAGE);
+ unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
numDataNodes(4));
}
@@ -293,7 +297,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
- unpackStorage(HADOOP22_IMAGE);
+ unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
// Overwrite the md5 stored in the VERSION files
File baseDir = new File(MiniDFSCluster.getBaseDirectory());
@@ -326,12 +330,146 @@ public class TestDFSUpgradeFromImage {
}
/**
+ * Test upgrade from a branch-1.2 image with reserved paths
+ */
+ @Test
+ public void testUpgradeFromRel1ReservedImage() throws Exception {
+ unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
+ MiniDFSCluster cluster = null;
+ // Try it once without setting the upgrade flag to ensure it fails
+ final Configuration conf = new Configuration();
+ // Try it again with a custom rename string
+ try {
+ FSImageFormat.setRenameReservedPairs(
+ ".snapshot=.user-snapshot," +
+ ".reserved=.my-reserved");
+ cluster =
+ new MiniDFSCluster.Builder(conf)
+ .format(false)
+ .startupOption(StartupOption.UPGRADE)
+ .numDataNodes(0).build();
+ DistributedFileSystem dfs = cluster.getFileSystem();
+ // Make sure the paths were renamed as expected
+ // Also check that paths are present after a restart, checks that the
+ // upgraded fsimage has the same state.
+ final String[] expected = new String[] {
+ "/.my-reserved",
+ "/.user-snapshot",
+ "/.user-snapshot/.user-snapshot",
+ "/.user-snapshot/open",
+ "/dir1",
+ "/dir1/.user-snapshot",
+ "/dir2",
+ "/dir2/.user-snapshot",
+ "/user",
+ "/user/andrew",
+ "/user/andrew/.user-snapshot",
+ };
+ for (int i=0; i<2; i++) {
+ // Restart the second time through this loop
+ if (i==1) {
+ cluster.finalizeCluster(conf);
+ cluster.restartNameNode(true);
+ }
+ ArrayList<Path> toList = new ArrayList<Path>();
+ toList.add(new Path("/"));
+ ArrayList<String> found = new ArrayList<String>();
+ while (!toList.isEmpty()) {
+ Path p = toList.remove(0);
+ FileStatus[] statuses = dfs.listStatus(p);
+ for (FileStatus status: statuses) {
+ final String path = status.getPath().toUri().getPath();
+ System.out.println("Found path " + path);
+ found.add(path);
+ if (status.isDirectory()) {
+ toList.add(status.getPath());
+ }
+ }
+ }
+ for (String s: expected) {
+ assertTrue("Did not find expected path " + s, found.contains(s));
+ }
+ assertEquals("Found an unexpected path while listing filesystem",
+ found.size(), expected.length);
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
+ * Test upgrade from a 0.23.11 image with reserved paths
+ */
+ @Test
+ public void testUpgradeFromRel023ReservedImage() throws Exception {
+ unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
+ MiniDFSCluster cluster = null;
+ // Try it once without setting the upgrade flag to ensure it fails
+ final Configuration conf = new Configuration();
+ // Try it again with a custom rename string
+ try {
+ FSImageFormat.setRenameReservedPairs(
+ ".snapshot=.user-snapshot," +
+ ".reserved=.my-reserved");
+ cluster =
+ new MiniDFSCluster.Builder(conf)
+ .format(false)
+ .startupOption(StartupOption.UPGRADE)
+ .numDataNodes(0).build();
+ DistributedFileSystem dfs = cluster.getFileSystem();
+ // Make sure the paths were renamed as expected
+ // Also check that the paths are still present after a restart, which
+ // verifies that the upgraded fsimage preserves the same state.
+ final String[] expected = new String[] {
+ "/.user-snapshot",
+ "/dir1",
+ "/dir1/.user-snapshot",
+ "/dir2",
+ "/dir2/.user-snapshot"
+ };
+ for (int i=0; i<2; i++) {
+ // Restart the second time through this loop
+ if (i==1) {
+ cluster.finalizeCluster(conf);
+ cluster.restartNameNode(true);
+ }
+ ArrayList<Path> toList = new ArrayList<Path>();
+ toList.add(new Path("/"));
+ ArrayList<String> found = new ArrayList<String>();
+ while (!toList.isEmpty()) {
+ Path p = toList.remove(0);
+ FileStatus[] statuses = dfs.listStatus(p);
+ for (FileStatus status: statuses) {
+ final String path = status.getPath().toUri().getPath();
+ System.out.println("Found path " + path);
+ found.add(path);
+ if (status.isDirectory()) {
+ toList.add(status.getPath());
+ }
+ }
+ }
+ for (String s: expected) {
+ assertTrue("Did not find expected path " + s, found.contains(s));
+ }
+ assertEquals("Found an unexpected path while listing filesystem",
+ expected.length, found.size());
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
* Test upgrade from 2.0 image with a variety of .snapshot and .reserved
* paths to test renaming on upgrade
*/
@Test
public void testUpgradeFromRel2ReservedImage() throws Exception {
- unpackStorage(HADOOP2_RESERVED_IMAGE);
+ unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
@@ -435,7 +573,7 @@ public class TestDFSUpgradeFromImage {
} while (dirList.hasMore());
}
- private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+ void upgradeAndVerify(MiniDFSCluster.Builder bld)
throws IOException {
MiniDFSCluster cluster = null;
try {
@@ -464,7 +602,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel1BBWImage() throws IOException {
- unpackStorage(HADOOP1_BBW_IMAGE);
+ unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
Configuration conf = new Configuration(upgradeConf);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
System.getProperty("test.build.data") + File.separator +
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Tue Aug 19 23:49:39 2014
@@ -30,8 +30,12 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
@@ -39,6 +43,7 @@ import static org.junit.Assert.assertTha
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
@@ -61,8 +66,12 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
+import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@@ -792,4 +801,68 @@ public class TestDFSUtil {
}
}
}
+
+ @Test
+ public void testGetPassword() throws Exception {
+ File testDir = new File(System.getProperty("test.build.data",
+ "target/test-dir"));
+
+ Configuration conf = new Configuration();
+ final String ourUrl =
+ JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
+
+ File file = new File(testDir, "test.jks");
+ file.delete();
+ conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
+
+ CredentialProvider provider =
+ CredentialProviderFactory.getProviders(conf).get(0);
+ char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
+ char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
+ char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
+
+ // ensure that we get nulls when the key isn't there
+ assertEquals(null, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
+ assertEquals(null, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
+ assertEquals(null, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
+
+ // create new aliases
+ try {
+ provider.createCredentialEntry(
+ DFS_SERVER_HTTPS_KEYPASSWORD_KEY, keypass);
+
+ provider.createCredentialEntry(
+ DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY, storepass);
+
+ provider.createCredentialEntry(
+ DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY, trustpass);
+
+ // write out so that it can be found in checks
+ provider.flush();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ }
+ // make sure we get back the right key directly from api
+ assertArrayEquals(keypass, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_KEYPASSWORD_KEY).getCredential());
+ assertArrayEquals(storepass, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY).getCredential());
+ assertArrayEquals(trustpass, provider.getCredentialEntry(
+ DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
+
+ // use WebAppUtils as would be used by loadSslConfiguration
+ Assert.assertEquals("keypass",
+ DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
+ Assert.assertEquals("storepass",
+ DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
+ Assert.assertEquals("trustpass",
+ DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
+
+ // let's make sure that a password that doesn't exist returns null
+ Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
+ }
}
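The assertions above exercise DFSUtil.getPassword, whose implementation is not part of this diff. A rough sketch of the lookup being tested, assuming it delegates to Configuration.getPassword (which consults any configured credential providers before falling back to a clear-text configuration value):

    // Sketch only; the real DFSUtil.getPassword may differ in its error handling.
    static String getPassword(Configuration conf, String alias) {
      String password = null;
      try {
        // Configuration.getPassword checks credential providers first,
        // then the plain configuration entry for the alias.
        char[] chars = conf.getPassword(alias);
        if (chars != null) {
          password = new String(chars);
        }
      } catch (IOException ioe) {
        password = null; // unresolved aliases surface as null, as the test expects
      }
      return password;
    }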
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Tue Aug 19 23:49:39 2014
@@ -125,17 +125,16 @@ public class TestDataTransferProtocol {
throw eof;
}
- LOG.info("Received: " +new String(retBuf));
- LOG.info("Expected: " + StringUtils.byteToHexString(recvBuf.toByteArray()));
+ String received = StringUtils.byteToHexString(retBuf);
+ String expected = StringUtils.byteToHexString(recvBuf.toByteArray());
+ LOG.info("Received: " + received);
+ LOG.info("Expected: " + expected);
if (eofExpected) {
throw new IOException("Did not recieve IOException when an exception " +
"is expected while reading from " + datanode);
}
-
- byte[] needed = recvBuf.toByteArray();
- assertEquals(StringUtils.byteToHexString(needed),
- StringUtils.byteToHexString(retBuf));
+ assertEquals(expected, received);
} finally {
IOUtils.closeSocket(sock);
}
@@ -184,10 +183,7 @@ public class TestDataTransferProtocol {
String description, Boolean eofExcepted) throws IOException {
sendBuf.reset();
recvBuf.reset();
- sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
- new DatanodeInfo[1], null, stage,
- 0, block.getNumBytes(), block.getNumBytes(), newGS,
- DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+ writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
if (eofExcepted) {
sendResponse(Status.ERROR, null, null, recvOut);
sendRecvData(description, true);
@@ -343,10 +339,7 @@ public class TestDataTransferProtocol {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
- DFSClient dfsClient = new DFSClient(
- new InetSocketAddress("localhost", cluster.getNameNodePort()),
- conf);
- datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
+ datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
FileSystem fileSys = cluster.getFileSystem();
@@ -381,23 +374,14 @@ public class TestDataTransferProtocol {
DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
- sender.writeBlock(new ExtendedBlock(poolId, newBlockId),
- BlockTokenSecretManager.DUMMY_TOKEN, "cl",
- new DatanodeInfo[1], null,
- BlockConstructionStage.PIPELINE_SETUP_CREATE,
- 0, 0L, 0L, 0L,
- badChecksum, CachingStrategy.newDefaultStrategy());
+ writeBlock(poolId, newBlockId, badChecksum);
recvBuf.reset();
sendResponse(Status.ERROR, null, null, recvOut);
sendRecvData("wrong bytesPerChecksum while writing", true);
sendBuf.reset();
recvBuf.reset();
- sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
- BlockTokenSecretManager.DUMMY_TOKEN, "cl",
- new DatanodeInfo[1], null,
- BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
- DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+ writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
PacketHeader hdr = new PacketHeader(
4, // size of packet
@@ -416,11 +400,7 @@ public class TestDataTransferProtocol {
// test for writing a valid zero size block
sendBuf.reset();
recvBuf.reset();
- sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
- BlockTokenSecretManager.DUMMY_TOKEN, "cl",
- new DatanodeInfo[1], null,
- BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
- DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+ writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
hdr = new PacketHeader(
8, // size of packet
@@ -532,4 +512,18 @@ public class TestDataTransferProtocol {
assertTrue(hdr.sanityCheck(99));
assertFalse(hdr.sanityCheck(100));
}
+
+ void writeBlock(String poolId, long blockId, DataChecksum checksum) throws IOException {
+ writeBlock(new ExtendedBlock(poolId, blockId),
+ BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, checksum);
+ }
+
+ void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
+ long newGS, DataChecksum checksum) throws IOException {
+ sender.writeBlock(block, StorageType.DEFAULT,
+ BlockTokenSecretManager.DUMMY_TOKEN, "cl",
+ new DatanodeInfo[1], new StorageType[1], null, stage,
+ 0, block.getNumBytes(), block.getNumBytes(), newGS,
+ checksum, CachingStrategy.newDefaultStrategy());
+ }
}
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Tue Aug 19 23:49:39 2014
@@ -87,15 +87,15 @@ public class TestDatanodeBlockScanner {
throws IOException, TimeoutException {
URL url = new URL("http://localhost:" + infoPort +
"/blockScannerReport?listblocks");
- long lastWarnTime = Time.now();
+ long lastWarnTime = Time.monotonicNow();
if (newTime <= 0) newTime = 1L;
long verificationTime = 0;
String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
long failtime = (timeout <= 0) ? Long.MAX_VALUE
- : Time.now() + timeout;
+ : Time.monotonicNow() + timeout;
while (verificationTime < newTime) {
- if (failtime < Time.now()) {
+ if (failtime < Time.monotonicNow()) {
throw new TimeoutException("failed to achieve block verification after "
+ timeout + " msec. Current verification timestamp = "
+ verificationTime + ", requested verification time > "
@@ -118,7 +118,7 @@ public class TestDatanodeBlockScanner {
}
if (verificationTime < newTime) {
- long now = Time.now();
+ long now = Time.monotonicNow();
if ((now - lastWarnTime) >= 5*1000) {
LOG.info("Waiting for verification of " + block);
lastWarnTime = now;
@@ -134,7 +134,7 @@ public class TestDatanodeBlockScanner {
@Test
public void testDatanodeBlockScanner() throws IOException, TimeoutException {
- long startTime = Time.now();
+ long startTime = Time.monotonicNow();
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -344,7 +344,7 @@ public class TestDatanodeBlockScanner {
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
- long startTime = Time.now();
+ long startTime = Time.monotonicNow();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION_FACTOR)
.build();
@@ -428,10 +428,10 @@ public class TestDatanodeBlockScanner {
private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex,
long timeout) throws TimeoutException, InterruptedException {
File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
- long failtime = Time.now()
+ long failtime = Time.monotonicNow()
+ ((timeout > 0) ? timeout : Long.MAX_VALUE);
while (blockFile != null && blockFile.exists()) {
- if (failtime < Time.now()) {
+ if (failtime < Time.monotonicNow()) {
throw new TimeoutException("waited too long for blocks to be deleted: "
+ blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; "));
}
@@ -445,24 +445,19 @@ public class TestDatanodeBlockScanner {
@Test
public void testReplicaInfoParsing() throws Exception {
- testReplicaInfoParsingSingle(BASE_PATH, new int[0]);
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3});
+ testReplicaInfoParsingSingle(BASE_PATH);
+ testReplicaInfoParsingSingle(BASE_PATH + "/subdir1");
+ testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3");
}
- private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) {
+ private static void testReplicaInfoParsingSingle(String subDirPath) {
File testFile = new File(subDirPath);
- assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
- assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
+ assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath);
}
@Test
public void testDuplicateScans() throws Exception {
- long startTime = Time.now();
+ long startTime = Time.monotonicNow();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
.numDataNodes(1).build();
FileSystem fs = null;
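The Time.now() to Time.monotonicNow() substitutions above move the timeout bookkeeping in this test onto a monotonic clock, so wall-clock adjustments (NTP steps, manual clock changes) can neither trip nor stall the wait loops. The pattern in isolation, where done() and timeoutMs are placeholders for whatever condition and limit the caller supplies:

    // Illustrative timeout loop using a monotonic clock for elapsed-time checks.
    long failtime = Time.monotonicNow() + timeoutMs;
    while (!done()) {
      if (failtime < Time.monotonicNow()) {
        throw new TimeoutException("gave up after " + timeoutMs + " msec");
      }
      Thread.sleep(500);
    }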
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Tue Aug 19 23:49:39 2014
@@ -51,6 +51,10 @@ public class TestDatanodeConfig {
public static void setUp() throws Exception {
clearBaseDir();
Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0);
+ conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
}
@@ -100,8 +104,14 @@ public class TestDatanodeConfig {
String dnDir3 = dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
dnDir1 + "," + dnDir2 + "," + dnDir3);
- cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
- assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+ try {
+ cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+ assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdownDataNodes();
+ }
+ }
}
private static String makeURI(String scheme, String host, String path)