Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/07/07 22:44:09 UTC
svn commit: r1608603 [5/6] - in
/hadoop/common/branches/fs-encryption/hadoop-hdfs-project:
hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-...
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Mon Jul 7 20:43:56 2014
@@ -56,6 +56,7 @@ import org.junit.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
@@ -1778,6 +1779,166 @@ public class TestDFSShell {
}
}
+ // Verify that cp -p and its t/o/p/x/a flags preserve directory attributes.
+ @Test (timeout = 120000)
+ public void testCopyCommandsToDirectoryWithPreserveOption()
+ throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .format(true).build();
+ FsShell shell = null;
+ FileSystem fs = null;
+ final String testdir =
+ "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
+ + counter.getAndIncrement();
+ final Path hdfsTestDir = new Path(testdir);
+ try {
+ fs = cluster.getFileSystem();
+ fs.mkdirs(hdfsTestDir);
+ Path srcDir = new Path(hdfsTestDir, "srcDir");
+ fs.mkdirs(srcDir);
+
+ fs.setAcl(srcDir, Lists.newArrayList(
+ aclEntry(ACCESS, USER, ALL),
+ aclEntry(ACCESS, USER, "foo", ALL),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE),
+ aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
+ aclEntry(ACCESS, OTHER, EXECUTE)));
+ // set sticky bit
+ fs.setPermission(srcDir,
+ new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+ // Create a file in srcDir to check that the modification time of
+ // srcDir is preserved after copying. If cp -p preserved the
+ // modification time first and then copied the child (srcFile), the
+ // modification time would not be preserved.
+ Path srcFile = new Path(srcDir, "srcFile");
+ fs.create(srcFile).close();
+
+ FileStatus status = fs.getFileStatus(srcDir);
+ final long mtime = status.getModificationTime();
+ final long atime = status.getAccessTime();
+ final String owner = status.getOwner();
+ final String group = status.getGroup();
+ final FsPermission perm = status.getPermission();
+
+ fs.setXAttr(srcDir, "user.a1", new byte[]{0x31, 0x32, 0x33});
+ fs.setXAttr(srcDir, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+
+ shell = new FsShell(conf);
+
+ // -p
+ Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
+ String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
+ targetDir1.toUri().toString() };
+ int ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -p is not working", SUCCESS, ret);
+ FileStatus targetStatus = fs.getFileStatus(targetDir1);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ FsPermission targetPerm = targetStatus.getPermission();
+ assertEquals(perm, targetPerm);
+ Map<String, byte[]> xattrs = fs.getXAttrs(targetDir1);
+ assertTrue(xattrs.isEmpty());
+ List<AclEntry> acls = fs.getAclStatus(targetDir1).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptop
+ Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
+ argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
+ targetDir2.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptop is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir2);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertEquals(perm, targetPerm);
+ xattrs = fs.getXAttrs(targetDir2);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir2).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopx
+ Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
+ argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
+ targetDir3.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopx is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir3);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertEquals(perm, targetPerm);
+ xattrs = fs.getXAttrs(targetDir3);
+ assertEquals(2, xattrs.size());
+ assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+ assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+ acls = fs.getAclStatus(targetDir3).getEntries();
+ assertTrue(acls.isEmpty());
+ assertFalse(targetPerm.getAclBit());
+
+ // -ptopa
+ Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
+ argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
+ targetDir4.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir4);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertEquals(perm, targetPerm);
+ xattrs = fs.getXAttrs(targetDir4);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir4).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
+
+ // -ptoa (verify that preserving ACLs also preserves the permission bits)
+ Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
+ argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
+ targetDir5.toUri().toString() };
+ ret = ToolRunner.run(shell, argv);
+ assertEquals("cp -ptoa is not working", SUCCESS, ret);
+ targetStatus = fs.getFileStatus(targetDir5);
+ assertEquals(mtime, targetStatus.getModificationTime());
+ assertEquals(atime, targetStatus.getAccessTime());
+ assertEquals(owner, targetStatus.getOwner());
+ assertEquals(group, targetStatus.getGroup());
+ targetPerm = targetStatus.getPermission();
+ assertEquals(perm, targetPerm);
+ xattrs = fs.getXAttrs(targetDir5);
+ assertTrue(xattrs.isEmpty());
+ acls = fs.getAclStatus(targetDir5).getEntries();
+ assertFalse(acls.isEmpty());
+ assertTrue(targetPerm.getAclBit());
+ assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
+ } finally {
+ if (shell != null) {
+ shell.close();
+ }
+ if (fs != null) {
+ fs.delete(hdfsTestDir, true);
+ fs.close();
+ }
+ cluster.shutdown();
+ }
+ }
+
// Verify cp -pa option will preserve both ACL and sticky bit.
@Test (timeout = 120000)
public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
@@ -2295,38 +2456,39 @@ public class TestDFSShell {
}
/**
- * HDFS-6374 setXAttr should require the user to be the owner of the file
- * or directory.
- *
- * Test to make sure that only the owner of a file or directory can set
- * or remove the xattrs.
- *
- * As user1:
- * Create a directory (/foo) as user1, chown it to user1 (and user1's group),
- * grant rwx to "other".
- *
- * As user2:
- * Set an xattr (should fail).
- *
- * As user1:
- * Set an xattr (should pass).
- *
- * As user2:
- * Read the xattr (should pass).
- * Remove the xattr (should fail).
- *
- * As user1:
- * Read the xattr (should pass).
- * Remove the xattr (should pass).
+ *
+ * Test to make sure that user-namespace xattrs can be set only when the
+ * user has path access, and that for sticky directories only the owner or
+ * a privileged user can write. Trusted-namespace xattrs can be set only
+ * by privileged users.
+ *
+ * As user1: Create a directory (/foo) as user1, chown it to user1 (and
+ * user1's group), grant rwx to "other".
+ *
+ * As user2: Set an xattr (should pass with path access).
+ *
+ * As user1: Set an xattr (should pass).
+ *
+ * As user2: Read the xattr (should pass). Remove the xattr (should pass with
+ * path access).
+ *
+ * As user1: Read the xattr (should pass). Remove the xattr (should pass).
+ *
+ * As user1: Restrict permissions to the owner only.
+ *
+ * As user2: Set an xattr (should fail with no path access). Remove an
+ * xattr (should fail with no path access).
+ *
+ * As superuser: Set an xattr in the trusted namespace (should pass).
*/
@Test (timeout = 30000)
public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String USER1 = "user1";
- final String GROUP1 = "mygroup1";
+ final String GROUP1 = "supergroup";
final UserGroupInformation user1 = UserGroupInformation.
createUserForTesting(USER1, new String[] {GROUP1});
final UserGroupInformation user2 = UserGroupInformation.
createUserForTesting("user2", new String[] {"mygroup2"});
+ final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
@@ -2342,7 +2504,7 @@ public class TestDFSShell {
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
- // mkdir foo as user1
+ // Test 1. Let user1 be the owner of /foo
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
@@ -2353,7 +2515,8 @@ public class TestDFSShell {
return null;
}
});
-
+
+ // Test 2. Give rwx access to "other"
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
@@ -2366,23 +2529,21 @@ public class TestDFSShell {
}
});
- // No permission to write xattr for non-owning user (user2).
+ // Test 3. A non-owning user (user2) with path access should be
+ // allowed to write an xattr.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
- assertEquals("Returned should be 1", 1, ret);
- final String str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
}
});
- // But there should be permission to write xattr for
- // the owning user.
+ // Test 4. The owning user (user1), having write permission, should
+ // be able to write an xattr.
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
@@ -2394,19 +2555,55 @@ public class TestDFSShell {
}
});
- // There should be permission to read,but not to remove for
- // non-owning user (user2).
+ // Test 5. A non-owning user (user2) with path access should be able
+ // to read the xattr, and also to remove it.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Read
- int ret = ToolRunner.run(fshell, new String[]{
- "-getfattr", "-n", "user.a1", "/foo"});
+ int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
+ "user.a1", "/foo" });
assertEquals("Returned should be 0", 0, ret);
out.reset();
// Remove
- ret = ToolRunner.run(fshell, new String[]{
- "-setfattr", "-x", "user.a1", "/foo"});
+ ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+ "user.a1", "/foo" });
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 6. There should be permission to read/remove for
+ // the owning user with path access.
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ return null;
+ }
+ });
+
+ // Test 7. Change permission to have path access only to owner(user1)
+ user1.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // Restrict access to the owner only
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-chmod", "700", "/foo"});
+ assertEquals("Return should be 0", 0, ret);
+ out.reset();
+ return null;
+ }
+ });
+
+ // Test 8. The non-owning user (user2), with no path access, should
+ // not be able to set an xattr.
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // set
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+ "user.a2", "/foo" });
assertEquals("Returned should be 1", 1, ret);
final String str = out.toString();
assertTrue("Permission denied printed",
@@ -2415,20 +2612,31 @@ public class TestDFSShell {
return null;
}
});
-
- // But there should be permission to read/remove for
- // the owning user.
- user1.doAs(new PrivilegedExceptionAction<Object>() {
+
+ // Test 9. The non-owning user (user2), with no path access, should
+ // not be able to remove an xattr.
+ user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
- // Read
- int ret = ToolRunner.run(fshell, new String[]{
- "-getfattr", "-n", "user.a1", "/foo"});
- assertEquals("Returned should be 0", 0, ret);
+ // remove
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+ "user.a2", "/foo" });
+ assertEquals("Returned should be 1", 1, ret);
+ final String str = out.toString();
+ assertTrue("Permission denied printed",
+ str.indexOf("Permission denied") != -1);
out.reset();
- // Remove
- ret = ToolRunner.run(fshell, new String[]{
- "-setfattr", "-x", "user.a1", "/foo"});
+ return null;
+ }
+ });
+
+ // Test 10. The superuser should be allowed to set an xattr in the
+ // trusted namespace.
+ SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ // set
+ int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+ "trusted.a3", "/foo" });
assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
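
For reference, the preserve flags exercised above can be driven the same way
outside a test. A minimal sketch, assuming a reachable HDFS; the /demo paths
and class name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CpPreserveDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FsShell shell = new FsShell(conf);
        // -p preserves timestamps (t), ownership (o) and permission (p);
        // appending x and a also preserves xattrs and ACLs, matching the
        // -ptopx / -ptopa cases verified above.
        int ret = ToolRunner.run(shell,
            new String[] { "-cp", "-ptopxa", "/demo/src", "/demo/dst" });
        System.exit(ret);
      }
    }
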
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Mon Jul 7 20:43:56 2014
@@ -153,6 +153,15 @@ public class TestLeaseRecovery2 {
verifyFile(dfs, filepath1, actual, size);
}
+ @Test
+ public void testLeaseRecoverByAnotherUser() throws Exception {
+ byte[] actual = new byte[FILE_SIZE];
+ cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+ Path filepath = createFile("/immediateRecoverLease-x", 0, true);
+ recoverLeaseUsingCreate2(filepath);
+ verifyFile(dfs, filepath, actual, 0);
+ }
+
private Path createFile(final String filestr, final int size,
final boolean triggerLeaseRenewerInterrupt)
throws IOException, InterruptedException {
@@ -196,7 +205,7 @@ public class TestLeaseRecovery2 {
}
private void recoverLeaseUsingCreate(Path filepath)
- throws IOException, InterruptedException {
+ throws IOException, InterruptedException {
FileSystem dfs2 = getFSAsAnotherUser(conf);
for(int i = 0; i < 10; i++) {
AppendTestUtil.LOG.info("i=" + i);
@@ -216,6 +225,20 @@ public class TestLeaseRecovery2 {
fail("recoverLeaseUsingCreate failed");
}
+ private void recoverLeaseUsingCreate2(Path filepath)
+ throws Exception {
+ FileSystem dfs2 = getFSAsAnotherUser(conf);
+ int size = AppendTestUtil.nextInt(FILE_SIZE);
+ DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
+ // Create a second file as the other user to obtain a DFSClient lease.
+ Path filepath2 = new Path("/immediateRecoverLease-x2");
+ FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE,
+ REPLICATION_NUM, BLOCK_SIZE);
+ assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
+ try {
+ Thread.sleep(10000);
+ } catch (InterruptedException e) {
+ // Ignore; proceed to trigger lease recovery via append.
+ }
+ dfsx.append(filepath);
+ }
+
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
int size) throws IOException {
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
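
For context, lease recovery by another client can also be requested directly
through the public API instead of the create/append paths used here. A
minimal sketch, assuming the original lease holder is gone; the URI and path
are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);
        // recoverLease() asks the NameNode to close a file left open by
        // another client; poll until recovery is reported complete.
        Path file = new Path("/demo/openFile");
        while (!dfs.recoverLease(file)) {
          Thread.sleep(1000);
        }
      }
    }
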
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Mon Jul 7 20:43:56 2014
@@ -31,12 +31,16 @@ import java.util.concurrent.Future;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
+import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
@@ -49,7 +53,14 @@ import org.mockito.stubbing.Answer;
public class TestPread {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 4096;
- boolean simulatedStorage = false;
+ boolean simulatedStorage;
+ boolean isHedgedRead;
+
+ @Before
+ public void setup() {
+ simulatedStorage = false;
+ isHedgedRead = false;
+ }
private void writeFile(FileSystem fileSys, Path name) throws IOException {
int replication = 3;// We need > 1 blocks to test out the hedged reads.
@@ -73,7 +84,7 @@ public class TestPread {
// now create the real file
DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
- blockSize, (short) 1, seed);
+ blockSize, (short) replication, seed);
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
@@ -104,8 +115,13 @@ public class TestPread {
}
if (dfstm != null) {
- assertEquals("Expected read statistic to be incremented", length, dfstm
- .getReadStatistics().getTotalBytesRead() - totalRead);
+ if (isHedgedRead) {
+ assertTrue("Expected read statistic to be incremented", length <= dfstm
+ .getReadStatistics().getTotalBytesRead() - totalRead);
+ } else {
+ assertEquals("Expected read statistic to be incremented", length, dfstm
+ .getReadStatistics().getTotalBytesRead() - totalRead);
+ }
}
}
@@ -208,7 +224,7 @@ public class TestPread {
stm.readFully(0, actual);
checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
-
+
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
assertTrue(fileSys.delete(name, true));
@@ -249,6 +265,7 @@ public class TestPread {
*/
@Test
public void testHedgedPreadDFSBasic() throws IOException {
+ isHedgedRead = true;
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 1);
@@ -258,8 +275,72 @@ public class TestPread {
}
@Test
+ public void testHedgedReadLoopTooManyTimes() throws IOException {
+ Configuration conf = new Configuration();
+ int numHedgedReadPoolThreads = 5;
+ final int hedgedReadTimeoutMillis = 50;
+
+ conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
+ numHedgedReadPoolThreads);
+ conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
+ hedgedReadTimeoutMillis);
+ // Set up the InjectionHandler
+ DFSClientFaultInjector.instance = Mockito
+ .mock(DFSClientFaultInjector.class);
+ DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ // Delay past the hedged-read timeout, then throw a single
+ // ChecksumException so the client starts another hedged-read loop.
+ Thread.sleep(hedgedReadTimeoutMillis + 1);
+ if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
+ System.out.println("-------------- throw Checksum Exception");
+ throw new ChecksumException("ChecksumException test", 100);
+ }
+ return null;
+ }
+ }).when(injector).fetchFromDatanodeException();
+
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+ .format(true).build();
+ DistributedFileSystem fileSys = cluster.getFileSystem();
+ DFSClient dfsClient = fileSys.getClient();
+ FSDataOutputStream output = null;
+ DFSInputStream input = null;
+ String filename = "/hedgedReadMaxOut.dat";
+ try {
+
+ Path file = new Path(filename);
+ output = fileSys.create(file, (short) 2);
+ byte[] data = new byte[64 * 1024];
+ output.write(data);
+ output.flush();
+ output.write(data);
+ output.flush();
+ output.write(data);
+ output.flush();
+ output.close();
+ byte[] buffer = new byte[64 * 1024];
+ input = dfsClient.open(filename);
+ input.read(0, buffer, 0, 1024);
+ input.close();
+ assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
+ } catch (BlockMissingException e) {
+ assertTrue("Unexpected BlockMissingException: " + e, false);
+ } finally {
+ IOUtils.cleanup(null, input);
+ IOUtils.cleanup(null, output);
+ fileSys.close();
+ cluster.shutdown();
+ Mockito.reset(injector);
+ }
+ }
+
+ @Test
public void testMaxOutHedgedReadPool() throws IOException,
InterruptedException, ExecutionException {
+ isHedgedRead = true;
Configuration conf = new Configuration();
int numHedgedReadPoolThreads = 5;
final int initialHedgedReadTimeoutMillis = 50000;
@@ -342,6 +423,8 @@ public class TestPread {
throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
@@ -367,7 +450,6 @@ public class TestPread {
public void testPreadDFSSimulated() throws IOException {
simulatedStorage = true;
testPreadDFS();
- simulatedStorage = false;
}
/**
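
The hedged-read tests above hinge on two client-side settings. A minimal
sketch of enabling the feature with the same config keys; the values are
illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class HedgedReadConfigDemo {
      public static Configuration hedgedReadConf() {
        Configuration conf = new Configuration();
        // A non-zero pool size enables hedged reads in the client.
        conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
        // Wait this long on the first datanode before hedging.
        conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            500);
        return conf;
      }
    }
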
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java Mon Jul 7 20:43:56 2014
@@ -390,6 +390,10 @@ public class TestRollingUpgrade {
// Once finalized, there should be no more fsimage for rollbacks.
Assert.assertFalse(fsimage.hasRollbackFSImage());
+
+ // Should have no problem in restart and replaying edits that include
+ // the FINALIZE op.
+ dfsCluster.restartNameNode(0);
} finally {
if (cluster != null) {
cluster.shutdown();
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Mon Jul 7 20:43:56 2014
@@ -370,8 +370,13 @@ public class TestBalancer {
// start rebalancing
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
- assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
-
+ if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+ DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
+ assertEquals(Balancer.ReturnStatus.NO_MOVE_PROGRESS.code, r);
+ return;
+ } else {
+ assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
+ }
waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
LOG.info("Rebalancing with default ctor.");
waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
@@ -463,6 +468,20 @@ public class TestBalancer {
}
@Test(timeout=100000)
+ public void testBalancerWithZeroThreadsForMove() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 0);
+ testBalancer1Internal(conf);
+ }
+
+ @Test(timeout=100000)
+ public void testBalancerWithNonZeroThreadsForMove() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 8);
+ testBalancer1Internal(conf);
+ }
+
+ @Test(timeout=100000)
public void testBalancer2() throws Exception {
testBalancer2Internal(new HdfsConfiguration());
}
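
Both new balancer tests toggle a single datanode-side knob. A minimal sketch
of building such a configuration with the key shown in the patch:

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class BalancerMovesConfigDemo {
      public static HdfsConfiguration balancerConf(int concurrentMoves) {
        HdfsConfiguration conf = new HdfsConfiguration();
        // 0 concurrent moves leaves the balancer unable to make progress,
        // so it exits with NO_MOVE_PROGRESS, as
        // testBalancerWithZeroThreadsForMove expects; any positive value
        // allows normal balancing.
        conf.setInt(
            DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
            concurrentMoves);
        return conf;
      }
    }
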
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java Mon Jul 7 20:43:56 2014
@@ -55,7 +55,7 @@ public class TestPendingInvalidateBlock
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
// block deletion pending period
- conf.setLong(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_KEY, 1000 * 5);
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, 5L);
// set the block report interval to 2s
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 2000);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Mon Jul 7 20:43:56 2014
@@ -435,8 +435,9 @@ public class TestBPOfferService {
}
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
- ExtendedBlock fakeBlock,
- DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+ final ExtendedBlock fakeBlock,
+ final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+ final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -444,9 +445,9 @@ public class TestBPOfferService {
@Override
public Boolean get() {
try {
- Mockito.verify(mockNN1).blockReceivedAndDeleted(
+ Mockito.verify(mockNN).blockReceivedAndDeleted(
Mockito.<DatanodeRegistration>anyObject(),
- Mockito.eq(FAKE_BPID),
+ Mockito.eq(fakeBlockPoolId),
captor.capture());
return true;
} catch (Throwable t) {
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Mon Jul 7 20:43:56 2014
@@ -160,7 +160,8 @@ public class TestDeleteBlockPool {
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"namesServerId1,namesServerId2");
cluster = new MiniDFSCluster.Builder(conf)
- .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+ conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
.numDataNodes(1).build();
cluster.waitActive();
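
The topology fix above derives the MiniDFSCluster nameservices from the
configuration instead of a hard-coded count, so the two cannot drift apart.
A minimal sketch of the same pattern:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSNNTopology;

    public class FederatedTopologyDemo {
      public static MiniDFSNNTopology topologyFor(Configuration conf) {
        // Build the federated topology from the configured nameservice IDs
        // (e.g. "namesServerId1,namesServerId2") rather than a bare count.
        return MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES));
      }
    }
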
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Mon Jul 7 20:43:56 2014
@@ -83,7 +83,7 @@ public class CreateEditsLog {
final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
p, 0L, 0L, blocks, replication, blockSize);
- inode.toUnderConstruction("", "", null);
+ inode.toUnderConstruction("", "");
// Append path to filename with information about blockIDs
String path = "_" + iF + "_B" + blocks[0].getBlockId() +
@@ -98,7 +98,7 @@ public class CreateEditsLog {
}
INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
- fileUc.toUnderConstruction("", "", null);
+ fileUc.toUnderConstruction("", "");
editLog.logOpenFile(filePath, fileUc, false);
editLog.logCloseFile(filePath, inode);
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Mon Jul 7 20:43:56 2014
@@ -32,12 +32,20 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.READ;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
@@ -60,7 +68,7 @@ public class FSXAttrBaseTest {
protected static MiniDFSCluster dfsCluster;
protected static Configuration conf;
private static int pathCount = 0;
- private static Path path;
+ protected static Path path;
// XAttrs
protected static final String name1 = "user.a1";
@@ -73,10 +81,16 @@ public class FSXAttrBaseTest {
protected FileSystem fs;
+ private static final UserGroupInformation BRUCE =
+ UserGroupInformation.createUserForTesting("bruce", new String[] { });
+ private static final UserGroupInformation DIANA =
+ UserGroupInformation.createUserForTesting("diana", new String[] { });
+
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
initCluster(true);
@@ -388,6 +402,21 @@ public class FSXAttrBaseTest {
fs.removeXAttr(path, name3);
}
+ @Test(timeout = 120000)
+ public void testRenameFileWithXAttr() throws Exception {
+ FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+ fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+ Path renamePath = new Path(path.toString() + "-rename");
+ fs.rename(path, renamePath);
+ Map<String, byte[]> xattrs = fs.getXAttrs(renamePath);
+ Assert.assertEquals(2, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+ Assert.assertArrayEquals(value2, xattrs.get(name2));
+ fs.removeXAttr(renamePath, name1);
+ fs.removeXAttr(renamePath, name2);
+ }
+
/**
* Test the listXAttrs api.
* listXAttrs on a path that doesn't exist.
@@ -535,6 +564,50 @@ public class FSXAttrBaseTest {
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
}
+
+ @Test(timeout = 120000)
+ public void testXAttrAcl() throws Exception {
+ FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+ fs.setOwner(path, BRUCE.getUserName(), null);
+ FileSystem fsAsBruce = createFileSystem(BRUCE);
+ FileSystem fsAsDiana = createFileSystem(DIANA);
+ fsAsBruce.setXAttr(path, name1, value1);
+
+ Map<String, byte[]> xattrs;
+ try {
+ xattrs = fsAsDiana.getXAttrs(path);
+ Assert.fail("Diana should not have read access to get xattrs");
+ } catch (AccessControlException e) {
+ // Ignore
+ }
+
+ // Give Diana read permissions to the path
+ fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
+ aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
+ xattrs = fsAsDiana.getXAttrs(path);
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+ try {
+ fsAsDiana.removeXAttr(path, name1);
+ Assert.fail("Diana should not have write access to remove xattrs");
+ } catch (AccessControlException e) {
+ // Ignore
+ }
+
+ try {
+ fsAsDiana.setXAttr(path, name2, value2);
+ Assert.fail("Diana should not have write access to set xattrs");
+ } catch (AccessControlException e) {
+ // Ignore
+ }
+
+ fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
+ aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
+ fsAsDiana.setXAttr(path, name2, value2);
+ Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
+ fsAsDiana.removeXAttr(path, name1);
+ fsAsDiana.removeXAttr(path, name2);
+ }
/**
* Creates a FileSystem for the super-user.
@@ -545,6 +618,18 @@ public class FSXAttrBaseTest {
protected FileSystem createFileSystem() throws Exception {
return dfsCluster.getFileSystem();
}
+
+ /**
+ * Creates a FileSystem for a specific user.
+ *
+ * @param user UserGroupInformation specific user
+ * @return FileSystem for specific user
+ * @throws Exception if creation fails
+ */
+ protected FileSystem createFileSystem(UserGroupInformation user)
+ throws Exception {
+ return DFSTestUtil.getFileSystemAs(user, conf);
+ }
/**
* Initializes all FileSystem instances used in the tests.
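
The testXAttrAcl flow pairs xattr calls with ACL grants. A minimal sketch of
the same pattern, assuming two FileSystem handles belonging to different
users; the user name, xattr name and value are illustrative:

    import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
    import static org.apache.hadoop.fs.permission.AclEntryType.USER;
    import static org.apache.hadoop.fs.permission.FsAction.READ;
    import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;

    import com.google.common.collect.Lists;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrAclDemo {
      // As the owner, set an xattr and grant a second user READ through an
      // ACL entry; that user's handle can then read the xattr back.
      static byte[] grantAndRead(FileSystem fsAsOwner, FileSystem fsAsReader,
          Path p) throws Exception {
        fsAsOwner.setXAttr(p, "user.a1", new byte[] { 0x31 });
        fsAsOwner.modifyAclEntries(p, Lists.newArrayList(
            aclEntry(ACCESS, USER, "diana", READ)));
        return fsAsReader.getXAttrs(p).get("user.a1");
      }
    }
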
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Mon Jul 7 20:43:56 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.AccessControlException;
import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
/**
* This is a utility class to expose NameNode functionality for unit tests.
@@ -177,8 +178,9 @@ public class NameNodeAdapter {
}
public static FSImage spyOnFsImage(NameNode nn1) {
- FSImage spy = Mockito.spy(nn1.getNamesystem().dir.fsImage);
- nn1.getNamesystem().dir.fsImage = spy;
+ FSNamesystem fsn = nn1.getNamesystem();
+ FSImage spy = Mockito.spy(fsn.getFSImage());
+ Whitebox.setInternalState(fsn, "fsImage", spy);
return spy;
}
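
spyOnFsImage now injects the spy reflectively rather than assigning the
package-private fsn.dir.fsImage field. A minimal sketch of that
Mockito/Whitebox pattern (mockito-core 1.x); the Image and Holder classes
are illustrative stand-ins for FSImage and FSNamesystem:

    import org.mockito.Mockito;
    import org.mockito.internal.util.reflection.Whitebox;

    public class WhiteboxSpyDemo {
      static class Image { String name() { return "real"; } }
      static class Holder {
        private Image fsImage = new Image();
        Image getFSImage() { return fsImage; }
      }

      public static void main(String[] args) {
        Holder h = new Holder();
        // Wrap the real object in a spy, then push it into the private
        // field without needing a setter, as spyOnFsImage does.
        Image spy = Mockito.spy(h.getFSImage());
        Whitebox.setInternalState(h, "fsImage", spy);
      }
    }
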
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Mon Jul 7 20:43:56 2014
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
@@ -682,6 +683,12 @@ public class TestCacheDirectives {
} finally {
namesystem.readUnlock();
}
+
+ LOG.info(logString + " cached blocks: have " + numCachedBlocks +
+ " / " + expectedCachedBlocks + ". " +
+ "cached replicas: have " + numCachedReplicas +
+ " / " + expectedCachedReplicas);
+
if (expectedCachedBlocks == -1 ||
numCachedBlocks == expectedCachedBlocks) {
if (expectedCachedReplicas == -1 ||
@@ -689,10 +696,6 @@ public class TestCacheDirectives {
return true;
}
}
- LOG.info(logString + " cached blocks: have " + numCachedBlocks +
- " / " + expectedCachedBlocks + ". " +
- "cached replicas: have " + numCachedReplicas +
- " / " + expectedCachedReplicas);
return false;
}
}, 500, 60000);
@@ -1415,7 +1418,10 @@ public class TestCacheDirectives {
for (DataNode dn : cluster.getDataNodes()) {
DatanodeDescriptor descriptor =
datanodeManager.getDatanode(dn.getDatanodeId());
- Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+ Assert.assertTrue("Pending cached list of " + descriptor +
+ " is not empty, "
+ + Arrays.toString(descriptor.getPendingCached().toArray()),
+ descriptor.getPendingCached().isEmpty());
}
} finally {
cluster.getNamesystem().readUnlock();
@@ -1430,10 +1436,6 @@ public class TestCacheDirectives {
int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
0xFADED);
- // Set up a log appender watcher
- final LogVerificationAppender appender = new LogVerificationAppender();
- final Logger logger = Logger.getRootLogger();
- logger.addAppender(appender);
dfs.addCachePool(new CachePoolInfo("pool"));
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
.setPath(fileName).setReplication((short) 1).build());
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Jul 7 20:43:56 2014
@@ -1334,7 +1334,8 @@ public class TestCheckpoint {
SecondaryNameNode secondary2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
- .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+ conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
.build();
Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java Mon Jul 7 20:43:56 2014
@@ -18,10 +18,12 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,7 +38,9 @@ import org.junit.Test;
public class TestDiskspaceQuotaUpdate {
private static final int BLOCKSIZE = 1024;
- private static final short REPLICATION = 1;
+ private static final short REPLICATION = 4;
+ static final long seed = 0L;
+ private static final Path dir = new Path("/TestQuotaUpdate");
private Configuration conf;
private MiniDFSCluster cluster;
@@ -63,41 +67,83 @@ public class TestDiskspaceQuotaUpdate {
}
/**
+ * Test if the quota can be correctly updated for create file
+ */
+ @Test (timeout=60000)
+ public void testQuotaUpdateWithFileCreate() throws Exception {
+ final Path foo = new Path(dir, "foo");
+ Path createdFile = new Path(foo, "created_file.data");
+ dfs.mkdirs(foo);
+ dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
+ long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
+ DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
+ fileLen, BLOCKSIZE, REPLICATION, seed);
+ INode fnode = fsdir.getINode4Write(foo.toString());
+ assertTrue(fnode.isDirectory());
+ assertTrue(fnode.isQuotaSet());
+ Quota.Counts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
+ .getSpaceConsumed();
+ assertEquals(2, cnt.get(Quota.NAMESPACE));
+ assertEquals(fileLen * REPLICATION, cnt.get(Quota.DISKSPACE));
+ }
+
+ /**
* Test if the quota can be correctly updated for append
*/
- @Test
+ @Test (timeout=60000)
public void testUpdateQuotaForAppend() throws Exception {
- final Path foo = new Path("/foo");
+ final Path foo = new Path(dir, "foo");
final Path bar = new Path(foo, "bar");
- DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
+ long currentFileLen = BLOCKSIZE;
+ DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
- // append half of the block data
+ // append half of the block data, the previous file length is at block
+ // boundary
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
+ currentFileLen += (BLOCKSIZE / 2);
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+ assertTrue(fooNode.isQuotaSet());
Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed();
long ns = quota.get(Quota.NAMESPACE);
long ds = quota.get(Quota.DISKSPACE);
assertEquals(2, ns); // foo and bar
- assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
+ assertEquals(currentFileLen * REPLICATION, ds);
+ ContentSummary c = dfs.getContentSummary(foo);
+ assertEquals(c.getSpaceConsumed(), ds);
- // append another block
+ // append another block, the previous file length is not at block boundary
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
+ currentFileLen += BLOCKSIZE;
quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns = quota.get(Quota.NAMESPACE);
ds = quota.get(Quota.DISKSPACE);
assertEquals(2, ns); // foo and bar
- assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
+ assertEquals(currentFileLen * REPLICATION, ds);
+ c = dfs.getContentSummary(foo);
+ assertEquals(c.getSpaceConsumed(), ds);
+
+ // append several blocks
+ DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
+ currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
+
+ quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+ ns = quota.get(Quota.NAMESPACE);
+ ds = quota.get(Quota.DISKSPACE);
+ assertEquals(2, ns); // foo and bar
+ assertEquals(currentFileLen * REPLICATION, ds);
+ c = dfs.getContentSummary(foo);
+ assertEquals(c.getSpaceConsumed(), ds);
}
/**
* Test if the quota can be correctly updated when file length is updated
* through fsync
*/
- @Test
+ @Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
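
The expected diskspace figures in these quota tests are plain arithmetic
over the constants above (BLOCKSIZE = 1024, REPLICATION = 4). A worked
sketch:

    public class QuotaArithmeticDemo {
      public static void main(String[] args) {
        final int BLOCKSIZE = 1024;
        final short REPLICATION = 4;
        // testQuotaUpdateWithFileCreate writes 2.5 blocks of data:
        long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;   // 2560 bytes
        // The diskspace quota charges every replica of every byte:
        long ds = fileLen * REPLICATION;                // 10240 bytes
        System.out.println(ds);                         // prints 10240
      }
    }
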
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Jul 7 20:43:56 2014
@@ -195,7 +195,7 @@ public class TestEditLog {
for (int i = 0; i < numTransactions; i++) {
INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
- inode.toUnderConstruction("", "", null);
+ inode.toUnderConstruction("", "");
editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
editLog.logCloseFile("/filename" + (startIndex + i), inode);
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java Mon Jul 7 20:43:56 2014
@@ -49,6 +49,8 @@ public class TestFSImageWithXAttr {
private static final byte[] newValue1 = {0x31, 0x31, 0x31};
private static final String name2 = "user.a2";
private static final byte[] value2 = {0x37, 0x38, 0x39};
+ private static final String name3 = "user.a3";
+ private static final byte[] value3 = {};
@BeforeClass
public static void setUp() throws IOException {
@@ -70,25 +72,29 @@ public class TestFSImageWithXAttr {
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
restart(fs, persistNamespace);
Map<String, byte[]> xattrs = fs.getXAttrs(path);
- Assert.assertEquals(xattrs.size(), 2);
+ Assert.assertEquals(3, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
+ Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
- Assert.assertEquals(xattrs.size(), 2);
+ Assert.assertEquals(3, xattrs.size());
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
+ Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
+ fs.removeXAttr(path, name3);
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java Mon Jul 7 20:43:56 2014
@@ -75,8 +75,7 @@ public class TestFSPermissionChecker {
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
}
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
- FSImage image = mock(FSImage.class);
- dir = new FSDirectory(image, fsn, conf);
+ dir = new FSDirectory(fsn, conf);
inodeRoot = dir.getRoot();
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Jul 7 20:43:56 2014
@@ -29,8 +29,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import org.junit.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -318,7 +316,7 @@ public class TestINodeFile {
{//cast from INodeFileUnderConstruction
final INode from = new INodeFile(
INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
- from.asFile().toUnderConstruction("client", "machine", null);
+ from.asFile().toUnderConstruction("client", "machine");
//cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
@@ -1070,12 +1068,11 @@ public class TestINodeFile {
final String clientName = "client";
final String clientMachine = "machine";
- file.toUnderConstruction(clientName, clientMachine, null);
+ file.toUnderConstruction(clientName, clientMachine);
assertTrue(file.isUnderConstruction());
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
assertEquals(clientName, uc.getClientName());
assertEquals(clientMachine, uc.getClientMachine());
- Assert.assertNull(uc.getClientNode());
file.toCompleteFile(Time.now());
assertFalse(file.isUnderConstruction());
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Mon Jul 7 20:43:56 2014
@@ -61,6 +61,7 @@ import org.apache.hadoop.test.GenericTes
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -124,14 +125,14 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
boolean shouldFail = false; // should we expect the save operation to fail
// inject fault
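
A minimal sketch of the Whitebox pattern used above: Whitebox.setInternalState writes a private field by name via reflection, so the test no longer needs direct access to FSNamesystem's fsImage field. The Holder class below is hypothetical, not from this patch.

    import org.mockito.internal.util.reflection.Whitebox;

    class Holder {
      private String secret = "original"; // private, no setter
    }

    Holder h = new Holder();
    // Reflectively overwrite the private field by name; its visibility
    // stays private, but the test can still inject a spy or stub.
    Whitebox.setInternalState(h, "secret", "replaced");
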
@@ -233,11 +234,11 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
FileSystem fs = FileSystem.getLocal(conf);
File rootDir = storage.getStorageDir(0).getRoot();
@@ -367,14 +368,15 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage originalImage = fsn.dir.fsImage;
+ final FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
+
spyImage.storage.setStorageDirectories(
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
@@ -504,7 +506,7 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage image = fsn.dir.fsImage;
+ final FSImage image = fsn.getFSImage();
NNStorage storage = image.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
storage.setStorageDirectories(
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java Mon Jul 7 20:43:56 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.ha.ZKFailoverCo
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
@@ -53,6 +54,11 @@ public class TestDFSZKFailoverController
private TestContext ctx;
private ZKFCThread thr1, thr2;
private FileSystem fs;
+
+ static {
+ // Make tests run faster by avoiding fsync()
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
@Before
public void setup() throws Exception {
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Mon Jul 7 20:43:56 2014
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNot
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
@@ -32,6 +33,10 @@ import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
+import java.util.Map;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -51,7 +56,10 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.SecurityUtil;
@@ -64,6 +72,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
+import org.mortbay.util.ajax.JSON;
import com.google.common.base.Joiner;
@@ -372,6 +381,90 @@ public class TestDelegationTokensWithHA
token.cancel(conf);
}
+ /**
+ * Test that the standby NN throws StandbyException when it is asked to
+ * retrieve a delegation token password (HDFS-6475). With StandbyException,
+ * the client can fail over and retry against the active NN.
+ */
+ @Test
+ public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
+ // make nn0 the standby NN, and nn1 the active NN
+ cluster.transitionToStandby(0);
+ cluster.transitionToActive(1);
+
+ final DelegationTokenSecretManager stSecretManager =
+ NameNodeAdapter.getDtSecretManager(
+ nn1.getNamesystem());
+
+ // create token
+ final Token<DelegationTokenIdentifier> token =
+ getDelegationToken(fs, "JobTracker");
+ final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+ byte[] tokenId = token.getIdentifier();
+ identifier.readFields(new DataInputStream(
+ new ByteArrayInputStream(tokenId)));
+
+ assertTrue(null != stSecretManager.retrievePassword(identifier));
+
+ final UserGroupInformation ugi = UserGroupInformation
+ .createRemoteUser("JobTracker");
+ ugi.addToken(token);
+
+ ugi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() {
+ try {
+ try {
+ byte[] tmppw = dtSecretManager.retrievePassword(identifier);
+ fail("InvalidToken with cause StandbyException is expected"
+ + " since nn0 is standby");
+ return tmppw;
+ } catch (IOException e) {
+ // Mimic the UserProvider class logic (server side) by throwing
+ // SecurityException here
+ throw new SecurityException(
+ "Failed to obtain user group information: " + e, e);
+ }
+ } catch (Exception oe) {
+ //
+ // The exception oe caught here is
+ // java.lang.SecurityException: Failed to obtain user group
+ // information: org.apache.hadoop.security.token.
+ // SecretManager$InvalidToken: StandbyException
+ //
+ HttpServletResponse response = mock(HttpServletResponse.class);
+ ExceptionHandler eh = new ExceptionHandler();
+ eh.initResponse(response);
+
+ // The Response (resp) below is what the server will send to client
+ //
+ // BEFORE HDFS-6475 fix, the resp.entity is
+ // {"RemoteException":{"exception":"SecurityException",
+ // "javaClassName":"java.lang.SecurityException",
+ // "message":"Failed to obtain user group information:
+ // org.apache.hadoop.security.token.SecretManager$InvalidToken:
+ // StandbyException"}}
+ // AFTER the fix, the resp.entity is
+ // {"RemoteException":{"exception":"StandbyException",
+ // "javaClassName":"org.apache.hadoop.ipc.StandbyException",
+ // "message":"Operation category READ is not supported in
+ // state standby"}}
+ //
+ Response resp = eh.toResponse(oe);
+
+ // Mimic the client side logic by parsing the response from server
+ //
+ Map<?, ?> m = (Map<?, ?>)JSON.parse(resp.getEntity().toString());
+ RemoteException re = JsonUtil.toRemoteException(m);
+ Exception unwrapped = re.unwrapRemoteException(
+ StandbyException.class);
+ assertTrue(unwrapped instanceof StandbyException);
+ return null;
+ }
+ }
+ });
+ }
+
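
The server/client round trip this test mimics reduces to a few lines. A sketch with a hand-written JSON body (the message text is hypothetical, but it has the same shape the test expects after the HDFS-6475 fix):

    String json = "{\"RemoteException\":{\"exception\":\"StandbyException\","
        + "\"javaClassName\":\"org.apache.hadoop.ipc.StandbyException\","
        + "\"message\":\"Operation category READ is not supported in state standby\"}}";
    // Parse the response body and rebuild the server-side exception.
    Map<?, ?> m = (Map<?, ?>) JSON.parse(json);
    RemoteException re = JsonUtil.toRemoteException(m);
    // Unwrapping yields a StandbyException, so the client knows to fail over.
    IOException unwrapped = re.unwrapRemoteException(StandbyException.class);
    assertTrue(unwrapped instanceof StandbyException);
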
@SuppressWarnings("unchecked")
private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,
String renewer) throws IOException {
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Mon Jul 7 20:43:56 2014
@@ -169,10 +169,11 @@ public class TestRenameWithSnapshots {
}
private static boolean existsInDiffReport(List<DiffReportEntry> entries,
- DiffType type, String relativePath) {
+ DiffType type, String sourcePath, String targetPath) {
for (DiffReportEntry entry : entries) {
- if ((entry.getType() == type)
- && ((new String(entry.getRelativePath())).compareTo(relativePath) == 0)) {
+ if (entry.equals(new DiffReportEntry(type, DFSUtil
+ .string2Bytes(sourcePath), targetPath == null ? null : DFSUtil
+ .string2Bytes(targetPath)))) {
return true;
}
}
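
With the three-argument DiffReportEntry constructor used above, non-rename entries simply carry a null target path. A sketch of both shapes (paths hypothetical):

    // CREATE/DELETE/MODIFY entries: the target path is null.
    DiffReportEntry created = new DiffReportEntry(
        DiffType.CREATE, DFSUtil.string2Bytes("foo/file"), null);
    // RENAME entries: both source and target are set.
    DiffReportEntry renamed = new DiffReportEntry(
        DiffType.RENAME, DFSUtil.string2Bytes("foo/file"),
        DFSUtil.string2Bytes("bar/file"));
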
@@ -195,8 +196,9 @@ public class TestRenameWithSnapshots {
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
List<DiffReportEntry> entries = diffReport.getDiffList();
assertTrue(entries.size() == 2);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName(),
+ null));
}
/**
@@ -215,10 +217,10 @@ public class TestRenameWithSnapshots {
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
System.out.println("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file2.getName()));
}
@Test (timeout=60000)
@@ -238,26 +240,26 @@ public class TestRenameWithSnapshots {
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
LOG.info("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file2.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file2.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(),
+ file3.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file3.getName()));
}
@Test (timeout=60000)
@@ -280,11 +282,10 @@ public class TestRenameWithSnapshots {
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, sub2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, sub2.getName()
- + "/" + sub2file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, sub2.getName()
- + "/" + sub2file1.getName()));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, sub2.getName(),
+ null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName()
+ + "/" + sub2file1.getName(), sub2.getName() + "/" + sub2file2.getName()));
}
@Test (timeout=60000)
@@ -307,10 +308,10 @@ public class TestRenameWithSnapshots {
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertEquals(3, entries.size());
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, sub3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, sub2.getName()));
+ assertEquals(2, entries.size());
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(),
+ sub3.getName()));
}
/**
@@ -2406,12 +2407,12 @@ public class TestRenameWithSnapshots {
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List<DiffReportEntry> entries = report.getDiffList();
assertEquals(7, entries.size());
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, bar.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, newDir.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1"));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file2"));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file3"));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName(), null));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, bar.getName(), null));
+ assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "bar", "newDir"));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
}
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java Mon Jul 7 20:43:56 2014
@@ -25,6 +25,7 @@ import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -143,7 +144,7 @@ public class TestSnapshotDiffReport {
hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
}
// modify file10
- hdfs.setReplication(file10, (short) (REPLICATION - 1));
+ hdfs.setReplication(file10, (short) (REPLICATION + 1));
}
/** check the correctness of the diff reports */
@@ -166,11 +167,11 @@ public class TestSnapshotDiffReport {
} else if (entry.getType() == DiffType.DELETE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
- new DiffReportEntry(DiffType.CREATE, entry.getRelativePath())));
+ new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
} else if (entry.getType() == DiffType.CREATE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
- new DiffReportEntry(DiffType.DELETE, entry.getRelativePath())));
+ new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
}
}
}
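
The loop above asserts that swapping the snapshot arguments inverts the report. In sketch form, using the two-argument constructor as above (path hypothetical):

    DiffReportEntry deleted = new DiffReportEntry(
        DiffType.DELETE, DFSUtil.string2Bytes("foo"));
    // Diffing in the opposite direction reports the same path as created.
    DiffReportEntry inverse = new DiffReportEntry(
        DiffType.CREATE, deleted.getSourcePath());
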
@@ -329,5 +330,166 @@ public class TestSnapshotDiffReport {
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1")));
}
-
+
+ /**
+ * Rename a directory so that it ends up under its prior descendant, and
+ * verify the diff report.
+ */
+ @Test
+ public void testDiffReportWithRename() throws Exception {
+ final Path root = new Path("/");
+ final Path sdir1 = new Path(root, "dir1");
+ final Path sdir2 = new Path(root, "dir2");
+ final Path foo = new Path(sdir1, "foo");
+ final Path bar = new Path(foo, "bar");
+ hdfs.mkdirs(bar);
+ hdfs.mkdirs(sdir2);
+
+ // create snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ // /dir1/foo/bar -> /dir2/bar
+ final Path bar2 = new Path(sdir2, "bar");
+ hdfs.rename(bar, bar2);
+
+ // /dir1/foo -> /dir2/bar/foo
+ final Path foo2 = new Path(bar2, "foo");
+ hdfs.rename(foo, foo2);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
+ // let's delete /dir2 to make things more complicated
+ hdfs.delete(sdir2, true);
+
+ verifyDiffReport(root, "s1", "s2",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/foo"),
+ DFSUtil.string2Bytes("dir2/bar/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
+ new DiffReportEntry(DiffType.MODIFY,
+ DFSUtil.string2Bytes("dir1/foo/bar")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil
+ .string2Bytes("dir1/foo/bar"), DFSUtil.string2Bytes("dir2/bar")));
+ }
+
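
A sketch of how a caller would read the RENAME entries these tests now expect. This assumes a getTargetPath accessor on DiffReportEntry matching the new three-argument constructor; the snapshot names are hypothetical.

    SnapshotDiffReport report =
        hdfs.getSnapshotDiffReport(new Path("/"), "s1", "s2");
    for (DiffReportEntry entry : report.getDiffList()) {
      if (entry.getType() == DiffType.RENAME) {
        // Both paths are byte[] relative to the snapshot root.
        System.out.println(DFSUtil.bytes2String(entry.getSourcePath())
            + " -> " + DFSUtil.bytes2String(entry.getTargetPath()));
      }
    }
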
+ /**
+ * Renaming a file/dir to a destination outside of the snapshottable dir
+ * should be reported as a deletion; renaming a file/dir in from outside
+ * should be reported as a creation.
+ */
+ @Test
+ public void testDiffReportWithRenameOutside() throws Exception {
+ final Path root = new Path("/");
+ final Path dir1 = new Path(root, "dir1");
+ final Path dir2 = new Path(root, "dir2");
+ final Path foo = new Path(dir1, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ final Path bar = new Path(dir2, "bar");
+ final Path fileInBar = new Path(bar, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+ DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
+
+ // create snapshot on /dir1
+ SnapshotTestHelper.createSnapshot(hdfs, dir1, "s0");
+
+ // move bar into dir1
+ final Path newBar = new Path(dir1, "newBar");
+ hdfs.rename(bar, newBar);
+ // move foo out of dir1 into dir2
+ final Path newFoo = new Path(dir2, "new");
+ hdfs.rename(foo, newFoo);
+
+ SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
+ verifyDiffReport(dir1, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes(newBar
+ .getName())),
+ new DiffReportEntry(DiffType.DELETE,
+ DFSUtil.string2Bytes(foo.getName())));
+ }
+
+ /**
+ * Renaming a file/dir and then deleting the ancestor dir of the rename
+ * target should be reported as a deletion.
+ */
+ @Test
+ public void testDiffReportWithRenameAndDelete() throws Exception {
+ final Path root = new Path("/");
+ final Path dir1 = new Path(root, "dir1");
+ final Path dir2 = new Path(root, "dir2");
+ final Path foo = new Path(dir1, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ final Path bar = new Path(dir2, "bar");
+ final Path fileInBar = new Path(bar, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+ DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
+ new DiffReportEntry(DiffType.DELETE, DFSUtil
+ .string2Bytes("dir2/bar/file")),
+ new DiffReportEntry(DiffType.RENAME,
+ DFSUtil.string2Bytes("dir1/foo/file"),
+ DFSUtil.string2Bytes("dir2/bar/file")));
+
+ // delete bar
+ hdfs.delete(bar, true);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
+ verifyDiffReport(root, "s0", "s2",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
+ new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
+ new DiffReportEntry(DiffType.DELETE,
+ DFSUtil.string2Bytes("dir1/foo/file")));
+ }
+
+ @Test
+ public void testDiffReportWithRenameToNewDir() throws Exception {
+ final Path root = new Path("/");
+ final Path foo = new Path(root, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ final Path bar = new Path(root, "bar");
+ hdfs.mkdirs(bar);
+ final Path fileInBar = new Path(bar, "file");
+ hdfs.rename(fileInFoo, fileInBar);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
+ new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("bar")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo/file"),
+ DFSUtil.string2Bytes("bar/file")));
+ }
+
+ /**
+ * Rename a file and then append some data to it.
+ */
+ @Test
+ public void testDiffReportWithRenameAndAppend() throws Exception {
+ final Path root = new Path("/");
+ final Path foo = new Path(root, "foo");
+ DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ final Path bar = new Path(root, "bar");
+ hdfs.rename(foo, bar);
+ DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ // the MODIFY entry for the renamed file is always reported before the
+ // RENAME entry
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
+ DFSUtil.string2Bytes("bar")));
+ }
}
\ No newline at end of file