Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2014/07/12 04:24:55 UTC
svn commit: r1609878 [8/9] - in
/hadoop/common/branches/YARN-1051/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs...
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java Sat Jul 12 02:24:40 2014
@@ -49,6 +49,8 @@ public class TestFSImageWithXAttr {
private static final byte[] newValue1 = {0x31, 0x31, 0x31};
private static final String name2 = "user.a2";
private static final byte[] value2 = {0x37, 0x38, 0x39};
+ private static final String name3 = "user.a3";
+ private static final byte[] value3 = {};
@BeforeClass
public static void setUp() throws IOException {
@@ -70,25 +72,29 @@ public class TestFSImageWithXAttr {
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
restart(fs, persistNamespace);
Map<String, byte[]> xattrs = fs.getXAttrs(path);
- Assert.assertEquals(xattrs.size(), 2);
+ Assert.assertEquals(xattrs.size(), 3);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
+ Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
- Assert.assertEquals(xattrs.size(), 2);
+ Assert.assertEquals(xattrs.size(), 3);
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
+ Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
+ fs.removeXAttr(path, name3);
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Sat Jul 12 02:24:40 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.After;
import org.junit.Test;
import org.mockito.Mockito;
@@ -194,4 +195,22 @@ public class TestFSNamesystem {
assertFalse(rwLock.isWriteLockedByCurrentThread());
assertEquals(0, rwLock.getWriteHoldCount());
}
+
+ @Test
+ public void testReset() throws Exception {
+ Configuration conf = new Configuration();
+ FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+ FSImage fsImage = Mockito.mock(FSImage.class);
+ Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+ FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+ fsn.imageLoadComplete();
+ assertTrue(fsn.isImageLoaded());
+ fsn.clear();
+ assertFalse(fsn.isImageLoaded());
+ final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
+ .getINode("/");
+ assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
+ fsn.imageLoadComplete();
+ assertTrue(fsn.isImageLoaded());
+ }
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java Sat Jul 12 02:24:40 2014
@@ -75,8 +75,7 @@ public class TestFSPermissionChecker {
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
}
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
- FSImage image = mock(FSImage.class);
- dir = new FSDirectory(image, fsn, conf);
+ dir = new FSDirectory(fsn, conf);
inodeRoot = dir.getRoot();
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Sat Jul 12 02:24:40 2014
@@ -19,12 +19,9 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.apache.hadoop.util.Time.now;
import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
@@ -57,7 +54,7 @@ public class TestFsLimits {
FSEditLog editLog = mock(FSEditLog.class);
doReturn(editLog).when(fsImage).getEditLog();
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
- fsn.getFSDirectory().setReady(fsIsReady);
+ fsn.setImageLoaded(fsIsReady);
return fsn;
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Sat Jul 12 02:24:40 2014
@@ -612,6 +612,8 @@ public class TestFsck {
public void testCorruptBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
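The hunk above, together with matching hunks in TestListCorruptFileBlocks and TestFailoverWithBlockTokensEnabled below, shortens the DFSClient retry window so these tests spend less time backing off between retried reads. A minimal sketch of that configuration pattern (illustrative only, not lifted from the commit; it uses only classes the tests already import):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RetryWindowExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Shrink the base of the client retry window (milliseconds) so retried
        // reads back off briefly instead of using the much larger default window.
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          // ... create files, corrupt blocks, run fsck, etc. ...
        } finally {
          cluster.shutdown();
        }
      }
    }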
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sat Jul 12 02:24:40 2014
@@ -29,8 +29,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import org.junit.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -79,7 +77,7 @@ public class TestINodeFile {
private final PermissionStatus perm = new PermissionStatus(
"userName", null, FsPermission.getDefault());
private short replication;
- private long preferredBlockSize;
+ private long preferredBlockSize = 1024;
INodeFile createINodeFile(short replication, long preferredBlockSize) {
return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
@@ -318,7 +316,7 @@ public class TestINodeFile {
{//cast from INodeFileUnderConstruction
final INode from = new INodeFile(
INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
- from.asFile().toUnderConstruction("client", "machine", null);
+ from.asFile().toUnderConstruction("client", "machine");
//cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
@@ -1070,12 +1068,11 @@ public class TestINodeFile {
final String clientName = "client";
final String clientMachine = "machine";
- file.toUnderConstruction(clientName, clientMachine, null);
+ file.toUnderConstruction(clientName, clientMachine);
assertTrue(file.isUnderConstruction());
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
assertEquals(clientName, uc.getClientName());
assertEquals(clientMachine, uc.getClientMachine());
- Assert.assertNull(uc.getClientNode());
file.toCompleteFile(Time.now());
assertFalse(file.isUnderConstruction());
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Sat Jul 12 02:24:40 2014
@@ -64,6 +64,8 @@ public class TestListCorruptFileBlocks {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
@@ -148,6 +150,8 @@ public class TestListCorruptFileBlocks {
// start populating repl queues immediately
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
0f);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Sat Jul 12 02:24:40 2014
@@ -61,6 +61,7 @@ import org.apache.hadoop.test.GenericTes
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -124,14 +125,14 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
boolean shouldFail = false; // should we expect the save operation to fail
// inject fault
@@ -233,11 +234,11 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- FSImage originalImage = fsn.dir.fsImage;
+ FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
FileSystem fs = FileSystem.getLocal(conf);
File rootDir = storage.getStorageDir(0).getRoot();
@@ -367,14 +368,15 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage originalImage = fsn.dir.fsImage;
+ final FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
- fsn.dir.fsImage = spyImage;
+ Whitebox.setInternalState(fsn, "fsImage", spyImage);
+
spyImage.storage.setStorageDirectories(
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
@@ -504,7 +506,7 @@ public class TestSaveNamespace {
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
- final FSImage image = fsn.dir.fsImage;
+ final FSImage image = fsn.getFSImage();
NNStorage storage = image.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
storage.setStorageDirectories(
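The TestSaveNamespace hunks above stop reaching into the package-private fsn.dir.fsImage field; they read the image through getFSImage() and inject the spy back with Mockito's Whitebox. A minimal sketch of that spy-injection pattern, assuming an FSNamesystem named fsn that was loaded elsewhere (illustrative, not verbatim from the commit):

    import org.apache.hadoop.hdfs.server.namenode.FSImage;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.mockito.Mockito;
    import org.mockito.internal.util.reflection.Whitebox;

    // fsn is assumed to come from e.g. FSNamesystem.loadFromDisk(conf).
    FSImage originalImage = fsn.getFSImage();       // public getter instead of fsn.dir.fsImage
    FSImage spyImage = Mockito.spy(originalImage);  // wrap the real image in a Mockito spy
    // Push the spy into the private "fsImage" field so later namesystem calls hit the spy.
    Whitebox.setInternalState(fsn, "fsImage", spyImage);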
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Sat Jul 12 02:24:40 2014
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -90,22 +89,20 @@ public class TestSnapshotPathINodes {
final INode before = fsdir.getINode(pathStr);
// Before a directory is snapshottable
- Assert.assertTrue(before instanceof INodeDirectory);
- Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
+ Assert.assertFalse(before.asDirectory().isSnapshottable());
// After a directory is snapshottable
final Path path = new Path(pathStr);
hdfs.allowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
- Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
+ Assert.assertTrue(after.asDirectory().isSnapshottable());
}
hdfs.disallowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
- Assert.assertTrue(after instanceof INodeDirectory);
- Assert.assertFalse(after instanceof INodeDirectorySnapshottable);
+ Assert.assertFalse(after.asDirectory().isSnapshottable());
}
}
@@ -115,8 +112,7 @@ public class TestSnapshotPathINodes {
}
final int i = inodesInPath.getSnapshotRootIndex() - 1;
final INode inode = inodesInPath.getINodes()[i];
- return ((INodeDirectorySnapshottable)inode).getSnapshot(
- DFSUtil.string2Bytes(name));
+ return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
}
static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java Sat Jul 12 02:24:40 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.ha.ZKFailoverCo
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
@@ -53,6 +54,11 @@ public class TestDFSZKFailoverController
private TestContext ctx;
private ZKFCThread thr1, thr2;
private FileSystem fs;
+
+ static {
+ // Make tests run faster by avoiding fsync()
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
@Before
public void setup() throws Exception {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Sat Jul 12 02:24:40 2014
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNot
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
@@ -32,6 +33,10 @@ import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
+import java.util.Map;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -51,7 +56,10 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.SecurityUtil;
@@ -64,6 +72,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
+import org.mortbay.util.ajax.JSON;
import com.google.common.base.Joiner;
@@ -372,6 +381,90 @@ public class TestDelegationTokensWithHA
token.cancel(conf);
}
+ /**
+ * Test that StandbyException can be thrown from the standby NN when it is asked
+ * for a password (HDFS-6475). With StandbyException, the client can fail over and
+ * try the active NN.
+ */
+ @Test
+ public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
+ // make nn0 the standby NN, and nn1 the active NN
+ cluster.transitionToStandby(0);
+ cluster.transitionToActive(1);
+
+ final DelegationTokenSecretManager stSecretManager =
+ NameNodeAdapter.getDtSecretManager(
+ nn1.getNamesystem());
+
+ // create token
+ final Token<DelegationTokenIdentifier> token =
+ getDelegationToken(fs, "JobTracker");
+ final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+ byte[] tokenId = token.getIdentifier();
+ identifier.readFields(new DataInputStream(
+ new ByteArrayInputStream(tokenId)));
+
+ assertTrue(null != stSecretManager.retrievePassword(identifier));
+
+ final UserGroupInformation ugi = UserGroupInformation
+ .createRemoteUser("JobTracker");
+ ugi.addToken(token);
+
+ ugi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() {
+ try {
+ try {
+ byte[] tmppw = dtSecretManager.retrievePassword(identifier);
+ fail("InvalidToken with cause StandbyException is expected"
+ + " since nn0 is standby");
+ return tmppw;
+ } catch (IOException e) {
+ // Mimic the UserProvider class logic (server side) by throwing
+ // SecurityException here
+ throw new SecurityException(
+ "Failed to obtain user group information: " + e, e);
+ }
+ } catch (Exception oe) {
+ //
+ // The exception oe caught here is
+ // java.lang.SecurityException: Failed to obtain user group
+ // information: org.apache.hadoop.security.token.
+ // SecretManager$InvalidToken: StandbyException
+ //
+ HttpServletResponse response = mock(HttpServletResponse.class);
+ ExceptionHandler eh = new ExceptionHandler();
+ eh.initResponse(response);
+
+ // The Response (resp) below is what the server will send to client
+ //
+ // BEFORE HDFS-6475 fix, the resp.entity is
+ // {"RemoteException":{"exception":"SecurityException",
+ // "javaClassName":"java.lang.SecurityException",
+ // "message":"Failed to obtain user group information:
+ // org.apache.hadoop.security.token.SecretManager$InvalidToken:
+ // StandbyException"}}
+ // AFTER the fix, the resp.entity is
+ // {"RemoteException":{"exception":"StandbyException",
+ // "javaClassName":"org.apache.hadoop.ipc.StandbyException",
+ // "message":"Operation category READ is not supported in
+ // state standby"}}
+ //
+ Response resp = eh.toResponse(oe);
+
+ // Mimic the client side logic by parsing the response from server
+ //
+ Map<?, ?> m = (Map<?, ?>)JSON.parse(resp.getEntity().toString());
+ RemoteException re = JsonUtil.toRemoteException(m);
+ Exception unwrapped = ((RemoteException)re).unwrapRemoteException(
+ StandbyException.class);
+ assertTrue (unwrapped instanceof StandbyException);
+ return null;
+ }
+ }
+ });
+ }
+
@SuppressWarnings("unchecked")
private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,
String renewer) throws IOException {
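The comment block inside testDelegationTokenStandbyNNAppearFirst above explains how the server's JSON error body is turned back into a typed exception on the client. A minimal sketch of just that unwrap step, reusing the helpers the test imports (JSON.parse, JsonUtil.toRemoteException, RemoteException#unwrapRemoteException); the helper name and the entity string are assumptions for illustration:

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hdfs.web.JsonUtil;
    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.ipc.StandbyException;
    import org.mortbay.util.ajax.JSON;

    /** Illustrative helper: true if a WebHDFS-style error body unwraps to StandbyException. */
    static boolean indicatesStandby(String entity) {
      // entity is assumed to be the JSON error body, e.g. (after HDFS-6475):
      // {"RemoteException":{"exception":"StandbyException", ... }}
      Map<?, ?> json = (Map<?, ?>) JSON.parse(entity);
      RemoteException re = JsonUtil.toRemoteException(json);
      // unwrapRemoteException returns the wrapped StandbyException when the class
      // matches, which tells the client to fail over and retry the other NameNode.
      IOException unwrapped = re.unwrapRemoteException(StandbyException.class);
      return unwrapped instanceof StandbyException;
    }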
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java Sat Jul 12 02:24:40 2014
@@ -63,6 +63,8 @@ public class TestFailoverWithBlockTokens
public void startCluster() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ // Set short retry timeouts so this test runs faster
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1)
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java Sat Jul 12 02:24:40 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.log4j.Level;
@@ -153,8 +154,7 @@ public class TestINodeFileUnderConstruct
// deleted list, with size BLOCKSIZE*2
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
- INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
- .getINode(dir.toString());
+ INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
DirectoryDiff last = dirNode.getDiffs().getLast();
// 2. append without closing stream
@@ -162,7 +162,7 @@ public class TestINodeFileUnderConstruct
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// re-check nodeInDeleted_S0
- dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+ dirNode = fsdir.getINode(dir.toString()).asDirectory();
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
// 3. take snapshot --> close stream
@@ -172,7 +172,7 @@ public class TestINodeFileUnderConstruct
// check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
// have been stored in s1's deleted list
fileNode = (INodeFile) fsdir.getINode(file.toString());
- dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+ dirNode = fsdir.getINode(dir.toString()).asDirectory();
last = dirNode.getDiffs().getLast();
assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Sat Jul 12 02:24:40 2014
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.SNAPSHOT_LIMIT;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
@@ -312,10 +312,9 @@ public class TestNestedSnapshots {
public void testIdCmp() {
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0));
- final INodeDirectory dir = new INodeDirectory(0,
+ final INodeDirectory snapshottable = new INodeDirectory(0,
DFSUtil.string2Bytes("foo"), perm, 0L);
- final INodeDirectorySnapshottable snapshottable
- = new INodeDirectorySnapshottable(dir);
+ snapshottable.addSnapshottableFeature();
final Snapshot[] snapshots = {
new Snapshot(1, "s1", snapshottable),
new Snapshot(1, "s1", snapshottable),
@@ -362,7 +361,7 @@ public class TestNestedSnapshots {
hdfs.allowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
- assertTrue(subNode instanceof INodeDirectorySnapshottable);
+ assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
hdfs.disallowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Sat Jul 12 02:24:40 2014
@@ -169,12 +169,11 @@ public class TestRenameWithSnapshots {
}
private static boolean existsInDiffReport(List<DiffReportEntry> entries,
- DiffType type, String relativePath) {
+ DiffType type, String sourcePath, String targetPath) {
for (DiffReportEntry entry : entries) {
- System.out.println("DiffEntry is:" + entry.getType() + "\""
- + new String(entry.getRelativePath()) + "\"");
- if ((entry.getType() == type)
- && ((new String(entry.getRelativePath())).compareTo(relativePath) == 0)) {
+ if (entry.equals(new DiffReportEntry(type, DFSUtil
+ .string2Bytes(sourcePath), targetPath == null ? null : DFSUtil
+ .string2Bytes(targetPath)))) {
return true;
}
}
@@ -197,8 +196,9 @@ public class TestRenameWithSnapshots {
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
List<DiffReportEntry> entries = diffReport.getDiffList();
assertTrue(entries.size() == 2);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName(),
+ null));
}
/**
@@ -217,10 +217,10 @@ public class TestRenameWithSnapshots {
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
System.out.println("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file2.getName()));
}
@Test (timeout=60000)
@@ -240,26 +240,26 @@ public class TestRenameWithSnapshots {
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
LOG.info("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file2.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file2.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(),
+ file3.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
- assertTrue(entries.size() == 3);
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+ assertTrue(entries.size() == 2);
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
+ file3.getName()));
}
@Test (timeout=60000)
@@ -282,11 +282,10 @@ public class TestRenameWithSnapshots {
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, sub2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, sub2.getName()
- + "/" + sub2file2.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, sub2.getName()
- + "/" + sub2file1.getName()));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, sub2.getName(),
+ null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName()
+ + "/" + sub2file1.getName(), sub2.getName() + "/" + sub2file2.getName()));
}
@Test (timeout=60000)
@@ -309,10 +308,10 @@ public class TestRenameWithSnapshots {
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
- assertEquals(3, entries.size());
- assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
- assertTrue(existsInDiffReport(entries, DiffType.CREATE, sub3.getName()));
- assertTrue(existsInDiffReport(entries, DiffType.DELETE, sub2.getName()));
+ assertEquals(2, entries.size());
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(),
+ sub3.getName()));
}
/**
@@ -403,8 +402,7 @@ public class TestRenameWithSnapshots {
final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
"foo");
assertFalse(hdfs.exists(foo_s3));
- INodeDirectorySnapshottable sdir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+ INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
@@ -607,8 +605,7 @@ public class TestRenameWithSnapshots {
INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1, snode.getDiffs().asList().size());
- INodeDirectorySnapshottable sdir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+ INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
@@ -763,8 +760,7 @@ public class TestRenameWithSnapshots {
assertEquals(2, fooWithCount.getReferenceCount());
INodeDirectory foo = fooWithCount.asDirectory();
assertEquals(1, foo.getDiffs().asList().size());
- INodeDirectorySnapshottable sdir1Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
+ INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature()
.getLastSnapshotId());
@@ -973,12 +969,9 @@ public class TestRenameWithSnapshots {
hdfs.rename(bar_dir2, bar_dir1);
// check the internal details
- INodeDirectorySnapshottable sdir1Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
- INodeDirectorySnapshottable sdir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
- INodeDirectorySnapshottable sdir3Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir3.toString());
+ INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
+ INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
+ INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
.asReference();
@@ -1183,8 +1176,7 @@ public class TestRenameWithSnapshots {
assertTrue(hdfs.exists(bar_s2));
// check internal details
- INodeDirectorySnapshottable sdir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+ INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
@@ -1291,8 +1283,8 @@ public class TestRenameWithSnapshots {
assertFalse(result);
// check the current internal details
- INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
- .getINode4Write(sdir1.toString());
+ INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+ .asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1361,8 +1353,8 @@ public class TestRenameWithSnapshots {
assertFalse(result);
// check the current internal details
- INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
- .getINode4Write(sdir1.toString());
+ INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+ .asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1428,11 +1420,11 @@ public class TestRenameWithSnapshots {
assertFalse(result);
// check the current internal details
- INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
- .getINode4Write(sdir1.toString());
+ INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+ .asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
- INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir
- .getINode4Write(sdir2.toString());
+ INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+ .asDirectory();
Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1459,8 +1451,7 @@ public class TestRenameWithSnapshots {
assertFalse(result);
// check internal details again
- dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2
- .toString());
+ dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode = fsdir.getINode4Write(foo_dir2.toString());
dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1600,8 +1591,8 @@ public class TestRenameWithSnapshots {
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// check dir2
- INode dir2Node = fsdir.getINode4Write(dir2.toString());
- assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+ INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
+ assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts = dir2Node.computeQuotaUsage();
assertEquals(3, counts.get(Quota.NAMESPACE));
assertEquals(0, counts.get(Quota.DISKSPACE));
@@ -1611,8 +1602,7 @@ public class TestRenameWithSnapshots {
INode subdir2Node = childrenList.get(0);
assertSame(dir2Node, subdir2Node.getParent());
assertSame(subdir2Node, fsdir.getINode4Write(subdir2.toString()));
- diffList = ((INodeDirectorySnapshottable) dir2Node)
- .getDiffs().asList();
+ diffList = dir2Node.getDiffs().asList();
assertEquals(1, diffList.size());
diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@@ -1674,8 +1664,8 @@ public class TestRenameWithSnapshots {
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// check dir2
- INode dir2Node = fsdir.getINode4Write(dir2.toString());
- assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+ INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
+ assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts = dir2Node.computeQuotaUsage();
assertEquals(4, counts.get(Quota.NAMESPACE));
assertEquals(0, counts.get(Quota.DISKSPACE));
@@ -1690,7 +1680,7 @@ public class TestRenameWithSnapshots {
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node, subsubdir2Node.getParent());
- diffList = ((INodeDirectorySnapshottable) dir2Node).getDiffs().asList();
+ diffList = dir2Node.getDiffs().asList();
assertEquals(1, diffList.size());
diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@@ -1724,8 +1714,8 @@ public class TestRenameWithSnapshots {
}
// check
- INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsdir
- .getINode4Write(root.toString());
+ INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
+ .asDirectory();
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1795,7 +1785,7 @@ public class TestRenameWithSnapshots {
// check dir2
INode dir2Node = fsdir.getINode4Write(dir2.toString());
- assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+ assertTrue(dir2Node.asDirectory().isSnapshottable());
Quota.Counts counts = dir2Node.computeQuotaUsage();
assertEquals(7, counts.get(Quota.NAMESPACE));
assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
@@ -1962,12 +1952,12 @@ public class TestRenameWithSnapshots {
hdfs.deleteSnapshot(sdir2, "s3");
// check
- final INodeDirectorySnapshottable dir1Node =
- (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
+ final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+ .asDirectory();
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4, q1.get(Quota.NAMESPACE));
- final INodeDirectorySnapshottable dir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
+ final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+ .asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
@@ -2031,13 +2021,13 @@ public class TestRenameWithSnapshots {
hdfs.deleteSnapshot(sdir2, "s3");
// check
- final INodeDirectorySnapshottable dir1Node =
- (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
+ final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+ .asDirectory();
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9, q1.get(Quota.NAMESPACE));
- final INodeDirectorySnapshottable dir2Node =
- (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
+ final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+ .asDirectory();
Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2, q2.get(Quota.NAMESPACE));
@@ -2253,8 +2243,8 @@ public class TestRenameWithSnapshots {
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
assertEquals(1, barDiffList.size());
DirectoryDiff diff = barDiffList.get(0);
- INodeDirectorySnapshottable testNode =
- (INodeDirectorySnapshottable) fsdir.getINode4Write(test.toString());
+ INodeDirectory testNode = fsdir.getINode4Write(test.toString())
+ .asDirectory();
Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(), diff.getSnapshotId());
// and file should be stored in the deleted list of this snapshot diff
@@ -2266,14 +2256,10 @@ public class TestRenameWithSnapshots {
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
.asDirectory();
List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
- // dir2Node should contain 2 snapshot diffs, one for s2, and the other was
- // originally s1 (created when dir2 was transformed to a snapshottable dir),
- // and currently is s0
- assertEquals(2, dir2DiffList.size());
- dList = dir2DiffList.get(1).getChildrenDiff().getList(ListType.DELETED);
+ // dir2Node should contain 1 snapshot diff for s2
+ assertEquals(1, dir2DiffList.size());
+ dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1, dList.size());
- cList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
- assertTrue(cList.isEmpty());
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
foo.getName());
INodeReference.WithName fooNode_s2 =
@@ -2374,4 +2360,46 @@ public class TestRenameWithSnapshots {
// save namespace and restart
restartClusterAndCheckImage(true);
}
+
+ @Test
+ public void testRenameWithOverWrite() throws Exception {
+ final Path root = new Path("/");
+ final Path foo = new Path(root, "foo");
+ final Path file1InFoo = new Path(foo, "file1");
+ final Path file2InFoo = new Path(foo, "file2");
+ final Path file3InFoo = new Path(foo, "file3");
+ DFSTestUtil.createFile(hdfs, file1InFoo, 1L, REPL, SEED);
+ DFSTestUtil.createFile(hdfs, file2InFoo, 1L, REPL, SEED);
+ DFSTestUtil.createFile(hdfs, file3InFoo, 1L, REPL, SEED);
+ final Path bar = new Path(root, "bar");
+ hdfs.mkdirs(bar);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ // move file1 from foo to bar
+ final Path fileInBar = new Path(bar, "file1");
+ hdfs.rename(file1InFoo, fileInBar);
+ // rename bar to newDir
+ final Path newDir = new Path(root, "newDir");
+ hdfs.rename(bar, newDir);
+ // move file2 from foo to newDir
+ final Path file2InNewDir = new Path(newDir, "file2");
+ hdfs.rename(file2InFoo, file2InNewDir);
+ // move file3 from foo to newDir and rename it to file1, this will overwrite
+ // the original file1
+ final Path file1InNewDir = new Path(newDir, "file1");
+ hdfs.rename(file3InFoo, file1InNewDir, Rename.OVERWRITE);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ SnapshotDiffReport report = hdfs.getSnapshotDiffReport(root, "s0", "s1");
+ LOG.info("DiffList is \n\"" + report.toString() + "\"");
+ List<DiffReportEntry> entries = report.getDiffList();
+ assertEquals(7, entries.size());
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName(), null));
+ assertTrue(existsInDiffReport(entries, DiffType.MODIFY, bar.getName(), null));
+ assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1", null));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "bar", "newDir"));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
+ assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
+ }
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java Sat Jul 12 02:24:40 2014
@@ -112,23 +112,20 @@ public class TestSetQuotaWithSnapshot {
hdfs.allowSnapshot(dir);
hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
HdfsConstants.QUOTA_DONT_SET);
- INode dirNode = fsdir.getINode4Write(dir.toString());
- assertTrue(dirNode instanceof INodeDirectorySnapshottable);
- assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
- .size());
+ INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+ assertTrue(dirNode.isSnapshottable());
+ assertEquals(0, dirNode.getDiffs().asList().size());
hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
HdfsConstants.QUOTA_DONT_SET - 1);
- dirNode = fsdir.getINode4Write(dir.toString());
- assertTrue(dirNode instanceof INodeDirectorySnapshottable);
- assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
- .size());
+ dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+ assertTrue(dirNode.isSnapshottable());
+ assertEquals(0, dirNode.getDiffs().asList().size());
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
- dirNode = fsdir.getINode4Write(dir.toString());
- assertTrue(dirNode instanceof INodeDirectorySnapshottable);
- assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
- .size());
+ dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+ assertTrue(dirNode.isSnapshottable());
+ assertEquals(0, dirNode.getDiffs().asList().size());
// allow snapshot on dir and create snapshot s1
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
@@ -136,10 +133,9 @@ public class TestSetQuotaWithSnapshot {
// clear quota of dir
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
// dir should still be a snapshottable directory
- dirNode = fsdir.getINode4Write(dir.toString());
- assertTrue(dirNode instanceof INodeDirectorySnapshottable);
- assertEquals(1, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
- .size());
+ dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+ assertTrue(dirNode.isSnapshottable());
+ assertEquals(1, dirNode.getDiffs().asList().size());
SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
assertEquals(1, status.length);
assertEquals(dir, status[0].getFullPath());
@@ -154,8 +150,7 @@ public class TestSetQuotaWithSnapshot {
assertTrue(subNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
assertEquals(1, diffList.size());
- Snapshot s2 = ((INodeDirectorySnapshottable) dirNode).getSnapshot(DFSUtil
- .string2Bytes("s2"));
+ Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1, createdList.size());
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Sat Jul 12 02:24:40 2014
@@ -430,30 +430,31 @@ public class TestSnapshot {
.asDirectory();
assertTrue(rootNode.isSnapshottable());
// root is snapshottable dir, but with 0 snapshot quota
- assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+ assertEquals(0, rootNode.getDirectorySnapshottableFeature()
+ .getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
- assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
- ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+ assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
+ rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
// call allowSnapshot again
hdfs.allowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
- assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
- ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+ assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
+ rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
// disallowSnapshot on dir
hdfs.disallowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
- assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+ assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
// do it again
hdfs.disallowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
- assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+ assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
/**
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Sat Jul 12 02:24:40 2014
@@ -28,12 +28,14 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -396,4 +398,39 @@ public class TestSnapshotBlocksMap {
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
+
+ /**
+ * Make sure that deleting a non-zero-length file whose copy in an earlier
+ * snapshot is zero-length works, and that the NameNode can still save the
+ * fsimage afterwards.
+ */
+ @Test
+ public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final byte[] testData = "foo bar baz".getBytes();
+
+ // Create a zero-length file.
+ DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
+ assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
+
+ // Create a snapshot that includes that file.
+ SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+
+ // Extend that file.
+ FSDataOutputStream out = hdfs.append(bar);
+ out.write(testData);
+ out.close();
+ INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+ BlockInfo[] blks = barNode.getBlocks();
+ assertEquals(1, blks.length);
+ assertEquals(testData.length, blks[0].getNumBytes());
+
+ // Delete the file.
+ hdfs.delete(bar, true);
+
+ // Now make sure that the NN can still save an fsimage successfully.
+ cluster.getNameNode().getRpcServer().setSafeMode(
+ SafeModeAction.SAFEMODE_ENTER, false);
+ cluster.getNameNode().getRpcServer().saveNamespace();
+ }
}
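The new regression test only needs the NameNode to complete a saveNamespace after the delete. A hedged sketch of the equivalent sequence through the DistributedFileSystem client API (setSafeMode and saveNamespace are existing client calls; the test above drives the same operations through the NameNode RPC handle instead):

    // enter safe mode, persist the namespace, then leave safe mode
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);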
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Sat Jul 12 02:24:40 2014
@@ -281,10 +281,10 @@ public class TestSnapshotDeletion {
checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
// get two snapshots for later use
- Snapshot snapshot0 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
- .toString())).getSnapshot(DFSUtil.string2Bytes("s0"));
- Snapshot snapshot1 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
- .toString())).getSnapshot(DFSUtil.string2Bytes("s1"));
+ Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory()
+ .getSnapshot(DFSUtil.string2Bytes("s0"));
+ Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory()
+ .getSnapshot(DFSUtil.string2Bytes("s1"));
// Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
// metaChangeFile2. Note that when we directly delete a directory, the
@@ -509,8 +509,7 @@ public class TestSnapshotDeletion {
}
// check 1. there is no snapshot s0
- final INodeDirectorySnapshottable dirNode =
- (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+ final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java Sat Jul 12 02:24:40 2014
@@ -25,6 +25,7 @@ import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -143,7 +144,7 @@ public class TestSnapshotDiffReport {
hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
}
// modify file10
- hdfs.setReplication(file10, (short) (REPLICATION - 1));
+ hdfs.setReplication(file10, (short) (REPLICATION + 1));
}
/** check the correctness of the diff reports */
@@ -166,11 +167,11 @@ public class TestSnapshotDiffReport {
} else if (entry.getType() == DiffType.DELETE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
- new DiffReportEntry(DiffType.CREATE, entry.getRelativePath())));
+ new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
} else if (entry.getType() == DiffType.CREATE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
- new DiffReportEntry(DiffType.DELETE, entry.getRelativePath())));
+ new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
}
}
}
@@ -329,5 +330,166 @@ public class TestSnapshotDiffReport {
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1")));
}
-
+
+ /**
+ * Rename a directory to its prior descendant, and verify the diff report.
+ */
+ @Test
+ public void testDiffReportWithRename() throws Exception {
+ final Path root = new Path("/");
+ final Path sdir1 = new Path(root, "dir1");
+ final Path sdir2 = new Path(root, "dir2");
+ final Path foo = new Path(sdir1, "foo");
+ final Path bar = new Path(foo, "bar");
+ hdfs.mkdirs(bar);
+ hdfs.mkdirs(sdir2);
+
+ // create snapshot on root
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ // /dir1/foo/bar -> /dir2/bar
+ final Path bar2 = new Path(sdir2, "bar");
+ hdfs.rename(bar, bar2);
+
+ // /dir1/foo -> /dir2/bar/foo
+ final Path foo2 = new Path(bar2, "foo");
+ hdfs.rename(foo, foo2);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
+ // let's delete /dir2 to make things more complicated
+ hdfs.delete(sdir2, true);
+
+ verifyDiffReport(root, "s1", "s2",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/foo"),
+ DFSUtil.string2Bytes("dir2/bar/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
+ new DiffReportEntry(DiffType.MODIFY,
+ DFSUtil.string2Bytes("dir1/foo/bar")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil
+ .string2Bytes("dir1/foo/bar"), DFSUtil.string2Bytes("dir2/bar")));
+ }
+
+ /**
+ * Renaming a file/dir to a location outside of the snapshottable dir should
+ * be reported as a deletion. Renaming a file/dir into the snapshottable dir
+ * from outside should be reported as a creation.
+ */
+ @Test
+ public void testDiffReportWithRenameOutside() throws Exception {
+ final Path root = new Path("/");
+ final Path dir1 = new Path(root, "dir1");
+ final Path dir2 = new Path(root, "dir2");
+ final Path foo = new Path(dir1, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ final Path bar = new Path(dir2, "bar");
+ final Path fileInBar = new Path(bar, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+ DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
+
+ // create snapshot on /dir1
+ SnapshotTestHelper.createSnapshot(hdfs, dir1, "s0");
+
+ // move bar into dir1
+ final Path newBar = new Path(dir1, "newBar");
+ hdfs.rename(bar, newBar);
+ // move foo out of dir1 into dir2
+ final Path newFoo = new Path(dir2, "new");
+ hdfs.rename(foo, newFoo);
+
+ SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
+ verifyDiffReport(dir1, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes(newBar
+ .getName())),
+ new DiffReportEntry(DiffType.DELETE,
+ DFSUtil.string2Bytes(foo.getName())));
+ }
+
+ /**
+ * Renaming a file/dir and then deleting an ancestor dir of the rename target
+ * should be reported as a deletion.
+ */
+ @Test
+ public void testDiffReportWithRenameAndDelete() throws Exception {
+ final Path root = new Path("/");
+ final Path dir1 = new Path(root, "dir1");
+ final Path dir2 = new Path(root, "dir2");
+ final Path foo = new Path(dir1, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ final Path bar = new Path(dir2, "bar");
+ final Path fileInBar = new Path(bar, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+ DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
+ new DiffReportEntry(DiffType.DELETE, DFSUtil
+ .string2Bytes("dir2/bar/file")),
+ new DiffReportEntry(DiffType.RENAME,
+ DFSUtil.string2Bytes("dir1/foo/file"),
+ DFSUtil.string2Bytes("dir2/bar/file")));
+
+ // delete bar
+ hdfs.delete(bar, true);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
+ verifyDiffReport(root, "s0", "s2",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
+ new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
+ new DiffReportEntry(DiffType.DELETE,
+ DFSUtil.string2Bytes("dir1/foo/file")));
+ }
+
+ @Test
+ public void testDiffReportWithRenameToNewDir() throws Exception {
+ final Path root = new Path("/");
+ final Path foo = new Path(root, "foo");
+ final Path fileInFoo = new Path(foo, "file");
+ DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ final Path bar = new Path(root, "bar");
+ hdfs.mkdirs(bar);
+ final Path fileInBar = new Path(bar, "file");
+ hdfs.rename(fileInFoo, fileInBar);
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
+ new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("bar")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo/file"),
+ DFSUtil.string2Bytes("bar/file")));
+ }
+
+ /**
+ * Rename a file and then append some data to it.
+ */
+ @Test
+ public void testDiffReportWithRenameAndAppend() throws Exception {
+ final Path root = new Path("/");
+ final Path foo = new Path(root, "foo");
+ DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);
+
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+ final Path bar = new Path(root, "bar");
+ hdfs.rename(foo, bar);
+ DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
+ SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+ // the MODIFY entry for the file is always reported before its RENAME entry
+ verifyDiffReport(root, "s0", "s1",
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+ new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
+ new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
+ DFSUtil.string2Bytes("bar")));
+ }
}
\ No newline at end of file
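For reference, the entries asserted in these new tests are the DiffReportEntry objects that DistributedFileSystem#getSnapshotDiffReport hands back to clients. A brief usage sketch (paths and snapshot names are illustrative):

    SnapshotDiffReport report =
        hdfs.getSnapshotDiffReport(new Path("/"), "s0", "s1");
    for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
      // each entry carries a DiffType (CREATE/DELETE/MODIFY/RENAME) plus the
      // source path and, for renames, the target path
      System.out.println(entry);
    }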
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java Sat Jul 12 02:24:40 2014
@@ -18,13 +18,19 @@
package org.apache.hadoop.hdfs.server.namenode.snapshot;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
import java.util.ArrayList;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.junit.*;
-import static org.mockito.Mockito.*;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.junit.Assert;
+import org.junit.Test;
/**
@@ -40,7 +46,7 @@ public class TestSnapshotManager {
public void testSnapshotLimits() throws Exception {
// Setup mock objects for SnapshotManager.createSnapshot.
//
- INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
+ INodeDirectory ids = mock(INodeDirectory.class);
FSDirectory fsdir = mock(FSDirectory.class);
SnapshotManager sm = spy(new SnapshotManager(fsdir));
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Sat Jul 12 02:24:40 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.ipc.RemoteException;
@@ -88,12 +89,13 @@ public class TestSnapshotRename {
public ExpectedException exception = ExpectedException.none();
/**
- * Check the correctness of snapshot list within
- * {@link INodeDirectorySnapshottable}
+ * Check the correctness of snapshot list within snapshottable dir
*/
- private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
+ private void checkSnapshotList(INodeDirectory srcRoot,
String[] sortedNames, String[] names) {
- ReadOnlyList<Snapshot> listByName = srcRoot.getSnapshotsByNames();
+ assertTrue(srcRoot.isSnapshottable());
+ ReadOnlyList<Snapshot> listByName = srcRoot
+ .getDirectorySnapshottableFeature().getSnapshotList();
assertEquals(sortedNames.length, listByName.size());
for (int i = 0; i < listByName.size(); i++) {
assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
@@ -101,7 +103,8 @@ public class TestSnapshotRename {
List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
assertEquals(names.length, listByTime.size());
for (int i = 0; i < listByTime.size(); i++) {
- Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
+ Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
+ listByTime.get(i).getSnapshotId());
assertEquals(names[i], s.getRoot().getLocalName());
}
}
@@ -121,8 +124,7 @@ public class TestSnapshotRename {
// Rename s3 to s22
hdfs.renameSnapshot(sub1, "s3", "s22");
// Check the snapshots list
- INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable.valueOf(
- fsdir.getINode(sub1.toString()), sub1.toString());
+ INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
new String[] { "s1", "s2", "s22" });
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java Sat Jul 12 02:24:40 2014
@@ -26,6 +26,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.S
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
@@ -55,8 +57,9 @@ public class TestXAttrWithSnapshot {
private static Configuration conf;
private static DistributedFileSystem hdfs;
private static int pathCount = 0;
- private static Path path, snapshotPath;
- private static String snapshotName;
+ private static Path path, snapshotPath, snapshotPath2, snapshotPath3;
+ private static String snapshotName, snapshotName2, snapshotName3;
+ private final int SUCCESS = 0;
// XAttrs
private static final String name1 = "user.a1";
private static final byte[] value1 = { 0x31, 0x32, 0x33 };
@@ -87,7 +90,11 @@ public class TestXAttrWithSnapshot {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
+ snapshotName2 = snapshotName + "-2";
+ snapshotName3 = snapshotName + "-3";
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
+ snapshotPath2 = new Path(path, new Path(".snapshot", snapshotName2));
+ snapshotPath3 = new Path(path, new Path(".snapshot", snapshotName3));
}
/**
@@ -249,15 +256,71 @@ public class TestXAttrWithSnapshot {
private static void doSnapshotRootRemovalAssertions(Path path,
Path snapshotPath) throws Exception {
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
- Assert.assertEquals(xattrs.size(), 0);
+ Assert.assertEquals(0, xattrs.size());
xattrs = hdfs.getXAttrs(snapshotPath);
- Assert.assertEquals(xattrs.size(), 2);
+ Assert.assertEquals(2, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
}
/**
+ * Test taking successive snapshots in between modifications of XAttrs.
+ * Also verify that the XAttrs recorded in the remaining snapshots are not
+ * altered when one of the snapshots is deleted.
+ */
+ @Test
+ public void testSuccessiveSnapshotXAttrChanges() throws Exception {
+ // First snapshot
+ FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+ hdfs.setXAttr(path, name1, value1);
+ SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+ Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotPath);
+ Assert.assertEquals(1, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+ // Second snapshot
+ hdfs.setXAttr(path, name1, newValue1);
+ hdfs.setXAttr(path, name2, value2);
+ SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName2);
+ xattrs = hdfs.getXAttrs(snapshotPath2);
+ Assert.assertEquals(2, xattrs.size());
+ Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+ Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+ // Third snapshot
+ hdfs.setXAttr(path, name1, value1);
+ hdfs.removeXAttr(path, name2);
+ SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName3);
+ xattrs = hdfs.getXAttrs(snapshotPath3);
+ Assert.assertEquals(1, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+ // Check that the first and second snapshots'
+ // XAttrs have stayed constant
+ xattrs = hdfs.getXAttrs(snapshotPath);
+ Assert.assertEquals(1, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+ xattrs = hdfs.getXAttrs(snapshotPath2);
+ Assert.assertEquals(2, xattrs.size());
+ Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+ Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+ // Remove the second snapshot and verify the first and
+ // third snapshots' XAttrs have stayed constant
+ hdfs.deleteSnapshot(path, snapshotName2);
+ xattrs = hdfs.getXAttrs(snapshotPath);
+ Assert.assertEquals(1, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+ xattrs = hdfs.getXAttrs(snapshotPath3);
+ Assert.assertEquals(1, xattrs.size());
+ Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+ hdfs.deleteSnapshot(path, snapshotName);
+ hdfs.deleteSnapshot(path, snapshotName3);
+ }
+
+ /**
* Assert exception of setting xattr on read-only snapshot.
*/
@Test
@@ -269,6 +332,18 @@ public class TestXAttrWithSnapshot {
}
/**
+ * Assert that removing an xattr on a read-only snapshot throws an exception.
+ */
+ @Test
+ public void testRemoveXAttrSnapshotPath() throws Exception {
+ FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+ hdfs.setXAttr(path, name1, value1);
+ SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+ exception.expect(SnapshotAccessControlException.class);
+ hdfs.removeXAttr(snapshotPath, name1);
+ }
+
+ /**
* Assert exception of setting xattr when exceeding quota.
*/
@Test
@@ -340,6 +415,26 @@ public class TestXAttrWithSnapshot {
}
/**
+ * Test that users can copy a snapshot while preserving its xattrs.
+ */
+ @Test (timeout = 120000)
+ public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
+ FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+ hdfs.setXAttr(path, name1, value1);
+ hdfs.setXAttr(path, name2, value2);
+ SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+ Path snapshotCopy = new Path(path.toString() + "-copy");
+ String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
+ snapshotCopy.toUri().toString() };
+ int ret = ToolRunner.run(new FsShell(conf), argv);
+ assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+
+ Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
+ assertArrayEquals(value1, xattrs.get(name1));
+ assertArrayEquals(value2, xattrs.get(name2));
+ }
+
+ /**
* Initialize the cluster, wait for it to become active, and get FileSystem
* instances for our test users.
*
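The new copy test drives the ordinary FsShell; a rough standalone sketch of the same invocation, with illustrative paths, where "-px" asks cp to preserve the source's xattrs on the copy:

    // copy the read-only snapshot to a regular path, preserving xattrs
    String[] argv = { "-cp", "-px", "/p1/.snapshot/snapshot1", "/p1-copy" };
    int ret = ToolRunner.run(new FsShell(conf), argv);   // 0 on success
    Map<String, byte[]> copied = hdfs.getXAttrs(new Path("/p1-copy"));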
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java Sat Jul 12 02:24:40 2014
@@ -197,11 +197,12 @@ public class TestShortCircuitCache {
@Test(timeout=60000)
public void testExpiry() throws Exception {
final ShortCircuitCache cache =
- new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000, 0);
+ new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000000, 0);
final TestFileDescriptorPair pair = new TestFileDescriptorPair();
ShortCircuitReplicaInfo replicaInfo1 =
cache.fetchOrCreate(
- new ExtendedBlockId(123, "test_bp1"), new SimpleReplicaCreator(123, cache, pair));
+ new ExtendedBlockId(123, "test_bp1"),
+ new SimpleReplicaCreator(123, cache, pair));
Preconditions.checkNotNull(replicaInfo1.getReplica());
Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
pair.compareWith(replicaInfo1.getReplica().getDataStream(),