You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2019/12/24 20:52:18 UTC
[hadoop] branch trunk updated: HDFS-15076. Fix tests that hold FSDirectory lock without holding FSNamesystem lock. Contributed by Konstantin V Shvachko.
This is an automated email from the ASF dual-hosted git repository.
shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new b98ac2a HDFS-15076. Fix tests that hold FSDirectory lock, without holding FSNamesystem lock. Contributed by Konstantin V Shvachko.
b98ac2a is described below
commit b98ac2a3af50ccf2af07790ab0760d4c51820836
Author: Konstantin V Shvachko <sh...@apache.org>
AuthorDate: Tue Dec 24 12:08:34 2019 -0800
HDFS-15076. Fix tests that hold FSDirectory lock, without holding FSNamesystem lock. Contributed by Konstantin V Shvachko.
---
.../server/namenode/TestDiskspaceQuotaUpdate.java | 16 +++++--
.../hdfs/server/namenode/TestFSNamesystem.java | 15 +++++--
.../server/namenode/TestGetBlockLocations.java | 51 +++++++++++++++-------
3 files changed, 61 insertions(+), 21 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 815f4ac..771caef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -380,16 +380,26 @@ public class TestDiskspaceQuotaUpdate {
HashMap<String, Long> dsMap = new HashMap<String, Long>();
scanDirsWithQuota(root, nsMap, dsMap, false);
- getFSDirectory().updateCountForQuota(1);
+ updateCountForQuota(1);
scanDirsWithQuota(root, nsMap, dsMap, true);
- getFSDirectory().updateCountForQuota(2);
+ updateCountForQuota(2);
scanDirsWithQuota(root, nsMap, dsMap, true);
- getFSDirectory().updateCountForQuota(4);
+ updateCountForQuota(4);
scanDirsWithQuota(root, nsMap, dsMap, true);
}
+ /**
+ * Recompute quota usage counts while holding the FSNamesystem write lock,
+ * as FSDirectory#updateCountForQuota asserts the namesystem lock is held.
+ *
+ * @param i initialization multiplier forwarded to
+ * FSDirectory#updateCountForQuota; callers pass 1, 2 and 4.
+ */
+ private void updateCountForQuota(int i) {
+ FSNamesystem fsn = cluster.getNamesystem();
+ fsn.writeLock();
+ try {
+ // Forward the caller-supplied factor; the original patch hard-coded 1,
+ // making the i=2 and i=4 invocations silently repeat the i=1 case.
+ getFSDirectory().updateCountForQuota(i);
+ } finally {
+ fsn.writeUnlock();
+ }
+ }
+
private void scanDirsWithQuota(INodeDirectory dir,
HashMap<String, Long> nsMap,
HashMap<String, Long> dsMap, boolean verify) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 33067f7..cb18bcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -90,7 +90,7 @@ public class TestFSNamesystem {
LeaseManager leaseMan = fsn.getLeaseManager();
leaseMan.addLease("client1", fsn.getFSDirectory().allocateNewInodeId());
assertEquals(1, leaseMan.countLease());
- fsn.clear();
+ clearNamesystem(fsn);
leaseMan = fsn.getLeaseManager();
assertEquals(0, leaseMan.countLease());
}
@@ -185,8 +185,7 @@ public class TestFSNamesystem {
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsn.imageLoadComplete();
assertTrue(fsn.isImageLoaded());
- fsn.clear();
- assertFalse(fsn.isImageLoaded());
+ clearNamesystem(fsn);
final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
.getINode("/");
assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
@@ -194,6 +193,16 @@ public class TestFSNamesystem {
assertTrue(fsn.isImageLoaded());
}
+ /**
+ * Clear the namesystem under the FSNamesystem write lock (required by
+ * FSNamesystem#clear) and verify the image is marked as unloaded.
+ *
+ * @param fsn the namesystem under test; left cleared on return
+ */
+ private void clearNamesystem(FSNamesystem fsn) {
+ fsn.writeLock();
+ try {
+ fsn.clear();
+ assertFalse(fsn.isImageLoaded());
+ } finally {
+ fsn.writeUnlock();
+ }
+ }
+
@Test
public void testGetEffectiveLayoutVersion() {
assertEquals(-63,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
index 214c9a9..1975549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -64,19 +65,28 @@ public class TestGetBlockLocations {
FSNamesystem fsn = spy(setupFileSystem());
final FSDirectory fsd = fsn.getFSDirectory();
FSEditLog editlog = fsn.getEditLog();
+ final boolean[] deleted = new boolean[]{false};
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
- INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
- FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
- new ArrayList<INode>(), new ArrayList<Long>(),
- now());
+ if(!deleted[0]) {
+ fsn.writeLock();
+ try {
+ INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
+ FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
+ new ArrayList<INode>(), new ArrayList<Long>(),
+ now());
+ } finally {
+ fsn.writeUnlock();
+ }
+ deleted[0] = true;
+ }
invocation.callRealMethod();
return null;
}
- }).when(fsn).writeLock();
+ }).when(fsn).checkOperation(OperationCategory.WRITE);
fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
verify(editlog, never()).logTimes(anyString(), anyLong(), anyLong());
@@ -89,22 +99,27 @@ public class TestGetBlockLocations {
final FSDirectory fsd = fsn.getFSDirectory();
FSEditLog editlog = fsn.getEditLog();
final String DST_PATH = "/bar";
- final boolean[] renamed = new boolean[1];
+ final boolean[] renamed = new boolean[] {false};
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
- invocation.callRealMethod();
if (!renamed[0]) {
- FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
- DST_PATH, new INode.BlocksMapUpdateInfo(),
- false);
- renamed[0] = true;
+ fsn.writeLock();
+ try {
+ FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
+ DST_PATH, new INode.BlocksMapUpdateInfo(),
+ false);
+ renamed[0] = true;
+ } finally {
+ fsn.writeUnlock();
+ }
}
+ invocation.callRealMethod();
return null;
}
- }).when(fsn).writeLock();
+ }).when(fsn).checkOperation(OperationCategory.WRITE);
fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
verify(editlog).logTimes(eq(DST_PATH), anyLong(), anyLong());
@@ -119,8 +134,6 @@ public class TestGetBlockLocations {
when(image.getEditLog()).thenReturn(editlog);
final FSNamesystem fsn = new FSNamesystem(conf, image, true);
- final FSDirectory fsd = fsn.getFSDirectory();
- INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
PermissionStatus perm = new PermissionStatus(
"hdfs", "supergroup",
FsPermission.createImmutable((short) 0x1ff));
@@ -128,7 +141,15 @@ public class TestGetBlockLocations {
MOCK_INODE_ID, FILE_NAME.getBytes(StandardCharsets.UTF_8),
perm, 1, 1, new BlockInfo[] {}, (short) 1,
DFS_BLOCK_SIZE_DEFAULT);
- fsn.getFSDirectory().addINode(iip, file, null);
+
+ fsn.writeLock();
+ try {
+ final FSDirectory fsd = fsn.getFSDirectory();
+ INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
+ fsd.addINode(iip, file, null);
+ } finally {
+ fsn.writeUnlock();
+ }
return fsn;
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org