Posted to common-commits@hadoop.apache.org by in...@apache.org on 2019/12/17 02:42:05 UTC
[hadoop] branch trunk updated: HDFS-14908. LeaseManager should
check parent-child relationship when filtering open files. Contributed by
Jinglun.
This is an automated email from the ASF dual-hosted git repository.
inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 2408066 HDFS-14908. LeaseManager should check parent-child relationship when filtering open files. Contributed by Jinglun.
2408066 is described below
commit 24080666e5e2214d4a362c889cd9aa617be5de81
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Mon Dec 16 18:41:45 2019 -0800
HDFS-14908. LeaseManager should check parent-child relationship when filtering open files. Contributed by Jinglun.
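For context: before this patch, the open-file filters in FSNamesystem and
LeaseManager matched paths with a plain string prefix check, so a filter of
/base also matched the sibling directory /base-open. A minimal sketch of the
false positive (plain Java; the paths mirror the ones used in the test below):

    String filter  = "/base";
    String child   = "/base/open-1-0";      // genuinely under /base
    String sibling = "/base-open/open-1-0"; // a sibling, not a child

    // Both calls return true, although only the first path is under /base;
    // a prefix check alone cannot see the missing '/' boundary.
    boolean a = child.startsWith(filter);   // true -- correct
    boolean b = sibling.startsWith(filter); // true -- false positive

The patch replaces startsWith with DFSUtil.isParentEntry, which additionally
requires a path-separator boundary after the parent prefix.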
---
.../hadoop/hdfs/server/namenode/FSNamesystem.java | 7 ++--
.../hadoop/hdfs/server/namenode/LeaseManager.java | 4 +-
.../java/org/apache/hadoop/hdfs/DFSTestUtil.java | 21 +++++++++-
.../hdfs/server/namenode/TestListOpenFiles.java | 48 ++++++++++++++++++++--
4 files changed, 71 insertions(+), 9 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8d1884e..b626271 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1826,16 +1826,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
BatchedListEntries<OpenFileEntry> batchedListEntries;
+ String normalizedPath = new Path(path).toString(); // normalize path.
try {
readLock();
try {
checkOperation(OperationCategory.READ);
if (openFilesTypes.contains(OpenFilesType.ALL_OPEN_FILES)) {
batchedListEntries = leaseManager.getUnderConstructionFiles(prevId,
- path);
+ normalizedPath);
} else {
if (openFilesTypes.contains(OpenFilesType.BLOCKING_DECOMMISSION)) {
- batchedListEntries = getFilesBlockingDecom(prevId, path);
+ batchedListEntries = getFilesBlockingDecom(prevId, normalizedPath);
} else {
throw new IllegalArgumentException("Unknown OpenFileType: "
+ openFilesTypes);
@@ -1874,7 +1875,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
String fullPathName = inodeFile.getFullPathName();
if (org.apache.commons.lang3.StringUtils.isEmpty(path)
- || fullPathName.startsWith(path)) {
+ || DFSUtil.isParentEntry(fullPathName, path)) {
openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
inodeFile.getFullPathName(),
inodeFile.getFileUnderConstructionFeature().getClientName(),
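A note on the normalizedPath line above: org.apache.hadoop.fs.Path
canonicalizes its string form, so a filter passed in as "/base/" is compared
as "/base". A small sketch (assuming standard Hadoop Path semantics):

    import org.apache.hadoop.fs.Path;

    String normalizedPath = new Path("/base/").toString(); // -> "/base"
    // Without this step, the parent-entry check would have to strip the
    // trailing separator itself before comparing path boundaries.

This normalization is what lets the test below verify both "/base" and
"/base/" as filter paths.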
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 9961440..2efadfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -42,6 +42,7 @@ import com.google.common.collect.Lists;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
@@ -315,7 +316,8 @@ public class LeaseManager {
}
fullPathName = inodeFile.getFullPathName();
- if (StringUtils.isEmpty(path) || fullPathName.startsWith(path)) {
+ if (StringUtils.isEmpty(path) ||
+ DFSUtil.isParentEntry(fullPathName, path)) {
openFileEntries.add(new OpenFileEntry(inodeFile.getId(), fullPathName,
inodeFile.getFileUnderConstructionFeature().getClientName(),
inodeFile.getFileUnderConstructionFeature().getClientMachine()));
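The diff does not show the body of DFSUtil.isParentEntry, but the behavior
the two call sites rely on is a prefix match plus a separator-boundary check.
A hedged sketch of that behavior (illustrative only, not the actual DFSUtil
source):

    // True iff 'path' is 'parent' itself or lies underneath it.
    static boolean isParentEntry(String path, String parent) {
      if (!path.startsWith(parent)) {
        return false;
      }
      if (path.length() == parent.length()) {
        return true; // exact match
      }
      // The root parents everything; otherwise the character right after
      // the parent prefix must be a separator, which is what rejects
      // siblings such as "/base-open" when filtering on "/base".
      return "/".equals(parent) || path.charAt(parent.length()) == '/';
    }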
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 36f2eb2..9646676 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -2392,13 +2392,32 @@ public class DFSTestUtil {
}
}
+ /**
+ * Create open files under the root path.
+ * @param fs the filesystem.
+ * @param filePrefix the prefix of the files.
+ * @param numFilesToCreate the number of files to create.
+ */
public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
String filePrefix, int numFilesToCreate) throws IOException {
+ return createOpenFiles(fs, new Path("/"), filePrefix, numFilesToCreate);
+ }
+
+ /**
+ * Create open files under the given base path.
+ * @param fs the filesystem.
+ * @param baseDir the base path of the files.
+ * @param filePrefix the prefix of the files.
+ * @param numFilesToCreate the number of files to create.
+ */
+ public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
+ Path baseDir, String filePrefix, int numFilesToCreate)
+ throws IOException {
final Map<Path, FSDataOutputStream> filesCreated = new HashMap<>();
final byte[] buffer = new byte[(int) (1024 * 1.75)];
final Random rand = new Random(0xFEED0BACL);
for (int i = 0; i < numFilesToCreate; i++) {
- Path file = new Path("/" + filePrefix + "-" + i);
+ Path file = new Path(baseDir, filePrefix + "-" + i);
FSDataOutputStream stm = fs.create(file, true, 1024, (short) 1, 1024);
rand.nextBytes(buffer);
stm.write(buffer);
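A short usage sketch of the new overload (hypothetical snippet reusing the
signatures added above and the closeOpenFiles helper the test calls):

    // Open two files under /base; each is created, written to, and left
    // open so it holds a lease.
    HashMap<Path, FSDataOutputStream> open = new HashMap<>();
    open.putAll(DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open", 2));
    // ... list open files with a "/base" filter ...
    DFSTestUtil.closeOpenFiles(open, open.size()); // close them all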
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index 337e372..2158bc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -157,13 +157,22 @@ public class TestListOpenFiles {
remainingFiles.size() == 0);
}
+ /**
+ * Verify all open files.
+ */
private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles)
throws IOException {
- verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
- OpenFilesIterator.FILTER_PATH_DEFAULT);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+
+ /**
+ * Verify open files with the specified filter path.
+ */
+ private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles,
+ String path) throws IOException {
+ verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
verifyOpenFiles(new HashMap<>(),
- EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
- OpenFilesIterator.FILTER_PATH_DEFAULT);
+ EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION), path);
}
private Set<Path> createFiles(FileSystem fileSystem, String fileNamePrefix,
@@ -255,4 +264,35 @@ public class TestListOpenFiles {
}
}
}
+
+ @Test(timeout = 120000)
+ public void testListOpenFilesWithFilterPath() throws IOException {
+ HashMap<Path, FSDataOutputStream> openFiles = new HashMap<>();
+ createFiles(fs, "closed", 10);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+
+ BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries = nnRpc
+ .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ assertTrue("Open files list should be empty!",
+ openFileEntryBatchedEntries.size() == 0);
+ BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries = nnRpc
+ .listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ assertTrue("Open files list blocking decommission should be empty!",
+ openFilesBlockingDecomEntries.size() == 0);
+
+ openFiles.putAll(
+ DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
+ Map<Path, FSDataOutputStream> baseOpen =
+ DFSTestUtil.createOpenFiles(fs, new Path("/base-open"), "open-1", 1);
+ verifyOpenFiles(openFiles, "/base");
+ verifyOpenFiles(openFiles, "/base/");
+
+ openFiles.putAll(baseOpen);
+ while (openFiles.size() > 0) {
+ DFSTestUtil.closeOpenFiles(openFiles, 1);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+ }
}
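The key assertions in the new test are the verifyOpenFiles calls with "/base"
and "/base/": at that point the expected map holds only the file under /base,
while a file under the sibling /base-open is also open. Under the old
startsWith filter the /base-open entry would match "/base" as well and
surface as an unexpected entry; with isParentEntry only the true child is
listed.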
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org