Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/05/11 11:26:12 UTC

[hadoop] branch branch-3.3 updated: HDFS-15338. listOpenFiles() should throw InvalidPathException in case of invalid paths. Contributed by Jinglun.

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 61a4cd5  HDFS-15338. listOpenFiles() should throw InvalidPathException in case of invalid paths. Contributed by Jinglun.
61a4cd5 is described below

commit 61a4cd55399ec0b30ecd76eb5cf718f42dc8908b
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Mon May 11 16:48:34 2020 +0530

    HDFS-15338. listOpenFiles() should throw InvalidPathException in case of invalid paths. Contributed by Jinglun.
---
 .../apache/hadoop/hdfs/DistributedFileSystem.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  1 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  |  2 +-
 .../hdfs/server/namenode/TestListOpenFiles.java    | 26 ++++++++++++++++++++++
 4 files changed, 30 insertions(+), 2 deletions(-)

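In short: the client now normalizes the path (resolving relative paths and
rejecting URIs that belong to a different filesystem) before sending it to the
NameNode, and the NameNode independently rejects non-absolute paths. Below is
a minimal sketch of the resulting behavior, not code from the patch; it
assumes fs.defaultFS points at an HDFS cluster and that the caller is an HDFS
superuser (listOpenFiles is a superuser operation), and the class and path
names are illustrative.

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
    import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

    public class ListOpenFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes the default filesystem is HDFS.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);

        // A plain absolute path works as before.
        RemoteIterator<OpenFileEntry> it = dfs.listOpenFiles(
            EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/base");
        while (it.hasNext()) {
          System.out.println(it.next().getFilePath());
        }

        // A fully-qualified URI for a different filesystem is now
        // rejected client-side ("Wrong FS"), per the new test below.
        try {
          dfs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
              "hdfs://non-cluster/");
        } catch (IllegalArgumentException e) {
          System.out.println("Rejected: " + e.getMessage());
        }
      }
    }
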
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index d1babe3..84a59cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -3549,7 +3549,8 @@ public class DistributedFileSystem extends FileSystem
 
   public RemoteIterator<OpenFileEntry> listOpenFiles(
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
-    return dfs.listOpenFiles(openFilesTypes, path);
+    Path absF = fixRelativePart(new Path(path));
+    return dfs.listOpenFiles(openFilesTypes, getPathName(absF));
   }
 
 
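The two added lines resolve relative paths against the working directory and
validate the scheme/authority before the RPC. fixRelativePart() and
getPathName() are not public API, so here is a rough equivalent of the
resolution step using only public Path constructors; the working directory
below is an assumption, not something from the patch.

    import org.apache.hadoop.fs.Path;

    public class FixRelativePartSketch {
      public static void main(String[] args) {
        // /user/alice is an assumed working directory.
        Path workingDir = new Path("/user/alice");
        Path p = new Path("open-files");
        // Rough equivalent of fixRelativePart(): resolve relative
        // paths against the working directory, leave absolute ones
        // alone. getPathName() then additionally checks that the
        // result belongs to this filesystem ("Wrong FS" otherwise)
        // and returns its URI path component.
        Path absF = p.isAbsolute() ? p : new Path(workingDir, p);
        System.out.println(absF);  // /user/alice/open-files
      }
    }
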
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 99ad6f2..d2725f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1904,6 +1904,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
+    INode.checkAbsolutePath(path);
     final String operationName = "listOpenFiles";
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.READ);
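
The server-side check runs before the superuser and operation checks, so a
malformed path fails fast even for callers that bypass the client-side
normalization above. A hedged sketch of triggering it through the lower-level
DFSClient; the cluster URI is illustrative, "dfs" is a DistributedFileSystem
as in the first sketch, and the failure surfaces on the iterator's first RPC
rather than on the listOpenFiles() call itself.

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
    import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

    // DFSClient skips the normalization added to DistributedFileSystem,
    // so the NameNode's INode.checkAbsolutePath() fires instead.
    DFSClient client = dfs.getClient();
    RemoteIterator<OpenFileEntry> it = client.listOpenFiles(
        EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "hdfs://cluster/base");
    try {
      it.hasNext();  // first RPC happens here
    } catch (IOException e) {
      // Expected to carry "Absolute path required, but got '...'",
      // wrapped in a RemoteException at the RPC boundary.
      System.out.println(e.getMessage());
    }
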
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 6b29b33..6a52440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -806,7 +806,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     return path != null && path.startsWith(Path.SEPARATOR);
   }
 
-  private static void checkAbsolutePath(final String path) {
+  static void checkAbsolutePath(final String path) {
     if (!isValidAbsolutePath(path)) {
       throw new AssertionError("Absolute path required, but got '"
           + path + "'");
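
The only change here is widening checkAbsolutePath() from private to
package-private so that FSNamesystem can call it. For reference, the rule it
enforces, restated standalone from isValidAbsolutePath() just above (Path is
org.apache.hadoop.fs.Path, whose SEPARATOR is "/"):

    // A valid path must be non-null and start with Path.SEPARATOR;
    // anything else, including scheme-qualified URIs such as
    // "hdfs://cluster/base", is rejected with an AssertionError.
    static void checkAbsolutePath(final String path) {
      if (path == null || !path.startsWith(Path.SEPARATOR)) {
        throw new AssertionError("Absolute path required, but got '"
            + path + "'");
      }
    }
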
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index 2158bc7..c6603cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -295,4 +296,29 @@ public class TestListOpenFiles {
       verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
     }
   }
+
+  @Test
+  public void testListOpenFilesWithInvalidPathServerSide() throws Exception {
+    HashMap<Path, FSDataOutputStream> openFiles = new HashMap<>();
+    openFiles.putAll(
+        DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
+    verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+        "/base");
+    intercept(AssertionError.class, "Absolute path required",
+        "Expect InvalidPathException", () -> verifyOpenFiles(new HashMap<>(),
+            EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "hdfs://cluster/base"));
+    while(openFiles.size() > 0) {
+      DFSTestUtil.closeOpenFiles(openFiles, 1);
+      verifyOpenFiles(openFiles);
+    }
+  }
+
+  @Test
+  public void testListOpenFilesWithInvalidPathClientSide() throws Exception {
+    intercept(IllegalArgumentException.class, "Wrong FS",
+        "Expect IllegalArgumentException", () -> fs
+            .listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+                "hdfs://non-cluster/"));
+    fs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/path");
+  }
 }

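A note on the test helper: LambdaTestUtils.intercept(clazz, contained,
message, eval) runs eval, asserts that it throws an exception of type clazz
whose message contains the given substring, and fails with the supplied
message if nothing is thrown. Annotated, the client-side assertion above has
this shape:

    intercept(
        IllegalArgumentException.class,     // expected exception type
        "Wrong FS",                         // substring the message must contain
        "Expect IllegalArgumentException",  // failure text if nothing is thrown
        () -> fs.listOpenFiles(
            EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "hdfs://non-cluster/"));
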

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org