Posted to common-commits@hadoop.apache.org by ki...@apache.org on 2020/10/14 23:19:38 UTC

[hadoop] branch branch-3.2 updated: HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.

This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new 392b26c  HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.
392b26c is described below

commit 392b26c23ba38cc6fe01323748d1b10c5cc3f50d
Author: Kihwal Lee <ki...@apache.org>
AuthorDate: Wed Oct 14 18:19:16 2020 -0500

    HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.
    
    (cherry picked from commit e45407128d4e9a9804c777c8f845ad41e1280177)
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java    |  9 ++++--
 .../apache/hadoop/fs/http/server/FSOperations.java | 14 ++++++--
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 37 ++++++++++++++++++++++
 3 files changed, 56 insertions(+), 4 deletions(-)

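In short: the server-side JSON for a FileStatus never carried the symlink target, so listing a path that contained a symlink failed with an NPE on the HttpFS server, and the client rebuilt every entry with a null link target. Below is a minimal sketch of the kind of client call affected, assuming an HttpFS gateway on localhost:14000 and a directory /tmp/links that contains a symlink (host, port, and paths are hypothetical):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListSymlinkedDir {
      public static void main(String[] args) throws Exception {
        // HttpFS speaks the webhdfs protocol; 14000 is its usual port.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://localhost:14000"), new Configuration());
        for (FileStatus status : fs.listStatus(new Path("/tmp/links"))) {
          System.out.println(status.getPath()
              + " isSymlink=" + status.isSymlink());
        }
        fs.close();
      }
    }
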
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 66c821c..d1fdaf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -188,6 +188,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String SYMLINK_JSON = "symlink";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -1083,6 +1084,9 @@ public class HttpFSFileSystem extends FileSystem
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
     FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+    String symLinkValue =
+        type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null;
+    Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
     long len = (Long) json.get(LENGTH_JSON);
     String owner = (String) json.get(OWNER_JSON);
     String group = (String) json.get(GROUP_JSON);
@@ -1107,11 +1111,12 @@ public class HttpFSFileSystem extends FileSystem
           new FsPermissionExtension(permission, aBit, eBit, ecBit);
       FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
           replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-          null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
+          symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
       return fileStatus;
     } else {
       return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
-          replication, blockSize, mTime, aTime, permission, owner, group, path);
+          replication, blockSize, mTime, aTime, permission, owner, group,
+          symLink, path);
     }
   }
 
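The client half of the fix reads the new "symlink" key only when the entry's type is SYMLINK and threads the resulting Path (or null) through both FileStatus constructors. A standalone sketch of that reconstruction, using json-simple (the JSON parser HttpFS itself uses); the payload is a hand-written illustration, not captured server output:

    import org.apache.hadoop.fs.Path;
    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class SymlinkEntryDemo {
      public static void main(String[] args) throws Exception {
        // Hand-written example of a post-patch listing entry for a link.
        String entry = "{\"pathSuffix\":\"symlinkFoo.txt\",\"type\":\"SYMLINK\","
            + "\"symlink\":\"targetFoo.txt\"}";
        JSONObject json = (JSONObject) new JSONParser().parse(entry);
        // Mirrors the patched createFileStatus(): read "symlink" only for
        // SYMLINK-typed entries and tolerate a missing value.
        String symLinkValue = "SYMLINK".equals(json.get("type"))
            ? (String) json.get("symlink") : null;
        Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
        System.out.println("link target: " + symLink);
      }
    }
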
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 043f3e1..896a18e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -107,8 +108,17 @@ public class FSOperations {
     Map<String, Object> json = new LinkedHashMap<String, Object>();
     json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
         (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-    json.put(HttpFSFileSystem.TYPE_JSON,
-        HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+    FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
+    json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
+    if (fileType.equals(FILE_TYPE.SYMLINK)) {
+      // put the symlink target name into the JSON entry
+      try {
+        json.put(HttpFSFileSystem.SYMLINK_JSON,
+            fileStatus.getSymlink().getName());
+      } catch (IOException e) {
+        // Can't happen: getSymlink() only throws for non-symlink statuses.
+      }
+    }
     json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
     json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
     json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
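
The SYMLINK type check above is what justifies the "can't happen" catch: FileStatus.getSymlink() throws an IOException whenever the status is not a symlink, so the guarded call can only throw if the type and the status disagree. A tiny demonstration of that contract (the status below is constructed by hand with arbitrary values):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class GetSymlinkDemo {
      public static void main(String[] args) {
        // A plain-file status: length, isdir, replication, block size,
        // mtime, path.
        FileStatus plainFile = new FileStatus(
            0L, false, 1, 128L << 20, 0L, new Path("/tmp/targetFoo.txt"));
        try {
          plainFile.getSymlink(); // throws: this status is not a symlink
        } catch (IOException e) {
          System.out.println("as expected: " + e.getMessage());
        }
      }
    }
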
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 08cdb12..eace82d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -349,6 +349,42 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs.close();
   }
 
+  private void testListSymLinkStatus() throws Exception {
+    if (isLocalFS()) {
+      // do not test symlinks for the local FS.
+      return;
+    }
+    FileSystem fs = FileSystem.get(getProxiedFSConf());
+    boolean isWebhdfs = fs instanceof WebHdfsFileSystem;
+    Path path =
+        new Path(getProxiedFSTestDir() + "-symlink", "targetFoo.txt");
+    OutputStream os = fs.create(path);
+    os.write(1);
+    os.close();
+    Path linkPath =
+        new Path(getProxiedFSTestDir() + "-symlink", "symlinkFoo.txt");
+    fs.createSymlink(path, linkPath, false);
+    fs = getHttpFSFileSystem();
+    FileStatus linkStatus = fs.getFileStatus(linkPath);
+    FileStatus status1 = fs.getFileStatus(path);
+
+    FileStatus[] stati = fs.listStatus(path.getParent());
+    assertEquals(2, stati.length);
+
+    int countSymlink = 0;
+    for (int i = 0; i < stati.length; i++) {
+      FileStatus fStatus = stati[i];
+      countSymlink += fStatus.isSymlink() ? 1 : 0;
+    }
+    assertEquals(1, countSymlink);
+
+    assertFalse(status1.isSymlink());
+    if (isWebhdfs) {
+      assertTrue(linkStatus.isSymlink());
+    }
+    fs.close();
+  }
+
   private void testListStatus() throws Exception {
     FileSystem fs = FileSystem.get(getProxiedFSConf());
     boolean isDFS = fs instanceof DistributedFileSystem;
@@ -1162,6 +1198,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       break;
     case LIST_STATUS:
       testListStatus();
+      testListSymLinkStatus();
       break;
     case WORKING_DIRECTORY:
       testWorkingdirectory();

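For a quick manual check against a running server, the standard WebHDFS LISTSTATUS call shows the new field directly; with the patch applied, entries for links carry "symlink". A throwaway probe, assuming an HttpFS server on localhost:14000, a directory /tmp/links, and a user named hdfs (all hypothetical):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ProbeListStatus {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/links"
            + "?op=LISTSTATUS&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            // look for "symlink":"targetFoo.txt" in link entries
            System.out.println(line);
          }
        }
      }
    }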
