Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/31 15:50:38 UTC

[44/50] [abbrv] hadoop git commit: HDFS-13582. Improve backward compatibility for HDFS-13176 (WebHdfs file path gets truncated when having semicolon (;) inside). Contributed by Zsolt Venczel.

HDFS-13582. Improve backward compatibility for HDFS-13176 (WebHdfs file path gets truncated when having semicolon (;) inside). Contributed by Zsolt Venczel.
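
For context: HDFS-13176 fixed the truncation at ';' by URL-encoding
every path segment unconditionally, but that changes the on-the-wire
form of names that never needed escaping, which is the compatibility
gap this follow-up narrows. A minimal sketch (not part of the commit)
of what unconditional encoding does to a legal HDFS filename:

    import java.net.URLEncoder;

    public class EncodeDemo {
      public static void main(String[] args) throws Exception {
        // A filename that is legal in HDFS but full of
        // URL-significant characters:
        String legacyName = "specialFile ?\"\\()[]";
        System.out.println(URLEncoder.encode(legacyName, "UTF-8"));
        // -> specialFile+%3F%22%5C%28%29%5B%5D
        // URLEncoder applies application/x-www-form-urlencoded rules,
        // so even the space is rewritten, and anything that treats the
        // result as a literal path sees a different name than the one
        // the client used.
      }
    }

The new SPECIAL_FILENAME_CHARACTERS_REGEX below names the only
characters (';', '+', '%') that genuinely require escaping; segments
without them can pass through untouched when the incoming path is not
itself valid URL encoding.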


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1361030e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1361030e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1361030e

Branch: refs/heads/HDDS-4
Commit: 1361030e59d7557a2bffac0ea8df116ce2eaae4a
Parents: 6bc92e3
Author: Sean Mackrory <ma...@apache.org>
Authored: Thu May 31 07:56:57 2018 -0600
Committer: Sean Mackrory <ma...@apache.org>
Committed: Thu May 31 07:59:21 2018 -0600

----------------------------------------------------------------------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 11 +++-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  | 55 ++++++++++++++++++++
 2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1361030e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index f7deab9..673acd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -146,6 +146,8 @@ public class WebHdfsFileSystem extends FileSystem
   public static final String EZ_HEADER = "X-Hadoop-Accept-EZ";
   public static final String FEFINFO_HEADER = "X-Hadoop-feInfo";
 
+  public static final String SPECIAL_FILENAME_CHARACTERS_REGEX = ".*[;+%].*";
+
   /**
    * Default connection factory may be overridden in tests to use smaller
    * timeout values
@@ -606,8 +608,10 @@ public class WebHdfsFileSystem extends FileSystem
     if (fspath != null) {
       URI fspathUri = fspath.toUri();
       String fspathUriDecoded = fspathUri.getPath();
+      boolean pathAlreadyEncoded = false;
       try {
         fspathUriDecoded = URLDecoder.decode(fspathUri.getPath(), "UTF-8");
+        pathAlreadyEncoded = true;
       } catch (IllegalArgumentException ex) {
         LOG.trace("Cannot decode URL encoded file", ex);
       }
@@ -617,7 +621,12 @@ public class WebHdfsFileSystem extends FileSystem
         StringBuilder fsPathEncodedItems = new StringBuilder();
         for (String fsPathItem : fspathItems) {
           fsPathEncodedItems.append("/");
-          fsPathEncodedItems.append(URLEncoder.encode(fsPathItem, "UTF-8"));
+          if (fsPathItem.matches(SPECIAL_FILENAME_CHARACTERS_REGEX) ||
+              pathAlreadyEncoded) {
+            fsPathEncodedItems.append(URLEncoder.encode(fsPathItem, "UTF-8"));
+          } else {
+            fsPathEncodedItems.append(fsPathItem);
+          }
         }
         encodedFSPath = new Path(fspathUri.getScheme(),
                 fspathUri.getAuthority(), fsPathEncodedItems.substring(1));
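
The hunks above make the encoding conditional on what
URLDecoder.decode makes of the incoming path. If decoding succeeds,
the decoded form is used and every segment is re-encoded, as
HDFS-13176 already did; if it throws IllegalArgumentException (the
path contains '%' sequences that are not valid escapes), the raw path
is kept and only segments containing ';', '+' or '%' are escaped,
leaving the rest in their pre-HDFS-13176 wire form. A standalone
restatement of the per-segment loop (the class and method names here
are invented for illustration):

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;

    final class SelectiveEncoder {
      // Same pattern as SPECIAL_FILENAME_CHARACTERS_REGEX above:
      // matches any segment containing ';', '+' or '%'.
      static final String SPECIAL = ".*[;+%].*";

      static String encode(String decodedPath, boolean alreadyEncoded)
          throws UnsupportedEncodingException {
        StringBuilder out = new StringBuilder();
        for (String segment : decodedPath.split("/")) {
          out.append('/');
          if (segment.matches(SPECIAL) || alreadyEncoded) {
            // ';', '+' and '%' would be misread in a URL path, so
            // these segments are escaped.
            out.append(URLEncoder.encode(segment, "UTF-8"));
          } else {
            // Everything else keeps its old wire format.
            out.append(segment);
          }
        }
        // split("/") yields an empty first element for absolute
        // paths, so drop the extra leading slash, as the patch does
        // with substring(1).
        return out.substring(1);
      }
    }

For example, encode("/tmp/semi;colon", false) yields
"/tmp/semi%3Bcolon", while encode("/tmp/plain name", false) returns
the path unchanged.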

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1361030e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index ecd53f6..02a68ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -414,4 +414,59 @@ public class TestWebHdfsUrl {
     }
   }
 
+  private static final String BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME =
+          "specialFile ?\"\\()[]_-=&,{}#'`~!@$^*|<>.";
+
+  @Test
+  public void testWebHdfsBackwardCompatibleSpecialCharacterFile()
+          throws Exception {
+
+    assertFalse(BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME
+            .matches(WebHdfsFileSystem.SPECIAL_FILENAME_CHARACTERS_REGEX));
+
+    UserGroupInformation ugi =
+            UserGroupInformation.createRemoteUser("test-user");
+    ugi.setAuthenticationMethod(KERBEROS);
+    UserGroupInformation.setLoginUser(ugi);
+
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path dir = new Path("/testWebHdfsSpecialCharacterFile");
+
+    final short numDatanodes = 1;
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(numDatanodes)
+            .build();
+    try {
+      cluster.waitActive();
+      final FileSystem fs = WebHdfsTestUtil
+              .getWebHdfsFileSystem(conf, WebHdfs.SCHEME);
+
+      //create a file
+      final long length = 1L << 10;
+      final Path file1 = new Path(dir,
+              BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME);
+
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
+
+      //get file status and check that it was written properly.
+      final FileStatus s1 = fs.getFileStatus(file1);
+      assertEquals("Write failed for file " + file1, length, s1.getLen());
+
+      boolean found = false;
+      RemoteIterator<LocatedFileStatus> statusRemoteIterator =
+              fs.listFiles(dir, false);
+      while (statusRemoteIterator.hasNext()) {
+        LocatedFileStatus locatedFileStatus = statusRemoteIterator.next();
+        if (locatedFileStatus.isFile() &&
+                BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME
+                        .equals(locatedFileStatus.getPath().getName())) {
+          found = true;
+        }
+      }
+      assertTrue("Could not find file with special character", found);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }
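
The new test pins the contract from the client's perspective: none of
the characters in BACKWARD_COMPATIBLE_SPECIAL_CHARACTER_FILENAME match
the special-character regex, and files with such names can still be
created, stat'ed, and listed through WebHDFS under their literal
names. A hypothetical end-to-end usage sketch (the NameNode address
and port are placeholders, not from the commit):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsPathsDemo {
      public static void main(String[] args) throws Exception {
        // "nn.example.com:9870" is a placeholder NameNode address.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://nn.example.com:9870"),
            new Configuration());
        // Contains ';', so its segment is escaped on the wire:
        fs.create(new Path("/tmp/semi;colon.txt")).close();
        // Legacy-style name; still round-trips through WebHDFS:
        fs.create(new Path("/tmp/plain name (legacy).txt")).close();
        System.out.println(fs.exists(new Path("/tmp/semi;colon.txt")));
      }
    }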

