You are viewing a plain text version of this content. The canonical link is available in the original HTML version of this message.
Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2020/01/08 03:49:06 UTC
[hadoop] branch branch-3.1 updated: HDFS-15072. HDFS MiniCluster
fails to start when run in directory path with a %. (#1775)
This is an automated email from the ASF dual-hosted git repository.
aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new 4c8b01a HDFS-15072. HDFS MiniCluster fails to start when run in directory path with a %. (#1775)
4c8b01a is described below
commit 4c8b01abcb7ca4433a60e812d702902ca27b89b8
Author: Masatake Iwasaki <iw...@apache.org>
AuthorDate: Wed Jan 8 11:28:34 2020 +0900
HDFS-15072. HDFS MiniCluster fails to start when run in directory path with a %. (#1775)
(cherry picked from commit a43c177f1d4c2b6149a2680dd23d91103eca3be0)
---
.../hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 3 ++-
.../server/datanode/fsdataset/impl/TestFsDatasetImpl.java | 15 ++++++++++++++-
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 4dc556d..5a8229a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -196,9 +196,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
+ String escapedPath = parent.toString().replaceAll("%", "%%");
ThreadFactory workerFactory = new ThreadFactoryBuilder()
.setDaemon(true)
- .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
+ .setNameFormat("FsVolumeImplWorker-" + escapedPath + "-%d")
.build();
ThreadPoolExecutor executor = new ThreadPoolExecutor(
1, maxNumThreads,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 9270be8..51a6e6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -951,4 +951,17 @@ public class TestFsDatasetImpl {
}
}
-}
\ No newline at end of file
+ @Test(timeout=30000)
+ public void testDataDirWithPercent() throws IOException {
+ String baseDir = new FileSystemTestHelper().getTestRootDir();
+ File dataDir = new File(baseDir, "invalidFormatString-%z");
+ dataDir.mkdirs();
+ FsVolumeImpl volumeFixed = new FsVolumeImplBuilder()
+ .setConf(new HdfsConfiguration())
+ .setDataset(dataset)
+ .setStorageID("storage-id")
+ .setStorageDirectory(
+ new StorageDirectory(StorageLocation.parse(dataDir.getPath())))
+ .build();
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org