Posted to commits@uniffle.apache.org by ro...@apache.org on 2023/05/24 06:47:36 UTC

[incubator-uniffle] branch master updated: [#895] improvement: Rename Hdfs*.java to Hadoop*.java to support other Hadoop FS-compatible distributed filesystem (#898)

This is an automated email from the ASF dual-hosted git repository.

roryqi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git


The following commit(s) were added to refs/heads/master by this push:
     new 3e58805e [#895] improvement: Rename Hdfs*.java to Hadoop*.java to support other Hadoop FS-compatible distributed filesystem (#898)
3e58805e is described below

commit 3e58805e4e371bec3733870f12000257543bdc17
Author: jiafu zhang <ji...@intel.com>
AuthorDate: Wed May 24 06:47:31 2023 +0000

    [#895] improvement: Rename Hdfs*.java to Hadoop*.java to support other Hadoop FS-compatible distributed filesystem (#898)
    
    ### What changes were proposed in this pull request?
    
    In the server and storage modules, many classes are prefixed with Hdfs even though they use the Hadoop FS API and are therefore implementation-agnostic, not tied to a specific HDFS implementation. Renaming them to Hadoop* lets us support other Hadoop FS-compatible distributed filesystems by extending the existing classes, and it makes the code read more naturally.
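
    For context, here is a minimal sketch (illustrative only, not part of this commit) of why code written against the Hadoop FS API is implementation-agnostic: the concrete filesystem is resolved from the URI scheme, so the same caller works for HDFS or any other Hadoop-compatible FS. The URI and class name below are hypothetical.

    ```java
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HadoopFsCompatSketch {
      public static void main(String[] args) throws Exception {
        // The scheme (hdfs://, daos://, ...) selects the FileSystem implementation;
        // the calling code only depends on the org.apache.hadoop.fs.FileSystem API.
        URI uri = URI.create(args.length > 0 ? args[0] : "hdfs://namenode:8020/tmp/uniffle-demo");
        FileSystem fs = FileSystem.get(uri, new Configuration());
        try (FSDataOutputStream out = fs.create(new Path(uri.getPath()))) {
          out.writeBytes("hello");
        }
      }
    }
    ```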
    
    There may be slight differences among Hadoop FS implementations. For example, [hadoop-daos](https://github.com/daos-stack/daos/tree/master/src/client/java/hadoop-daos), unlike HDFS, does not have a dedicated thread for reading and writing data, so we don't need to close the output stream on each flush to the FS.
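
    As a rough illustration of that difference, a hypothetical write handler is sketched below (the class and method names are made up for this example and are not classes touched by this diff): an HDFS-style handler closes the stream after each flush, while an FS without that constraint can keep the stream open and only hflush.

    ```java
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Hypothetical writer showing the two flush strategies discussed above. */
    class FlushStrategySketch {
      private final FileSystem fs;
      private final Path path;
      private final boolean closePerFlush;   // true for HDFS-style behaviour
      private FSDataOutputStream out;        // kept open across flushes otherwise

      FlushStrategySketch(FileSystem fs, Path path, boolean closePerFlush) {
        this.fs = fs;
        this.path = path;
        this.closePerFlush = closePerFlush;
      }

      void flushBlock(byte[] data) throws java.io.IOException {
        if (out == null) {
          out = fs.exists(path) ? fs.append(path) : fs.create(path);
        }
        out.write(data);
        if (closePerFlush) {
          // HDFS-style: close the stream after each flush.
          out.close();
          out = null;
        } else {
          // Other Hadoop-compatible FS: flush without closing, keeping the stream open.
          out.hflush();
        }
      }
    }
    ```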
    
    ### Why are the changes needed?
    
    Fix: #895
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    CI passed.
    
    Signed-off-by: jiafu zhang <ji...@intel.com>
---
 .../shuffle/reader/AbstractRssReaderTest.java      |  4 +-
 .../shuffle/reader/RssShuffleDataIteratorTest.java | 34 +++++------
 .../spark/shuffle/reader/RssShuffleReaderTest.java |  6 +-
 .../spark/shuffle/reader/RssShuffleReaderTest.java | 10 ++--
 .../apache/uniffle/client/util/ClientUtils.java    |  2 +-
 .../client/impl/ShuffleReadClientImplTest.java     | 70 +++++++++++-----------
 .../{KerberizedHdfs.java => KerberizedHadoop.java} |  8 +--
 ...izedHdfsBase.java => KerberizedHadoopBase.java} | 20 +++----
 .../filesystem/HadoopFilesystemProviderTest.java   | 25 ++++----
 .../common/security/HadoopSecurityContextTest.java | 38 ++++++------
 .../security/SecurityContextFactoryTest.java       | 10 ++--
 .../uniffle/coordinator/ApplicationManager.java    |  2 +-
 .../uniffle/coordinator/CoordinatorConf.java       |  4 +-
 .../storage/AbstractSelectStorageStrategy.java     | 14 ++---
 docs/client_guide.md                               | 48 +++++++--------
 docs/coordinator_guide.md                          | 62 +++++++++----------
 docs/server_guide.md                               | 70 +++++++++++-----------
 ...java => AccessCandidatesCheckerHadoopTest.java} |  6 +-
 ...cessCandidatesCheckerKerberizedHadoopTest.java} | 18 +++---
 ...sTest.java => ClientConfManagerHadoopTest.java} |  6 +-
 ...=> ClientConfManagerKerberlizedHadoopTest.java} | 20 +++----
 .../apache/uniffle/test/IntegrationTestBase.java   |  4 +-
 ...st.java => MultiStorageHadoopFallbackTest.java} |  2 +-
 ... ShuffleServerConcurrentWriteOfHadoopTest.java} |  6 +-
 ...sTest.java => ShuffleServerWithHadoopTest.java} |  4 +-
 ... => ShuffleServerWithKerberizedHadoopTest.java} | 14 ++---
 ...va => ShuffleServerWithMemLocalHadoopTest.java} | 13 ++--
 ... RepartitionWithHadoopMultiStorageRssTest.java} |  4 +-
 ...t.java => ShuffleUnregisterWithHadoopTest.java} |  2 +-
 .../org/apache/uniffle/server/ShuffleServer.java   |  2 +-
 .../uniffle/server/ShuffleServerMetrics.java       |  6 +-
 ...orageManager.java => HadoopStorageManager.java} | 28 ++++-----
 ...a => HadoopStorageManagerFallbackStrategy.java} |  6 +-
 .../server/storage/MultiStorageManager.java        |  4 +-
 .../server/storage/StorageManagerFactory.java      |  2 +-
 ...ShuffleFlushManagerOnKerberizedHadoopTest.java} | 30 +++++-----
 .../uniffle/server/ShuffleFlushManagerTest.java    | 26 ++++----
 .../uniffle/server/ShuffleTaskManagerTest.java     |  8 +--
 ...agerTest.java => HadoopStorageManagerTest.java} | 36 +++++------
 .../server/storage/MultiStorageManagerTest.java    | 10 ++--
 .../StorageManagerFallbackStrategyTest.java        | 10 ++--
 .../common/DefaultStorageMediaProvider.java        |  2 +-
 .../{HdfsStorage.java => HadoopStorage.java}       | 16 ++---
 .../storage/factory/ShuffleHandlerFactory.java     | 16 ++---
 ...adHandler.java => HadoopClientReadHandler.java} | 26 ++++----
 .../{HdfsFileReader.java => HadoopFileReader.java} |  6 +-
 .../{HdfsFileWriter.java => HadoopFileWriter.java} |  6 +-
 ...andler.java => HadoopShuffleDeleteHandler.java} | 12 ++--
 ...dHandler.java => HadoopShuffleReadHandler.java} | 22 +++----
 ...Handler.java => HadoopShuffleWriteHandler.java} | 16 ++---
 ...r.java => PooledHadoopShuffleWriteHandler.java} | 22 +++----
 .../uniffle/storage/util/ShuffleStorageUtils.java  | 18 +++---
 .../apache/uniffle/storage/util/StorageType.java   |  2 +-
 ...Base.java => HadoopShuffleHandlerTestBase.java} | 18 +++---
 .../{HdfsTestBase.java => HadoopTestBase.java}     |  2 +-
 ...rTest.java => HadoopClientReadHandlerTest.java} | 20 +++----
 ...leReaderTest.java => HadoopFileReaderTest.java} | 12 ++--
 ...leWriterTest.java => HadoopFileWriterTest.java} | 22 +++----
 ...HdfsHandlerTest.java => HadoopHandlerTest.java} | 16 ++---
 ...Test.java => HadoopShuffleReadHandlerTest.java} | 40 ++++++-------
 ... => KerberizedHadoopClientReadHandlerTest.java} | 14 ++---
 ...=> KerberizedHadoopShuffleReadHandlerTest.java} | 14 ++---
 ...va => PooledHadoopShuffleWriteHandlerTest.java} |  8 +--
 ...est.java => ShuffleHadoopStorageUtilsTest.java} | 10 ++--
 ...> ShuffleKerberizedHadoopStorageUtilsTest.java} | 16 ++---
 .../uniffle/storage/util/StorageTypeTest.java      | 14 ++---
 66 files changed, 534 insertions(+), 530 deletions(-)

diff --git a/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/AbstractRssReaderTest.java b/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/AbstractRssReaderTest.java
index 489e7fde..93f80ede 100644
--- a/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/AbstractRssReaderTest.java
+++ b/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/AbstractRssReaderTest.java
@@ -39,13 +39,13 @@ import org.apache.uniffle.common.ShufflePartitionedBlock;
 import org.apache.uniffle.common.compression.Codec;
 import org.apache.uniffle.common.config.RssConf;
 import org.apache.uniffle.common.util.ChecksumUtils;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.handler.api.ShuffleWriteHandler;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 
-public abstract class AbstractRssReaderTest extends HdfsTestBase {
+public abstract class AbstractRssReaderTest extends HadoopTestBase {
 
   private AtomicInteger atomicInteger = new AtomicInteger(0);
 
diff --git a/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/RssShuffleDataIteratorTest.java b/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/RssShuffleDataIteratorTest.java
index b1be1ec3..ce92f349 100644
--- a/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/RssShuffleDataIteratorTest.java
+++ b/client-spark/common/src/test/java/org/apache/spark/shuffle/reader/RssShuffleDataIteratorTest.java
@@ -46,7 +46,7 @@ import org.apache.uniffle.common.ShuffleServerInfo;
 import org.apache.uniffle.common.config.RssConf;
 import org.apache.uniffle.common.util.ChecksumUtils;
 import org.apache.uniffle.common.util.Constants;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -69,8 +69,8 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
   @Test
   public void readTest1() throws Exception {
     String basePath = HDFS_URI + "readTest1";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -130,10 +130,10 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
   @Test
   public void readTest3() throws Exception {
     String basePath = HDFS_URI + "readTest3";
-    HdfsShuffleWriteHandler writeHandler1 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
-    HdfsShuffleWriteHandler writeHandler2 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler1 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler2 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -165,8 +165,8 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
   @Test
   public void readTest4() throws Exception {
     String basePath = HDFS_URI + "readTest4";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -199,8 +199,8 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
   @Test
   public void readTest5() throws Exception {
     String basePath = HDFS_URI + "readTest5";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -225,8 +225,8 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
   @Test
   public void readTest7() throws Exception {
     String basePath = HDFS_URI + "readTest7";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -269,10 +269,10 @@ public class RssShuffleDataIteratorTest extends AbstractRssReaderTest {
 
   private void readTestCompressOrNot(String path, boolean compress) throws Exception {
     String basePath = HDFS_URI + path;
-    HdfsShuffleWriteHandler writeHandler1 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
-    HdfsShuffleWriteHandler writeHandler2 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler1 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler2 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
diff --git a/client-spark/spark2/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java b/client-spark/spark2/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
index 61a7fa07..daf7fae0 100644
--- a/client-spark/spark2/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
+++ b/client-spark/spark2/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
@@ -38,7 +38,7 @@ import org.roaringbitmap.longlong.Roaring64NavigableMap;
 import org.apache.uniffle.common.ShuffleServerInfo;
 import org.apache.uniffle.common.config.RssClientConf;
 import org.apache.uniffle.common.config.RssConf;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.mockito.Mockito.doNothing;
@@ -54,8 +54,8 @@ public class RssShuffleReaderTest extends AbstractRssReaderTest {
   public void readTest() throws Exception {
     ShuffleServerInfo ssi = new ShuffleServerInfo("127.0.0.1", 0);
     String basePath = HDFS_URI + "readTest1";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi.getId(), conf);
 
     Map<String, String> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
diff --git a/client-spark/spark3/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java b/client-spark/spark3/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
index 83593976..1bbd5def 100644
--- a/client-spark/spark3/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
+++ b/client-spark/spark3/src/test/java/org/apache/spark/shuffle/reader/RssShuffleReaderTest.java
@@ -40,7 +40,7 @@ import org.apache.uniffle.common.ShuffleDataDistributionType;
 import org.apache.uniffle.common.ShuffleServerInfo;
 import org.apache.uniffle.common.config.RssClientConf;
 import org.apache.uniffle.common.config.RssConf;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.mockito.Mockito.doNothing;
@@ -56,10 +56,10 @@ public class RssShuffleReaderTest extends AbstractRssReaderTest {
   public void readTest() throws Exception {
     ShuffleServerInfo ssi = new ShuffleServerInfo("127.0.0.1", 0);
     String basePath = HDFS_URI + "readTest1";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 0, basePath, ssi.getId(), conf);
-    final HdfsShuffleWriteHandler writeHandler1 =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 0, basePath, ssi.getId(), conf);
+    final HadoopShuffleWriteHandler writeHandler1 =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi.getId(), conf);
 
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
     final Roaring64NavigableMap taskIdBitmap = Roaring64NavigableMap.bitmapOf(0);
diff --git a/client/src/main/java/org/apache/uniffle/client/util/ClientUtils.java b/client/src/main/java/org/apache/uniffle/client/util/ClientUtils.java
index d9b51883..7787c957 100644
--- a/client/src/main/java/org/apache/uniffle/client/util/ClientUtils.java
+++ b/client/src/main/java/org/apache/uniffle/client/util/ClientUtils.java
@@ -122,7 +122,7 @@ public class ClientUtils {
   public static void validateTestModeConf(boolean testMode, String storageType) {
     if (!testMode && (StorageType.LOCALFILE.name().equals(storageType)
             || (StorageType.HDFS.name()).equals(storageType))) {
-      throw new IllegalArgumentException("LOCALFILE or HDFS storage type should be used in test mode only, "
+      throw new IllegalArgumentException("LOCALFILE or HADOOP storage type should be used in test mode only, "
               + "because of the poor performance of these two types.");
     }
   }
diff --git a/client/src/test/java/org/apache/uniffle/client/impl/ShuffleReadClientImplTest.java b/client/src/test/java/org/apache/uniffle/client/impl/ShuffleReadClientImplTest.java
index 3ca35ebb..0d27f1e9 100644
--- a/client/src/test/java/org/apache/uniffle/client/impl/ShuffleReadClientImplTest.java
+++ b/client/src/test/java/org/apache/uniffle/client/impl/ShuffleReadClientImplTest.java
@@ -41,8 +41,8 @@ import org.apache.uniffle.common.ShufflePartitionedBlock;
 import org.apache.uniffle.common.ShuffleServerInfo;
 import org.apache.uniffle.common.util.ChecksumUtils;
 import org.apache.uniffle.common.util.Constants;
-import org.apache.uniffle.storage.HdfsTestBase;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.HadoopTestBase;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -51,7 +51,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 
-public class ShuffleReadClientImplTest extends HdfsTestBase {
+public class ShuffleReadClientImplTest extends HadoopTestBase {
 
   private static final String EXPECTED_EXCEPTION_MESSAGE = "Exception should be thrown";
   private static AtomicLong ATOMIC_LONG = new AtomicLong(0);
@@ -62,8 +62,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest1() throws Exception {
     String basePath = HDFS_URI + "clientReadTest1";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -99,10 +99,10 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest2() throws Exception {
     String basePath = HDFS_URI + "clientReadTest2";
-    HdfsShuffleWriteHandler writeHandler1 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
-    HdfsShuffleWriteHandler writeHandler2 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler1 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler2 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -123,10 +123,10 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest3() throws Exception {
     String basePath = HDFS_URI + "clientReadTest3";
-    HdfsShuffleWriteHandler writeHandler1 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
-    HdfsShuffleWriteHandler writeHandler2 =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler1 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler2 =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi2.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     final Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -158,8 +158,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest4() throws Exception {
     String basePath = HDFS_URI + "clientReadTest4";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -193,8 +193,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest5() throws Exception {
     String basePath = HDFS_URI + "clientReadTest5";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -215,8 +215,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest7() throws Exception {
     String basePath = HDFS_URI + "clientReadTest7";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData1 = Maps.newHashMap();
     Map<Long, byte[]> expectedData2 = Maps.newHashMap();
@@ -247,8 +247,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest8() throws Exception {
     String basePath = HDFS_URI + "clientReadTest8";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -296,8 +296,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest10() throws Exception {
     String basePath = HDFS_URI + "clientReadTest10";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 0, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -325,8 +325,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest11() throws Exception {
     String basePath = HDFS_URI + "clientReadTest11";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -378,8 +378,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest12() throws Exception {
     String basePath = HDFS_URI + "clientReadTest12";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     final Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -402,8 +402,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest13() throws Exception {
     String basePath = HDFS_URI + "clientReadTest13";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     final Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -429,8 +429,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest14() throws Exception {
     String basePath = HDFS_URI + "clientReadTest14";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     final Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -453,8 +453,8 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   @Test
   public void readTest15() throws Exception {
     String basePath = HDFS_URI + "clientReadTest15";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 0, 1, 1, basePath, ssi1.getId(), conf);
 
     Map<Long, byte[]> expectedData = Maps.newHashMap();
     final Roaring64NavigableMap blockIdBitmap = Roaring64NavigableMap.bitmapOf();
@@ -477,7 +477,7 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   }
 
   private void writeTestData(
-      HdfsShuffleWriteHandler writeHandler,
+      HadoopShuffleWriteHandler writeHandler,
       int num, int length, long taskAttemptId,
       Map<Long, byte[]> expectedData,
       Roaring64NavigableMap blockIdBitmap) throws Exception {
@@ -496,7 +496,7 @@ public class ShuffleReadClientImplTest extends HdfsTestBase {
   }
 
   private void writeDuplicatedData(
-      HdfsShuffleWriteHandler writeHandler,
+      HadoopShuffleWriteHandler writeHandler,
       int num, int length, long taskAttemptId,
       Map<Long, byte[]> expectedData,
       Roaring64NavigableMap blockIdBitmap) throws Exception {
diff --git a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java b/common/src/test/java/org/apache/uniffle/common/KerberizedHadoop.java
similarity index 98%
rename from common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
rename to common/src/test/java/org/apache/uniffle/common/KerberizedHadoop.java
index 38f551b5..935f17dc 100644
--- a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
+++ b/common/src/test/java/org/apache/uniffle/common/KerberizedHadoop.java
@@ -70,8 +70,8 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSF
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class KerberizedHdfs implements Serializable {
-  private static final Logger LOGGER = LoggerFactory.getLogger(KerberizedHdfs.class);
+public class KerberizedHadoop implements Serializable {
+  private static final Logger LOGGER = LoggerFactory.getLogger(KerberizedHadoop.class);
 
   private MiniKdc kdc;
   private File workDir;
@@ -80,7 +80,7 @@ public class KerberizedHdfs implements Serializable {
 
   private MiniDFSCluster kerberizedDfsCluster;
 
-  private Class testRunnerCls = KerberizedHdfs.class;
+  private Class testRunnerCls = KerberizedHadoop.class;
 
   // The superuser for accessing HDFS
   private String hdfsKeytab;
@@ -237,7 +237,7 @@ public class KerberizedHdfs implements Serializable {
     if (kdc != null) {
       kdc.stop();
     }
-    setTestRunner(KerberizedHdfs.class);
+    setTestRunner(KerberizedHadoop.class);
     UserGroupInformation.reset();
   }
 
diff --git a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfsBase.java b/common/src/test/java/org/apache/uniffle/common/KerberizedHadoopBase.java
similarity index 80%
rename from common/src/test/java/org/apache/uniffle/common/KerberizedHdfsBase.java
rename to common/src/test/java/org/apache/uniffle/common/KerberizedHadoopBase.java
index aec36275..9b61f83d 100644
--- a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfsBase.java
+++ b/common/src/test/java/org/apache/uniffle/common/KerberizedHadoopBase.java
@@ -26,28 +26,28 @@ import org.apache.uniffle.common.security.SecurityContextFactory;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class KerberizedHdfsBase {
-  protected static KerberizedHdfs kerberizedHdfs;
-  protected static Class<?> testRunner = KerberizedHdfsBase.class;
+public class KerberizedHadoopBase {
+  protected static KerberizedHadoop kerberizedHadoop;
+  protected static Class<?> testRunner = KerberizedHadoopBase.class;
 
   public static void init() throws Exception {
-    kerberizedHdfs = new KerberizedHdfs();
-    kerberizedHdfs.setTestRunner(testRunner);
-    kerberizedHdfs.setup();
+    kerberizedHadoop = new KerberizedHadoop();
+    kerberizedHadoop.setTestRunner(testRunner);
+    kerberizedHadoop.setup();
   }
 
   @AfterAll
   public static void clear() throws Exception {
-    kerberizedHdfs.tearDown();
-    kerberizedHdfs = null;
+    kerberizedHadoop.tearDown();
+    kerberizedHadoop = null;
   }
 
   public static void initHadoopSecurityContext() throws Exception {
     // init the security context
     SecurityConfig securityConfig = SecurityConfig
         .newBuilder()
-        .keytabFilePath(kerberizedHdfs.getHdfsKeytab())
-        .principal(kerberizedHdfs.getHdfsPrincipal())
+        .keytabFilePath(kerberizedHadoop.getHdfsKeytab())
+        .principal(kerberizedHadoop.getHdfsPrincipal())
         .reloginIntervalSec(1000)
         .build();
     SecurityContextFactory.get().init(securityConfig);
diff --git a/common/src/test/java/org/apache/uniffle/common/filesystem/HadoopFilesystemProviderTest.java b/common/src/test/java/org/apache/uniffle/common/filesystem/HadoopFilesystemProviderTest.java
index fd3de044..5442946d 100644
--- a/common/src/test/java/org/apache/uniffle/common/filesystem/HadoopFilesystemProviderTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/filesystem/HadoopFilesystemProviderTest.java
@@ -31,17 +31,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class HadoopFilesystemProviderTest extends KerberizedHdfsBase {
+public class HadoopFilesystemProviderTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
     testRunner = HadoopFilesystemProvider.class;
-    KerberizedHdfsBase.init();
+    KerberizedHadoopBase.init();
     UserGroupInformation.reset();
   }
 
@@ -54,7 +54,8 @@ public class HadoopFilesystemProviderTest extends KerberizedHdfsBase {
     removeHadoopSecurityContext();
 
     try {
-      FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem(new Path("/hdfs"), kerberizedHdfs.getConf());
+      FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem(new Path("/hdfs"),
+              kerberizedHadoop.getConf());
       fileSystem.mkdirs(new Path("/hdfs/HadoopFilesystemProviderTest"));
     } catch (AccessControlException e) {
       // ignore
@@ -67,20 +68,22 @@ public class HadoopFilesystemProviderTest extends KerberizedHdfsBase {
 
     // case1: it should throw exception when user is empty or null.
     try {
-      FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem(null, new Path("/hdfs"), kerberizedHdfs.getConf());
+      FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem(null, new Path("/hdfs"),
+              kerberizedHadoop.getConf());
     } catch (Exception e) {
       assertTrue(e.getMessage().contains("User must be not null or empty"));
     }
 
     // case2: it should return the proxy user's filesystem
-    FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem("alex", new Path("/alex"), kerberizedHdfs.getConf());
+    FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem("alex", new Path("/alex"),
+            kerberizedHadoop.getConf());
     Path alexPath = new Path("/alex/HadoopFilesystemProviderTest-testGetSecuredFilesystem");
     assertTrue(fileSystem.mkdirs(alexPath));
 
     assertEquals("alex", fileSystem.getFileStatus(alexPath).getOwner());
 
     // case3: it should return the login user's filesystem
-    fileSystem = HadoopFilesystemProvider.getFilesystem(new Path("/hdfs"), kerberizedHdfs.getConf());
+    fileSystem = HadoopFilesystemProvider.getFilesystem(new Path("/hdfs"), kerberizedHadoop.getConf());
     Path hdfsPath = new Path("/hdfs/HadoopFilesystemProviderTest-testGetSecuredFilesystem");
     assertTrue(fileSystem.mkdirs(hdfsPath));
 
@@ -94,7 +97,7 @@ public class HadoopFilesystemProviderTest extends KerberizedHdfsBase {
     // write file by proxy user.
     String fileContent = "hello world";
     Path filePath = new Path("/alex/HadoopFilesystemProviderTest-testWriteAndReadBySecuredFilesystem.file");
-    FileSystem writeFs = HadoopFilesystemProvider.getFilesystem("alex", filePath, kerberizedHdfs.getConf());
+    FileSystem writeFs = HadoopFilesystemProvider.getFilesystem("alex", filePath, kerberizedHadoop.getConf());
 
     boolean ok = writeFs.exists(new org.apache.hadoop.fs.Path("/alex"));
     assertTrue(ok);
@@ -110,11 +113,11 @@ public class HadoopFilesystemProviderTest extends KerberizedHdfsBase {
 
     // Read content from HDFS by alex user directly
     UserGroupInformation readerUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-        kerberizedHdfs.getAlexPrincipal() + "@" + kerberizedHdfs.getKdc().getRealm(),
-        kerberizedHdfs.getAlexKeytab()
+        kerberizedHadoop.getAlexPrincipal() + "@" + kerberizedHadoop.getKdc().getRealm(),
+        kerberizedHadoop.getAlexKeytab()
     );
     readerUGI.doAs((PrivilegedExceptionAction<Object>) () -> {
-      FileSystem fs = FileSystem.get(kerberizedHdfs.getConf());
+      FileSystem fs = FileSystem.get(kerberizedHadoop.getConf());
       FSDataInputStream inputStream = fs.open(filePath);
       String fetchedResult = IOUtils.toString(inputStream);
       assertEquals(fileContent, fetchedResult);
diff --git a/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java b/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
index f4f79f76..13839557 100644
--- a/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
@@ -28,26 +28,26 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-public class HadoopSecurityContextTest extends KerberizedHdfsBase {
+public class HadoopSecurityContextTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
     testRunner = HadoopSecurityContextTest.class;
-    KerberizedHdfsBase.init();
+    KerberizedHadoopBase.init();
   }
 
   @Test
   public void testSecuredCallable() throws Exception {
     try (HadoopSecurityContext context = new HadoopSecurityContext(
               null,
-              kerberizedHdfs.getHdfsKeytab(),
-              kerberizedHdfs.getHdfsPrincipal(),
+              kerberizedHadoop.getHdfsKeytab(),
+              kerberizedHadoop.getHdfsPrincipal(),
               1000)) {
 
       // case1: when user is empty or null, it should throw exception
@@ -61,10 +61,10 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
       // case2: run by the login user, there is no need to wrap proxy action
       Path pathWithHdfsUser = new Path("/hdfs/HadoopSecurityContextTest");
       context.runSecured("hdfs", (Callable<Void>) () -> {
-        kerberizedHdfs.getFileSystem().mkdirs(pathWithHdfsUser);
+        kerberizedHadoop.getFileSystem().mkdirs(pathWithHdfsUser);
         return null;
       });
-      FileStatus fileStatus = kerberizedHdfs.getFileSystem().getFileStatus(pathWithHdfsUser);
+      FileStatus fileStatus = kerberizedHadoop.getFileSystem().getFileStatus(pathWithHdfsUser);
       assertEquals("hdfs", fileStatus.getOwner());
 
       // case3: run by the proxy user
@@ -72,10 +72,10 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
       AtomicReference<UserGroupInformation> ugi1 = new AtomicReference<>();
       context.runSecured("alex", (Callable<Void>) () -> {
         ugi1.set(UserGroupInformation.getCurrentUser());
-        kerberizedHdfs.getFileSystem().mkdirs(pathWithAlexUser);
+        kerberizedHadoop.getFileSystem().mkdirs(pathWithAlexUser);
         return null;
       });
-      fileStatus = kerberizedHdfs.getFileSystem().getFileStatus(pathWithAlexUser);
+      fileStatus = kerberizedHadoop.getFileSystem().getFileStatus(pathWithAlexUser);
       assertEquals("alex", fileStatus.getOwner());
 
       // case4: run by the proxy user again, it will always return the same
@@ -88,8 +88,8 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
       assertTrue(ugi1.get() == ugi2.get());
       assertTrue(ugi1.get() == context.getProxyUserUgiPool().get("alex"));
 
-      FileSystem fileSystem1 = context.runSecured("alex", () -> FileSystem.get(kerberizedHdfs.getConf()));
-      FileSystem fileSystem2 = context.runSecured("alex", () -> FileSystem.get(kerberizedHdfs.getConf()));
+      FileSystem fileSystem1 = context.runSecured("alex", () -> FileSystem.get(kerberizedHadoop.getConf()));
+      FileSystem fileSystem2 = context.runSecured("alex", () -> FileSystem.get(kerberizedHadoop.getConf()));
       assertTrue(fileSystem1 == fileSystem2);
     }
   }
@@ -101,7 +101,7 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
     // case1: lack principal, should throw exception
     try (HadoopSecurityContext context = new HadoopSecurityContext(
             null,
-            kerberizedHdfs.getHdfsKeytab(),
+            kerberizedHadoop.getHdfsKeytab(),
             null,
             1000)) {
       fail();
@@ -113,7 +113,7 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
     try (HadoopSecurityContext context = new HadoopSecurityContext(
             null,
             null,
-            kerberizedHdfs.getHdfsPrincipal(),
+            kerberizedHadoop.getHdfsPrincipal(),
             1000)) {
       fail();
     } catch (Exception e) {
@@ -123,8 +123,8 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
     // case3: illegal re-login interval sec
     try (HadoopSecurityContext context = new HadoopSecurityContext(
             null,
-            kerberizedHdfs.getHdfsKeytab(),
-            kerberizedHdfs.getHdfsPrincipal(),
+            kerberizedHadoop.getHdfsKeytab(),
+            kerberizedHadoop.getHdfsPrincipal(),
             0)) {
       fail();
     } catch (Exception e) {
@@ -136,8 +136,8 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
     System.clearProperty("java.security.krb5.conf");
     HadoopSecurityContext context = new HadoopSecurityContext(
             krbConfFilePath,
-            kerberizedHdfs.getHdfsKeytab(),
-            kerberizedHdfs.getHdfsPrincipal(),
+            kerberizedHadoop.getHdfsKeytab(),
+            kerberizedHadoop.getHdfsPrincipal(),
             100
     );
     context.close();
@@ -153,8 +153,8 @@ public class HadoopSecurityContextTest extends KerberizedHdfsBase {
     System.clearProperty("java.security.krb5.conf");
     try (HadoopSecurityContext context2 = new HadoopSecurityContext(
             null,
-            kerberizedHdfs.getHdfsKeytab(),
-            kerberizedHdfs.getHdfsPrincipal(),
+            kerberizedHadoop.getHdfsKeytab(),
+            kerberizedHadoop.getHdfsPrincipal(),
             100)) {
       fail();
     } catch (Exception e) {
diff --git a/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java b/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
index 7fcde9ed..0f7e92d5 100644
--- a/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
@@ -21,17 +21,17 @@ import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.fail;
 
-public class SecurityContextFactoryTest extends KerberizedHdfsBase {
+public class SecurityContextFactoryTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
     testRunner = SecurityContextFactoryTest.class;
-    KerberizedHdfsBase.init();
+    KerberizedHadoopBase.init();
   }
 
   @AfterEach
@@ -70,8 +70,8 @@ public class SecurityContextFactoryTest extends KerberizedHdfsBase {
     // case2: create the correct hadoop security context
     final SecurityConfig correctConfig = SecurityConfig
         .newBuilder()
-        .keytabFilePath(kerberizedHdfs.getHdfsKeytab())
-        .principal(kerberizedHdfs.getHdfsPrincipal())
+        .keytabFilePath(kerberizedHadoop.getHdfsKeytab())
+        .principal(kerberizedHadoop.getHdfsPrincipal())
         .reloginIntervalSec(60)
         .build();
     SecurityContextFactory.get().init(correctConfig);
diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/ApplicationManager.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/ApplicationManager.java
index 4aea5e12..84885914 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/ApplicationManager.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/ApplicationManager.java
@@ -223,7 +223,7 @@ public class ApplicationManager implements Closeable {
   public synchronized void removePathFromCounter(String storagePath) {
     RankValue atomic = remoteStoragePathRankValue.get(storagePath);
     // The time spent reading and writing cannot be used to determine whether the current path is still used by apps.
-    // Therefore, determine whether the HDFS path is still used by the number of apps
+    // Therefore, determine whether the Hadoop FS path is still used by the number of apps
     if (atomic != null && atomic.getAppNum().get() == 0) {
       remoteStoragePathRankValue.remove(storagePath);
     }
diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
index 9dfe7d33..63cb5840 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
@@ -143,7 +143,7 @@ public class CoordinatorConf extends RssBaseConf {
       .key("rss.coordinator.remote.storage.schedule.time")
       .longType()
       .defaultValue(60 * 1000L)
-      .withDescription("The time of scheduling the read and write time of the paths to obtain different HDFS");
+      .withDescription("The time of scheduling the read and write time of the paths to obtain different Hadoop FS");
   public static final ConfigOption<Integer> COORDINATOR_REMOTE_STORAGE_SCHEDULE_FILE_SIZE = ConfigOptions
       .key("rss.coordinator.remote.storage.schedule.file.size")
       .intType()
@@ -153,7 +153,7 @@ public class CoordinatorConf extends RssBaseConf {
       .key("rss.coordinator.remote.storage.schedule.access.times")
       .intType()
       .defaultValue(3)
-      .withDescription("The number of times to read and write HDFS files");
+      .withDescription("The number of times to read and write Hadoop FS files");
   public static final ConfigOption<AbstractAssignmentStrategy.HostAssignmentStrategyName>
       COORDINATOR_ASSIGNMENT_HOST_STRATEGY =
       ConfigOptions.key("rss.coordinator.assignment.host.strategy")
diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/strategy/storage/AbstractSelectStorageStrategy.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/strategy/storage/AbstractSelectStorageStrategy.java
index 0524cc76..ac0f8ece 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/strategy/storage/AbstractSelectStorageStrategy.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/strategy/storage/AbstractSelectStorageStrategy.java
@@ -57,7 +57,7 @@ public abstract class AbstractSelectStorageStrategy implements SelectStorageStra
   protected final Map<String, RankValue> remoteStoragePathRankValue;
   protected final int fileSize;
   private final String coordinatorId;
-  private final Configuration hdfsConf;
+  private final Configuration hadoopConf;
   private final CoordinatorConf conf;
   protected List<Map.Entry<String, RankValue>> uris;
 
@@ -65,14 +65,14 @@ public abstract class AbstractSelectStorageStrategy implements SelectStorageStra
       Map<String, RankValue> remoteStoragePathRankValue,
       CoordinatorConf conf) {
     this.remoteStoragePathRankValue = remoteStoragePathRankValue;
-    this.hdfsConf = new Configuration();
+    this.hadoopConf = new Configuration();
     this.fileSize = conf.getInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_SCHEDULE_FILE_SIZE);
     this.coordinatorId = conf.getString(CoordinatorUtils.COORDINATOR_ID, UUID.randomUUID().toString());
     this.conf = conf;
   }
 
-  public void readAndWriteHdfsStorage(FileSystem fs, Path testPath,
-      String uri, RankValue rankValue) throws IOException {
+  public void readAndWriteHadoopStorage(FileSystem fs, Path testPath,
+                                        String uri, RankValue rankValue) throws IOException {
     byte[] data = RandomUtils.nextBytes(fileSize);
     try (FSDataOutputStream fos = fs.create(testPath)) {
       fos.write(data);
@@ -112,9 +112,9 @@ public abstract class AbstractSelectStorageStrategy implements SelectStorageStra
           rankValue.setHealthy(new AtomicBoolean(true));
           long startWriteTime = System.currentTimeMillis();
           try {
-            FileSystem fs = HadoopFilesystemProvider.getFilesystem(remotePath, hdfsConf);
+            FileSystem fs = HadoopFilesystemProvider.getFilesystem(remotePath, hadoopConf);
             for (int j = 0; j < readAndWriteTimes(conf); j++) {
-              readAndWriteHdfsStorage(fs, testPath, uri.getKey(), rankValue);
+              readAndWriteHadoopStorage(fs, testPath, uri.getKey(), rankValue);
             }
           } catch (Exception e) {
             LOG.error("Storage read and write error, we will not use this remote path {}.", uri, e);
@@ -138,7 +138,7 @@ public abstract class AbstractSelectStorageStrategy implements SelectStorageStra
       String path, String testPath, long startWrite) {
     RankValue rankValue = remoteStoragePathRankValue.get(path);
     try {
-      FileSystem fs = HadoopFilesystemProvider.getFilesystem(new Path(path), hdfsConf);
+      FileSystem fs = HadoopFilesystemProvider.getFilesystem(new Path(path), hadoopConf);
       fs.delete(new Path(testPath), true);
       if (rankValue.getHealthy().get()) {
         rankValue.setCostTime(new AtomicLong(System.currentTimeMillis() - startWrite));
diff --git a/docs/client_guide.md b/docs/client_guide.md
index 786de504..7864a2cb 100644
--- a/docs/client_guide.md
+++ b/docs/client_guide.md
@@ -105,23 +105,23 @@ The important configuration of client is listed as following.
 ### Common Setting
 These configurations are shared by all types of clients.
 
-|Property Name|Default|Description|
-|---|---|---|
-|<client_type>.rss.coordinator.quorum|-|Coordinator quorum|
-|<client_type>.rss.writer.buffer.size|3m|Buffer size for single partition data|
-|<client_type>.rss.storage.type|-|Supports MEMORY_LOCALFILE, MEMORY_HDFS, MEMORY_LOCALFILE_HDFS|
-|<client_type>.rss.client.read.buffer.size|14m|The max data size read from storage|
-|<client_type>.rss.client.send.threadPool.size|5|The thread size for send shuffle data to shuffle server|
-|<client_type>.rss.client.assignment.tags|-|The comma-separated list of tags for deciding assignment shuffle servers. Notice that the SHUFFLE_SERVER_VERSION will always as the assignment tag whether this conf is set or not|
-|<client_type>.rss.client.data.commit.pool.size|The number of assigned shuffle servers|The thread size for sending commit to shuffle servers|
-|<client_type>.rss.client.assignment.shuffle.nodes.max|-1|The number of required assignment shuffle servers. If it is less than 0 or equals to 0 or greater than the coordinator's config of "rss.coordinator.shuffle.nodes.max", it will use the size of "rss.coordinator.shuffle.nodes.max" default|
-|<client_type>.rss.client.io.compression.codec|lz4|The compression codec is used to compress the shuffle data. Default codec is `lz4`. Other options are`ZSTD` and `SNAPPY`.|
-|<client_type>.rss.client.io.compression.zstd.level|3|The zstd compression level, the default level is 3|
-|<client_type>.rss.client.shuffle.data.distribution.type|NORMAL|The type of partition shuffle data distribution, including normal and local_order. The default value is normal. Now this config is only valid in Spark3.x|
-|<client_type>.rss.estimate.task.concurrency.dynamic.factor|1.0|Between 0 and 1, used to estimate task concurrency, when the client is spark, it represents how likely is this part of the resource between spark.dynamicAllocation.minExecutors and spark.dynamicAllocation.maxExecutors to be allocated, when the client is mr, it represents how likely the resources of map and reduce are satisfied. Effective when <client_type>.rss.estimate.server.assignment.enabled=true or Coordinator's rss.coor [...]
-|<client_type>.rss.estimate.server.assignment.enabled|false|Support mr and spark, whether to enable estimation of the number of ShuffleServers that need to be allocated based on the number of concurrent tasks.|
-|<client_type>.rss.estimate.task.concurrency.per.server|80|It takes effect when rss.estimate.server.assignment.enabled=true, how many tasks are concurrently assigned to a ShuffleServer.|
-|<client_type>.rss.client.max.concurrency.of.per-partition.write|-|The maximum number of files that can be written concurrently to a single partition is determined. This value will only be respected by the remote shuffle server if it is greater than 0.|
+|Property Name|Default| Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                          [...]
+|---|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|<client_type>.rss.coordinator.quorum|-| Coordinator quorum                                                                                                                                                                                                                                                                                                                                                                                                                                                  [...]
+|<client_type>.rss.writer.buffer.size|3m| Buffer size for single partition data                                                                                                                                                                                                                                                                                                                                                                                                                              [...]
+|<client_type>.rss.storage.type|-| Supports MEMORY_LOCALFILE, MEMORY_HDFS, MEMORY_LOCALFILE_HDFS                                                                                                                                                                                                                                                                                                                                                                                                             [...]
+|<client_type>.rss.client.read.buffer.size|14m| The max data size read from storage                                                                                                                                                                                                                                                                                                                                                                                                                          [...]
+|<client_type>.rss.client.send.threadPool.size|5| The thread size for send shuffle data to shuffle server                                                                                                                                                                                                                                                                                                                                                                                                    [...]
+|<client_type>.rss.client.assignment.tags|-| The comma-separated list of tags for deciding assignment shuffle servers. Notice that the SHUFFLE_SERVER_VERSION will always as the assignment tag whether this conf is set or not                                                                                                                                                                                                                                                                              [...]
+|<client_type>.rss.client.data.commit.pool.size|The number of assigned shuffle servers| The thread size for sending commit to shuffle servers                                                                                                                                                                                                                                                                                                                                                                [...]
+|<client_type>.rss.client.assignment.shuffle.nodes.max|-1| The number of required assignment shuffle servers. If it is less than 0 or equals to 0 or greater than the coordinator's config of "rss.coordinator.shuffle.nodes.max", it will use the size of "rss.coordinator.shuffle.nodes.max" default                                                                                                                                                                                                       [...]
+|<client_type>.rss.client.io.compression.codec|lz4| The compression codec is used to compress the shuffle data. Default codec is `lz4`. Other options are`ZSTD` and `SNAPPY`.                                                                                                                                                                                                                                                                                                                                [...]
+|<client_type>.rss.client.io.compression.zstd.level|3| The zstd compression level, the default level is 3                                                                                                                                                                                                                                                                                                                                                                                                    [...]
+|<client_type>.rss.client.shuffle.data.distribution.type|NORMAL| The type of partition shuffle data distribution, including normal and local_order. The default value is normal. Now this config is only valid in Spark3.x                                                                                                                                                                                                                                                                                   [...]
+|<client_type>.rss.estimate.task.concurrency.dynamic.factor|1.0| Between 0 and 1, used to estimate task concurrency, when the client is spark, it represents how likely is this part of the resource between spark.dynamicAllocation.minExecutors and spark.dynamicAllocation.maxExecutors to be allocated, when the client is mr, it represents how likely the resources of map and reduce are satisfied. Effective when <client_type>.rss.estimate.server.assignment.enabled=true or Coordinator's rss.coo [...]
+|<client_type>.rss.estimate.server.assignment.enabled|false| Support mr and spark, whether to enable estimation of the number of ShuffleServers that need to be allocated based on the number of concurrent tasks.                                                                                                                                                                                                                                                                                           [...]
+|<client_type>.rss.estimate.task.concurrency.per.server|80| It takes effect when rss.estimate.server.assignment.enabled=true, how many tasks are concurrently assigned to a ShuffleServer.                                                                                                                                                                                                                                                                                                                   [...]
+|<client_type>.rss.client.max.concurrency.of.per-partition.write|-| The maximum number of files that can be written concurrently to a single partition is determined. This value will only be respected by the remote shuffle server if it is greater than 0.                                                                                                                                                                                                                                                [...]
 Notice:
 
 1. `<client_type>` should be `spark` or `mapreduce`
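For example, with a Spark client the properties above can be supplied through `spark-defaults.conf` or `--conf`. The fragment below is only an illustrative sketch: the coordinator addresses are placeholders, and the remaining values simply restate defaults from the table.

```
spark.rss.coordinator.quorum <coordinator-host-1>:<port>,<coordinator-host-2>:<port>
spark.rss.storage.type MEMORY_LOCALFILE_HDFS
spark.rss.writer.buffer.size 3m
spark.rss.client.read.buffer.size 14m
spark.rss.client.io.compression.codec lz4
```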
@@ -210,12 +210,12 @@ The important configuration is listed as following.
 In cloud environments, VMs may have very limited disk space and performance.
 This experimental feature allows reduce tasks to spill data to remote storage (e.g., HDFS).
 
-|Property Name|Default|Description|
-|---|---|---|
-|mapreduce.rss.reduce.remote.spill.enable|false|Whether to use remote spill|
-|mapreduce.rss.reduce.remote.spill.attempt.inc|1|Increase reduce attempts as hdfs is easier to crash than disk|
-|mapreduce.rss.reduce.remote.spill.replication|1|The replication number to spill data to hdfs|
-|mapreduce.rss.reduce.remote.spill.retries|5|The retry number to spill data to hdfs|
+|Property Name|Default| Description                                                            |
+|---|---|------------------------------------------------------------------------|
+|mapreduce.rss.reduce.remote.spill.enable|false| Whether to use remote spill                                            |
+|mapreduce.rss.reduce.remote.spill.attempt.inc|1| Increase reduce attempts since the Hadoop FS may be less reliable than local disk |
+|mapreduce.rss.reduce.remote.spill.replication|1| The replication number to spill data to Hadoop FS                      |
+|mapreduce.rss.reduce.remote.spill.retries|5| The retry number to spill data to Hadoop FS                            |
 
-Notice: this feature requires the MEMORY_LOCAL_HDFS mode.
+Notice: this feature requires a storage type that includes a Hadoop FS, e.g. MEMORY_LOCALFILE_HDFS.
  
\ No newline at end of file
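As an illustrative sketch (not a complete job configuration), the remote spill settings above could be passed to a MapReduce job as generic `-D` options; the values below only mirror the defaults from the table with the feature switched on.

```
-Dmapreduce.rss.reduce.remote.spill.enable=true
-Dmapreduce.rss.reduce.remote.spill.attempt.inc=1
-Dmapreduce.rss.reduce.remote.spill.replication=1
-Dmapreduce.rss.reduce.remote.spill.retries=5
```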
diff --git a/docs/coordinator_guide.md b/docs/coordinator_guide.md
index ff8662d9..acf0ca00 100644
--- a/docs/coordinator_guide.md
+++ b/docs/coordinator_guide.md
@@ -77,33 +77,33 @@ This document will introduce how to deploy Uniffle coordinators.
 ## Configuration
 
 ### Common settings
-|Property Name|Default|	Description|
-|---|---|---|
-|rss.coordinator.server.heartbeat.timeout|30000|Timeout if can't get heartbeat from shuffle server|
-|rss.coordinator.server.periodic.output.interval.times|30|The periodic interval times of output alive nodes. The interval sec can be calculated by (rss.coordinator.server.heartbeat.timeout/3 * rss.coordinator.server.periodic.output.interval.times). Default output interval is 5min.|
-|rss.coordinator.assignment.strategy|PARTITION_BALANCE|Strategy for assigning shuffle server, PARTITION_BALANCE should be used for workload balance|
-|rss.coordinator.app.expired|60000|Application expired time (ms), the heartbeat interval should be less than it|
-|rss.coordinator.shuffle.nodes.max|9|The max number of shuffle server when do the assignment|
-|rss.coordinator.dynamicClientConf.path|-|The path of configuration file which have default conf for rss client|
-|rss.coordinator.exclude.nodes.file.path|-|The path of configuration file which have exclude nodes|
-|rss.coordinator.exclude.nodes.check.interval.ms|60000|Update interval (ms) for exclude nodes|
-|rss.coordinator.access.checkers|org.apache.uniffle.coordinator.access.checker.AccessClusterLoadChecker|The access checkers will be used when the spark client use the DelegationShuffleManager, which will decide whether to use rss according to the result of the specified access checkers|
-|rss.coordinator.access.loadChecker.memory.percentage|15.0|The minimal percentage of available memory percentage of a server|
-|rss.coordinator.dynamicClientConf.enabled|false|whether to enable dynamic client conf, which will be fetched by spark client|
-|rss.coordinator.dynamicClientConf.path|-|The dynamic client conf of this cluster and can be stored in HDFS or local|
-|rss.coordinator.dynamicClientConf.updateIntervalSec|120|The dynamic client conf update interval in seconds|
-|rss.coordinator.remote.storage.cluster.conf|-|Remote Storage Cluster related conf with format $clusterId,$key=$value, separated by ';'|
-|rss.rpc.server.port|-|RPC port for coordinator|
-|rss.jetty.http.port|-|Http port for coordinator|
-|rss.coordinator.remote.storage.select.strategy|APP_BALANCE|Strategy for selecting the remote path|
-|rss.coordinator.remote.storage.io.sample.schedule.time|60000|The time of scheduling the read and write time of the paths to obtain different HDFS|
-|rss.coordinator.remote.storage.io.sample.file.size|204800000|The size of the file that the scheduled thread reads and writes|
-|rss.coordinator.remote.storage.io.sample.access.times|3|The number of times to read and write HDFS files|
-|rss.coordinator.startup-silent-period.enabled|false|Enable the startup-silent-period to reject the assignment requests for avoiding partial assignments. To avoid service interruption, this mechanism is disabled by default. Especially it's recommended to use in coordinator HA mode when restarting single coordinator.|
-|rss.coordinator.startup-silent-period.duration|20000|The waiting duration(ms) when conf of rss.coordinator.startup-silent-period.enabled is enabled.|
-|rss.coordinator.select.partition.strategy|ROUND|There are two strategies for selecting partitions: ROUND and CONTINUOUS. ROUND will poll to allocate partitions to ShuffleServer, and CONTINUOUS will try to allocate consecutive partitions to ShuffleServer, this feature can improve performance in AQE scenarios.|
-|rss.metrics.reporter.class|-|The class of metrics reporter.|
-|rss.reconfigure.interval.sec|5|Reconfigure check interval.|
+|Property Name|Default| 	Description                                                                                                                                                                                                                                                             |
+|---|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|rss.coordinator.server.heartbeat.timeout|30000| Timeout if can't get heartbeat from shuffle server                                                                                                                                                                                                                       |
+|rss.coordinator.server.periodic.output.interval.times|30| The periodic interval times of output alive nodes. The interval sec can be calculated by (rss.coordinator.server.heartbeat.timeout/3 * rss.coordinator.server.periodic.output.interval.times). Default output interval is 5min.                                          |
+|rss.coordinator.assignment.strategy|PARTITION_BALANCE| Strategy for assigning shuffle server, PARTITION_BALANCE should be used for workload balance                                                                                                                                                                             |
+|rss.coordinator.app.expired|60000| Application expired time (ms), the heartbeat interval should be less than it                                                                                                                                                                                             |
+|rss.coordinator.shuffle.nodes.max|9| The max number of shuffle servers to use when doing the assignment |
+|rss.coordinator.dynamicClientConf.path|-| The path of the configuration file which holds the default conf for the rss client |
+|rss.coordinator.exclude.nodes.file.path|-| The path of the configuration file which lists the excluded nodes |
+|rss.coordinator.exclude.nodes.check.interval.ms|60000| Update interval (ms) for exclude nodes                                                                                                                                                                                                                                   |
+|rss.coordinator.access.checkers|org.apache.uniffle.coordinator.access.checker.AccessClusterLoadChecker| The access checkers will be used when the spark client uses the DelegationShuffleManager, which will decide whether to use rss according to the result of the specified access checkers |
+|rss.coordinator.access.loadChecker.memory.percentage|15.0| The minimal percentage of available memory of a server |
+|rss.coordinator.dynamicClientConf.enabled|false| Whether to enable dynamic client conf, which will be fetched by the spark client |
+|rss.coordinator.dynamicClientConf.path|-| The dynamic client conf of this cluster, which can be stored on Hadoop FS or locally |
+|rss.coordinator.dynamicClientConf.updateIntervalSec|120| The dynamic client conf update interval in seconds                                                                                                                                                                                                                       |
+|rss.coordinator.remote.storage.cluster.conf|-| Remote Storage Cluster related conf with format $clusterId,$key=$value, separated by ';'                                                                                                                                                                                 |
+|rss.rpc.server.port|-| RPC port for coordinator                                                                                                                                                                                                                                                 |
+|rss.jetty.http.port|-| Http port for coordinator                                                                                                                                                                                                                                                |
+|rss.coordinator.remote.storage.select.strategy|APP_BALANCE| Strategy for selecting the remote path                                                                                                                                                                                                                                   |
+|rss.coordinator.remote.storage.io.sample.schedule.time|60000| The interval (ms) at which reads and writes are scheduled against the remote paths to sample the I/O cost of different Hadoop FS |
+|rss.coordinator.remote.storage.io.sample.file.size|204800000| The size of the file that the scheduled thread reads and writes                                                                                                                                                                                                          |
+|rss.coordinator.remote.storage.io.sample.access.times|3| The number of times to read and write Hadoop FS files |
+|rss.coordinator.startup-silent-period.enabled|false| Enable the startup-silent-period to reject the assignment requests for avoiding partial assignments. To avoid service interruption, this mechanism is disabled by default. Especially it's recommended to use in coordinator HA mode when restarting single coordinator. |
+|rss.coordinator.startup-silent-period.duration|20000| The waiting duration (ms) when rss.coordinator.startup-silent-period.enabled is enabled. |
+|rss.coordinator.select.partition.strategy|ROUND| There are two strategies for selecting partitions: ROUND and CONTINUOUS. ROUND will poll to allocate partitions to ShuffleServer, and CONTINUOUS will try to allocate consecutive partitions to ShuffleServer, this feature can improve performance in AQE scenarios.    |
+|rss.metrics.reporter.class|-| The class of metrics reporter.                                                                                                                                                                                                                                           |
+|rss.reconfigure.interval.sec|5| Reconfigure check interval.                                                                                                                                                                                                                                              |
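As an illustrative sketch of how these settings fit together in a coordinator conf file (hostnames, ports and paths are placeholders, not recommendations):

```
rss.rpc.server.port <rpc-port>
rss.jetty.http.port <http-port>
rss.coordinator.shuffle.nodes.max 9
rss.coordinator.exclude.nodes.file.path hdfs://<namenode>/<path>/exclude_nodes
rss.coordinator.dynamicClientConf.enabled true
rss.coordinator.dynamicClientConf.path hdfs://<namenode>/<path>/dynamic_client.conf
rss.coordinator.remote.storage.cluster.conf <clusterId>,<key>=<value>
```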
 
 ### AccessClusterLoadChecker settings
 |Property Name|Default|	Description|
@@ -113,10 +113,10 @@ This document will introduce how to deploy Uniffle coordinators.
 ### AccessCandidatesChecker settings
 AccessCandidatesChecker is one of the built-in access checkers, which allows users to define the candidates list that is permitted to use rss.  
 
-|Property Name|Default|	Description|
-|---|---|---|
-|rss.coordinator.access.candidates.updateIntervalSec|120|Accessed candidates update interval in seconds, which is only valid when AccessCandidatesChecker is enabled.|
-|rss.coordinator.access.candidates.path|-|Accessed candidates file path, the file can be stored on HDFS|
+|Property Name|Default| 	Description                                                                                                 |
+|---|---|--------------------------------------------------------------------------------------------------------------|
+|rss.coordinator.access.candidates.updateIntervalSec|120| Accessed candidates update interval in seconds, which is only valid when AccessCandidatesChecker is enabled. |
+|rss.coordinator.access.candidates.path|-| Accessed candidates file path, the file can be stored on Hadoop FS |
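For instance, enabling this checker could look like the following sketch (the path is a placeholder; the checker class name is the one referenced elsewhere in this change):

```
rss.coordinator.access.checkers org.apache.uniffle.coordinator.access.checker.AccessCandidatesChecker
rss.coordinator.access.candidates.path hdfs://<namenode>/<path>/access_checker_candidates
rss.coordinator.access.candidates.updateIntervalSec 120
```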
 
 ### AccessQuotaChecker settings
 AccessQuotaChecker is a checker for the case where the number of concurrent tasks submitted by users increases sharply and some important apps may be affected. Therefore, we restrict the apps users can submit to the uniffle cluster, and rejected apps will be submitted to ESS.
diff --git a/docs/server_guide.md b/docs/server_guide.md
index 5804680f..4f801143 100644
--- a/docs/server_guide.md
+++ b/docs/server_guide.md
@@ -64,34 +64,34 @@ This document will introduce how to deploy Uniffle shuffle servers.
    ```
 
 ## Configuration
-| Property Name                                         | Default | Description                                                                                                                                                                                                                                                                                                                                                                              |
-|-------------------------------------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| rss.coordinator.quorum                                | -       | Coordinator quorum                                                                                                                                                                                                                                                                                                                                                                       |
-| rss.rpc.server.port                                   | -       | RPC port for Shuffle server, if set zero, grpc server start on random port.                                                                                                                                                                                                                                                                                                              |
-| rss.jetty.http.port                                   | -       | Http port for Shuffle server                                                                                                                                                                                                                                                                                                                                                             |
-| rss.server.netty.port                                 | -1      | Netty port for Shuffle server, if set zero, netty server start on random port.                                                                                                                                                                                                                                                                                                           |
-| rss.server.buffer.capacity                            | -1      | Max memory of buffer manager for shuffle server. If negative, JVM heap size * buffer.ratio is used                                                                                                                                                                                                                                                                                       |
-| rss.server.buffer.capacity.ratio                      | 0.8     | when `rss.server.buffer.capacity`=-1, then the buffer capacity is JVM heap size * ratio                                                                                                                                                                                                                                                                                                  |
-| rss.server.memory.shuffle.highWaterMark.percentage    | 75.0    | Threshold of spill data to storage, percentage of rss.server.buffer.capacity                                                                                                                                                                                                                                                                                                             |
-| rss.server.memory.shuffle.lowWaterMark.percentage     | 25.0    | Threshold of keep data in memory, percentage of rss.server.buffer.capacity                                                                                                                                                                                                                                                                                                               |
-| rss.server.read.buffer.capacity                       | -1      | Max size of buffer for reading data. If negative, JVM heap size * read.buffer.ratio is used                                                                                                                                                                                                                                                                                              |
-| rss.server.read.buffer.capacity.ratio                 | 0.4     | when `rss.server.read.buffer.capacity`=-1, then read buffer capacity is JVM heap size * ratio                                                                                                                                                                                                                                                                                            |
-| rss.server.heartbeat.interval                         | 10000   | Heartbeat interval to Coordinator (ms)                                                                                                                                                                                                                                                                                                                                                   |
-| rss.server.flush.threadPool.size                      | 10      | Thread pool for flush data to file                                                                                                                                                                                                                                                                                                                                                       |
-| rss.server.commit.timeout                             | 600000  | Timeout when commit shuffle data (ms)                                                                                                                                                                                                                                                                                                                                                    |
-| rss.storage.type                                      | -       | Supports MEMORY_LOCALFILE, MEMORY_HDFS, MEMORY_LOCALFILE_HDFS                                                                                                                                                                                                                                                                                                                            |
-| rss.server.flush.cold.storage.threshold.size          | 64M     | The threshold of data size for LOACALFILE and HDFS if MEMORY_LOCALFILE_HDFS is used                                                                                                                                                                                                                                                                                                      |
-| rss.server.tags                                       | -       | The comma-separated list of tags to indicate the shuffle server's attributes. It will be used as the assignment basis for the coordinator                                                                                                                                                                                                                                                |
-| rss.server.single.buffer.flush.enabled                | false   | Whether single buffer flush when size exceeded rss.server.single.buffer.flush.threshold                                                                                                                                                                                                                                                                                                  |
-| rss.server.single.buffer.flush.threshold              | 64M     | The threshold of single shuffle buffer flush                                                                                                                                                                                                                                                                                                                                             |
-| rss.server.disk.capacity                              | -1      | Disk capacity that shuffle server can use. If negative, it will use disk whole space * ratio                                                                                                                                                                                                                                                                                             |
-| rss.server.disk.capacity.ratio                        | 0.9     | When `rss.server.disk.capacity` is negative, disk whole space * ratio is used                                                                                                                                                                                                                                                                                                            |
-| rss.server.multistorage.fallback.strategy.class       | -       | The fallback strategy for `MEMORY_LOCALFILE_HDFS`. Support `org.apache.uniffle.server.storage.RotateStorageManagerFallbackStrategy`,`org.apache.uniffle.server.storage.LocalStorageManagerFallbackStrategy` and `org.apache.uniffle.server.storage.HdfsStorageManagerFallbackStrategy`. If not set, `org.apache.uniffle.server.storage.HdfsStorageManagerFallbackStrategy` will be used. |
-| rss.server.leak.shuffledata.check.interval            | 3600000 | The interval of leak shuffle data check (ms)                                                                                                                                                                                                                                                                                                                                             |
-| rss.server.max.concurrency.of.per-partition.write | 1       | The max concurrency of single partition writer, the data partition file number is equal to this value. Default value is 1. This config could improve the writing speed, especially for huge partition.                                                                                                                                                                                   |
-| rss.server.max.concurrency.limit.of.per-partition.write | - | The limit for max concurrency per-partition write specified by client, this won't be enabled by default.                                                                                                                                                                                                                                                                                 |
-| rss.metrics.reporter.class                            | -       | The class of metrics reporter.                                                                                                                                                                                                                                                                                                                                                           |
-|rss.server.multistorage.manager.selector.class         | org.apache.uniffle.server.storage.multi.DefaultStorageManagerSelector | The manager selector strategy for `MEMORY_LOCALFILE_HDFS`. Default value is `DefaultStorageManagerSelector`, and another `HugePartitionSensitiveStorageManagerSelector` will flush only huge partition's data to cold storage.                                                                                             |
+| Property Name                                         | Default | Description                                                                                                                                                                                                                                                                                                                                                                                  |
+|-------------------------------------------------------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| rss.coordinator.quorum                                | -       | Coordinator quorum                                                                                                                                                                                                                                                                                                                                                                           |
+| rss.rpc.server.port                                   | -       | RPC port for Shuffle server, if set zero, grpc server start on random port.                                                                                                                                                                                                                                                                                                                  |
+| rss.jetty.http.port                                   | -       | Http port for Shuffle server                                                                                                                                                                                                                                                                                                                                                                 |
+| rss.server.netty.port                                 | -1      | Netty port for Shuffle server, if set zero, netty server start on random port.                                                                                                                                                                                                                                                                                                               |
+| rss.server.buffer.capacity                            | -1      | Max memory of buffer manager for shuffle server. If negative, JVM heap size * buffer.ratio is used                                                                                                                                                                                                                                                                                           |
+| rss.server.buffer.capacity.ratio                      | 0.8     | when `rss.server.buffer.capacity`=-1, then the buffer capacity is JVM heap size * ratio                                                                                                                                                                                                                                                                                                      |
+| rss.server.memory.shuffle.highWaterMark.percentage    | 75.0    | Threshold of spill data to storage, percentage of rss.server.buffer.capacity                                                                                                                                                                                                                                                                                                                 |
+| rss.server.memory.shuffle.lowWaterMark.percentage     | 25.0    | Threshold of keep data in memory, percentage of rss.server.buffer.capacity                                                                                                                                                                                                                                                                                                                   |
+| rss.server.read.buffer.capacity                       | -1      | Max size of buffer for reading data. If negative, JVM heap size * read.buffer.ratio is used                                                                                                                                                                                                                                                                                                  |
+| rss.server.read.buffer.capacity.ratio                 | 0.4     | when `rss.server.read.buffer.capacity`=-1, then read buffer capacity is JVM heap size * ratio                                                                                                                                                                                                                                                                                                |
+| rss.server.heartbeat.interval                         | 10000   | Heartbeat interval to Coordinator (ms)                                                                                                                                                                                                                                                                                                                                                       |
+| rss.server.flush.threadPool.size                      | 10      | Thread pool for flush data to file                                                                                                                                                                                                                                                                                                                                                           |
+| rss.server.commit.timeout                             | 600000  | Timeout when commit shuffle data (ms)                                                                                                                                                                                                                                                                                                                                                        |
+| rss.storage.type                                      | -       | Supports MEMORY_LOCALFILE, MEMORY_HDFS, MEMORY_LOCALFILE_HDFS                                                                                                                                                                                                                                                                                                                                |
+| rss.server.flush.cold.storage.threshold.size          | 64M     | The threshold of data size for LOCALFILE and Hadoop FS if MEMORY_LOCALFILE_HDFS is used |
+| rss.server.tags                                       | -       | The comma-separated list of tags to indicate the shuffle server's attributes. It will be used as the assignment basis for the coordinator                                                                                                                                                                                                                                                    |
+| rss.server.single.buffer.flush.enabled                | false   | Whether to flush a single buffer once its size exceeds rss.server.single.buffer.flush.threshold |
+| rss.server.single.buffer.flush.threshold              | 64M     | The threshold of single shuffle buffer flush                                                                                                                                                                                                                                                                                                                                                 |
+| rss.server.disk.capacity                              | -1      | Disk capacity that shuffle server can use. If negative, it will use disk whole space * ratio                                                                                                                                                                                                                                                                                                 |
+| rss.server.disk.capacity.ratio                        | 0.9     | When `rss.server.disk.capacity` is negative, disk whole space * ratio is used                                                                                                                                                                                                                                                                                                                |
+| rss.server.multistorage.fallback.strategy.class       | -       | The fallback strategy for `MEMORY_LOCALFILE_HDFS`. Support `org.apache.uniffle.server.storage.RotateStorageManagerFallbackStrategy`,`org.apache.uniffle.server.storage.LocalStorageManagerFallbackStrategy` and `org.apache.uniffle.server.storage.HadoopStorageManagerFallbackStrategy`. If not set, `org.apache.uniffle.server.storage.HadoopStorageManagerFallbackStrategy` will be used. |
+| rss.server.leak.shuffledata.check.interval            | 3600000 | The interval of leak shuffle data check (ms)                                                                                                                                                                                                                                                                                                                                                 |
+| rss.server.max.concurrency.of.per-partition.write | 1       | The max concurrency of single partition writer, the data partition file number is equal to this value. Default value is 1. This config could improve the writing speed, especially for huge partition.                                                                                                                                                                                       |
+| rss.server.max.concurrency.limit.of.per-partition.write | - | The limit for max concurrency per-partition write specified by client, this won't be enabled by default.                                                                                                                                                                                                                                                                                     |
+| rss.metrics.reporter.class                            | -       | The class of metrics reporter.                                                                                                                                                                                                                                                                                                                                                               |
+|rss.server.multistorage.manager.selector.class         | org.apache.uniffle.server.storage.multi.DefaultStorageManagerSelector | The manager selector strategy for `MEMORY_LOCALFILE_HDFS`. Default value is `DefaultStorageManagerSelector`, and another `HugePartitionSensitiveStorageManagerSelector` will flush only huge partition's data to cold storage.                                                                                                                                             [...]
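A minimal, illustrative fragment of server conf covering the basic settings above (hosts and ports are placeholders; the sizes and ratios only restate defaults, not tuning advice):

```
rss.coordinator.quorum <coordinator-host-1>:<port>,<coordinator-host-2>:<port>
rss.rpc.server.port <rpc-port>
rss.jetty.http.port <http-port>
rss.storage.type MEMORY_LOCALFILE_HDFS
rss.server.buffer.capacity.ratio 0.8
rss.server.disk.capacity.ratio 0.9
rss.server.flush.cold.storage.threshold.size 64M
```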
 
 ### Advanced Configurations
 |Property Name|Default| Description                                                                                                                                                                                 |
@@ -118,19 +118,19 @@ A huge partition is a common problem for Spark/MR and so on, caused by data skew
 #### Memory usage limit
 To do this, we introduce the extra configs
 
-|Property Name|Default|Description|
-|---|---|---|
-|rss.server.huge-partition.size.threshold|20g|Threshold of huge partition size, once exceeding threshold, memory usage limitation and huge partition buffer flushing will be triggered. This value depends on the capacity of per disk in shuffle server. For example, per disk capacity is 1TB, and the max size of huge partition in per disk is 5. So the total size of huge partition in local disk is 100g (10%),this is an acceptable config value. Once reaching this threshold, it will be better to [...]
-|rss.server.huge-partition.memory.limit.ratio|0.2|The memory usage limit ratio for huge partition, it will only triggered when partition's size exceeds the threshold of 'rss.server.huge-partition.size.threshold'. If the buffer capacity is 10g, this means the default memory usage for huge partition is 2g. Samely, this config value depends on max size of huge partitions on per shuffle server.|
+|Property Name|Default| Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                          [...]
+|---|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
+|rss.server.huge-partition.size.threshold|20g| Threshold of huge partition size, once exceeding threshold, memory usage limitation and huge partition buffer flushing will be triggered. This value depends on the capacity of per disk in shuffle server. For example, per disk capacity is 1TB, and the max size of huge partition in per disk is 5. So the total size of huge partition in local disk is 100g (10%),this is an acceptable config value. Once reaching this threshold, it will be better t [...]
+|rss.server.huge-partition.memory.limit.ratio|0.2| The memory usage limit ratio for a huge partition, which is only triggered when the partition's size exceeds the threshold of 'rss.server.huge-partition.size.threshold'. If the buffer capacity is 10g, this means the default memory usage limit for a huge partition is 2g. Similarly, this config value depends on the max size of huge partitions on each shuffle server. [...]
 
 #### Data flush
 Once the huge partition threshold is reached, the partition is marked as a huge partition, and single buffer flush is then triggered (writing to persistent storage as soon as possible). By default, single buffer flush is only enabled by configuring `rss.server.single.buffer.flush.enabled`, but it is automatically enabled for huge partitions. 
 
-If you don't use HDFS, the huge partition may be flushed to local disk, which is dangerous if the partition size is larger than the free disk space. Therefore, it is recommended to use a mixed storage type, including HDFS or other distributed file systems.
+If you don't use a Hadoop FS, the huge partition may be flushed to local disk, which is dangerous if the partition size is larger than the free disk space. Therefore, it is recommended to use a mixed storage type, including HDFS or other distributed file systems.
 
-For HDFS, the conf value of `rss.server.single.buffer.flush.threshold` should be greater than the value of `rss.server.flush.cold.storage.threshold.size`, which will flush data directly to HDFS. 
+For Hadoop FS, the conf value of `rss.server.single.buffer.flush.threshold` should be greater than the value of `rss.server.flush.cold.storage.threshold.size`, which will flush data directly to Hadoop FS. 
 
-Finally, to improve the speed of writing to HDFS for a single partition, the value of `rss.server.max.concurrency.of.per-partition.write` and `rss.server.flush.threadPool.size` could be increased to 10 or 20.
+Finally, to improve the speed of writing to Hadoop FS for a single partition, the values of `rss.server.max.concurrency.of.per-partition.write` and `rss.server.flush.threadPool.size` could be increased to 10 or 20.
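Putting the guidance above together, a hedged sketch of the relevant knobs could look like this (values are illustrative and should be adapted to the cluster; the full example conf below still applies):

```
rss.server.single.buffer.flush.enabled true
rss.server.flush.cold.storage.threshold.size 64M
rss.server.single.buffer.flush.threshold 128M
rss.server.max.concurrency.of.per-partition.write 10
rss.server.flush.threadPool.size 20
```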
 
 #### Example of server conf
 ```
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHadoopTest.java
similarity index 96%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHadoopTest.java
index 9c483192..4e10a3ae 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerHadoopTest.java
@@ -35,7 +35,7 @@ import org.apache.uniffle.coordinator.CoordinatorConf;
 import org.apache.uniffle.coordinator.access.AccessInfo;
 import org.apache.uniffle.coordinator.access.checker.AccessCandidatesChecker;
 import org.apache.uniffle.coordinator.metric.CoordinatorMetrics;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 
 import static java.lang.Thread.sleep;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -43,7 +43,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class AccessCandidatesCheckerHdfsTest extends HdfsTestBase {
+public class AccessCandidatesCheckerHadoopTest extends HadoopTestBase {
   @BeforeEach
   public void setUp() {
     CoordinatorMetrics.register();
@@ -57,7 +57,7 @@ public class AccessCandidatesCheckerHdfsTest extends HdfsTestBase {
   @Test
   public void test() throws Exception {
     String candidatesFile = HDFS_URI + "/test/access_checker_candidates";
-    createAndRunCases(HDFS_URI, candidatesFile, fs, HdfsTestBase.conf);
+    createAndRunCases(HDFS_URI, candidatesFile, fs, HadoopTestBase.conf);
   }
 
   public static void createAndRunCases(
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHadoopTest.java
similarity index 72%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHadoopTest.java
index d997e8c4..af6ce5b5 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/AccessCandidatesCheckerKerberizedHadoopTest.java
@@ -22,15 +22,15 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 import org.apache.uniffle.coordinator.metric.CoordinatorMetrics;
 
-public class AccessCandidatesCheckerKerberizedHdfsTest extends KerberizedHdfsBase {
+public class AccessCandidatesCheckerKerberizedHadoopTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = AccessCandidatesCheckerKerberizedHdfsTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = AccessCandidatesCheckerKerberizedHadoopTest.class;
+    KerberizedHadoopBase.init();
   }
 
   @BeforeEach
@@ -47,12 +47,12 @@ public class AccessCandidatesCheckerKerberizedHdfsTest extends KerberizedHdfsBas
 
   @Test
   public void test() throws Exception {
-    String candidatesFile =  kerberizedHdfs.getSchemeAndAuthorityPrefix() + "/test/access_checker_candidates";
-    AccessCandidatesCheckerHdfsTest.createAndRunCases(
-        kerberizedHdfs.getSchemeAndAuthorityPrefix(),
+    String candidatesFile =  kerberizedHadoop.getSchemeAndAuthorityPrefix() + "/test/access_checker_candidates";
+    AccessCandidatesCheckerHadoopTest.createAndRunCases(
+        kerberizedHadoop.getSchemeAndAuthorityPrefix(),
         candidatesFile,
-        kerberizedHdfs.getFileSystem(),
-        kerberizedHdfs.getConf()
+        kerberizedHadoop.getFileSystem(),
+        kerberizedHadoop.getConf()
     );
   }
 }
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHadoopTest.java
similarity index 96%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHadoopTest.java
index 3354f22c..f9831a25 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerHadoopTest.java
@@ -30,7 +30,7 @@ import org.junit.jupiter.api.Test;
 import org.apache.uniffle.coordinator.ApplicationManager;
 import org.apache.uniffle.coordinator.ClientConfManager;
 import org.apache.uniffle.coordinator.CoordinatorConf;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 
 import static java.lang.Thread.sleep;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -38,12 +38,12 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class ClientConfManagerHdfsTest extends HdfsTestBase {
+public class ClientConfManagerHadoopTest extends HadoopTestBase {
 
   @Test
   public void test() throws Exception {
     String cfgFile = HDFS_URI + "/test/client_conf";
-    createAndRunClientConfManagerCases(HDFS_URI, cfgFile, fs, HdfsTestBase.conf);
+    createAndRunClientConfManagerCases(HDFS_URI, cfgFile, fs, HadoopTestBase.conf);
   }
 
   public static void createAndRunClientConfManagerCases(
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHadoopTest.java
similarity index 63%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHadoopTest.java
index eff88b8a..53ebf6b8 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ClientConfManagerKerberlizedHadoopTest.java
@@ -20,24 +20,24 @@ package org.apache.uniffle.test;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
-public class ClientConfManagerKerberlizedHdfsTest extends KerberizedHdfsBase {
+public class ClientConfManagerKerberlizedHadoopTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = ClientConfManagerKerberlizedHdfsTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = ClientConfManagerKerberlizedHadoopTest.class;
+    KerberizedHadoopBase.init();
   }
 
   @Test
-  public void testConfInHDFS() throws Exception {
-    String cfgFile = kerberizedHdfs.getSchemeAndAuthorityPrefix() + "/test/client_conf";
-    ClientConfManagerHdfsTest.createAndRunClientConfManagerCases(
-        kerberizedHdfs.getSchemeAndAuthorityPrefix(),
+  public void testConfInHadoop() throws Exception {
+    String cfgFile = kerberizedHadoop.getSchemeAndAuthorityPrefix() + "/test/client_conf";
+    ClientConfManagerHadoopTest.createAndRunClientConfManagerCases(
+        kerberizedHadoop.getSchemeAndAuthorityPrefix(),
         cfgFile,
-        kerberizedHdfs.getFileSystem(),
-        kerberizedHdfs.getConf()
+        kerberizedHadoop.getFileSystem(),
+        kerberizedHadoop.getConf()
     );
   }
 }
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/IntegrationTestBase.java b/integration-test/common/src/test/java/org/apache/uniffle/test/IntegrationTestBase.java
index d080fb2e..c5a8b507 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/IntegrationTestBase.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/IntegrationTestBase.java
@@ -35,10 +35,10 @@ import org.apache.uniffle.server.MockedShuffleServer;
 import org.apache.uniffle.server.ShuffleServer;
 import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.server.ShuffleServerMetrics;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.util.StorageType;
 
-public abstract class IntegrationTestBase extends HdfsTestBase {
+public abstract class IntegrationTestBase extends HadoopTestBase {
 
   protected static final int SHUFFLE_SERVER_PORT = 20001;
   protected static final String LOCALHOST;
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHdfsFallbackTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHadoopFallbackTest.java
similarity index 96%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHdfsFallbackTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHadoopFallbackTest.java
index cc7baef8..d1d7bf9a 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHdfsFallbackTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/MultiStorageHadoopFallbackTest.java
@@ -29,7 +29,7 @@ import org.apache.uniffle.storage.util.StorageType;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class MultiStorageHdfsFallbackTest extends MultiStorageFaultToleranceBase {
+public class MultiStorageHadoopFallbackTest extends MultiStorageFaultToleranceBase {
 
   @BeforeAll
   public static void setupServers(@TempDir File tmpDir) throws Exception {
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHadoopTest.java
similarity index 95%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHadoopTest.java
index f928eeb3..57a49356 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerConcurrentWriteOfHadoopTest.java
@@ -57,7 +57,7 @@ import org.apache.uniffle.storage.util.StorageType;
 import static org.apache.uniffle.common.util.Constants.SHUFFLE_DATA_FILE_SUFFIX;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class ShuffleServerConcurrentWriteOfHdfsTest extends ShuffleServerWithHdfsTest {
+public class ShuffleServerConcurrentWriteOfHadoopTest extends ShuffleServerWithHadoopTest {
   private static final int MAX_CONCURRENCY = 3;
 
   @BeforeAll
@@ -82,8 +82,8 @@ public class ShuffleServerConcurrentWriteOfHdfsTest extends ShuffleServerWithHdf
 
   @ParameterizedTest
   @MethodSource("clientConcurrencyAndExpectedProvider")
-  public void testConcurrentWrite2Hdfs(int clientSpecifiedConcurrency, int expectedConcurrency) throws Exception {
-    String appId = "testConcurrentWrite2Hdfs_" + new Random().nextInt();
+  public void testConcurrentWrite2Hadoop(int clientSpecifiedConcurrency, int expectedConcurrency) throws Exception {
+    String appId = "testConcurrentWrite2Hadoop_" + new Random().nextInt();
     String dataBasePath = HDFS_URI + "rss/test";
     RssRegisterShuffleRequest rrsr = new RssRegisterShuffleRequest(
         appId,
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHadoopTest.java
similarity index 98%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHadoopTest.java
index c27968c0..c43122c9 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithHadoopTest.java
@@ -49,7 +49,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class ShuffleServerWithHdfsTest extends ShuffleReadWriteBase {
+public class ShuffleServerWithHadoopTest extends ShuffleReadWriteBase {
 
   protected ShuffleServerGrpcClient shuffleServerClient;
 
@@ -74,7 +74,7 @@ public class ShuffleServerWithHdfsTest extends ShuffleReadWriteBase {
   }
 
   @Test
-  public void hdfsWriteReadTest() {
+  public void hadoopWriteReadTest() {
     String appId = "app_hdfs_read_write";
     String dataBasePath = HDFS_URI + "rss/test";
     RssRegisterShuffleRequest rrsr = new RssRegisterShuffleRequest(appId, 0,
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHadoopTest.java
similarity index 96%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHadoopTest.java
index 00bd8a6e..8b698407 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithKerberizedHadoopTest.java
@@ -42,7 +42,7 @@ import org.apache.uniffle.client.request.RssSendCommitRequest;
 import org.apache.uniffle.client.request.RssSendShuffleDataRequest;
 import org.apache.uniffle.client.response.CompressedShuffleBlock;
 import org.apache.uniffle.client.util.DefaultIdHelper;
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 import org.apache.uniffle.common.PartitionRange;
 import org.apache.uniffle.common.RemoteStorageInfo;
 import org.apache.uniffle.common.ShuffleBlockInfo;
@@ -60,7 +60,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class ShuffleServerWithKerberizedHdfsTest extends KerberizedHdfsBase {
+public class ShuffleServerWithKerberizedHadoopTest extends KerberizedHadoopBase {
 
   protected static final String LOCALHOST;
 
@@ -107,8 +107,8 @@ public class ShuffleServerWithKerberizedHdfsTest extends KerberizedHdfsBase {
 
   @BeforeAll
   public static void setup() throws Exception {
-    testRunner = ShuffleServerWithKerberizedHdfsTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = ShuffleServerWithKerberizedHadoopTest.class;
+    KerberizedHadoopBase.init();
 
     CoordinatorConf coordinatorConf = new CoordinatorConf();
     coordinatorConf.setInteger(CoordinatorConf.RPC_SERVER_PORT, 19999);
@@ -175,8 +175,8 @@ public class ShuffleServerWithKerberizedHdfsTest extends KerberizedHdfsBase {
   }
 
   @Test
-  public void hdfsWriteReadTest() throws Exception {
-    String alexDir = kerberizedHdfs.getSchemeAndAuthorityPrefix() + "/alex/";
+  public void hadoopWriteReadTest() throws Exception {
+    String alexDir = kerberizedHadoop.getSchemeAndAuthorityPrefix() + "/alex/";
 
     String user = "alex";
     String appId = "app_hdfs_read_write";
@@ -184,7 +184,7 @@ public class ShuffleServerWithKerberizedHdfsTest extends KerberizedHdfsBase {
 
     RemoteStorageInfo remoteStorageInfo = new RemoteStorageInfo(
         dataBasePath,
-        conf2Map(kerberizedHdfs.getConf())
+        conf2Map(kerberizedHadoop.getConf())
     );
 
     RssRegisterShuffleRequest rrsr = new RssRegisterShuffleRequest(
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHdfsTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
similarity index 96%
rename from integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHdfsTest.java
rename to integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
index 89397649..16941499 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHdfsTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
@@ -45,7 +45,7 @@ import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.server.buffer.ShuffleBuffer;
 import org.apache.uniffle.storage.handler.api.ClientReadHandler;
 import org.apache.uniffle.storage.handler.impl.ComposedClientReadHandler;
-import org.apache.uniffle.storage.handler.impl.HdfsClientReadHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopClientReadHandler;
 import org.apache.uniffle.storage.handler.impl.LocalFileClientReadHandler;
 import org.apache.uniffle.storage.handler.impl.MemoryClientReadHandler;
 import org.apache.uniffle.storage.util.StorageType;
@@ -56,7 +56,7 @@ import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-public class ShuffleServerWithMemLocalHdfsTest extends ShuffleReadWriteBase {
+public class ShuffleServerWithMemLocalHadoopTest extends ShuffleReadWriteBase {
 
   private ShuffleServerGrpcClient shuffleServerClient;
   private static String REMOTE_STORAGE = HDFS_URI + "rss/test";
@@ -91,12 +91,12 @@ public class ShuffleServerWithMemLocalHdfsTest extends ShuffleReadWriteBase {
   }
 
   @Test
-  public void memoryLocalFileHDFSReadWithFilterAndSkipTest() throws Exception {
+  public void memoryLocalFileHadoopReadWithFilterAndSkipTest() throws Exception {
     runTest(true);
   }
   
   @Test
-  public void memoryLocalFileHDFSReadWithFilterTest() throws Exception {
+  public void memoryLocalFileHadoopReadWithFilterTest() throws Exception {
     runTest(false);
   }
   
@@ -132,12 +132,13 @@ public class ShuffleServerWithMemLocalHdfsTest extends ShuffleReadWriteBase {
     LocalFileClientReadHandler localFileClientReadHandler = new LocalFileClientReadHandler(
         testAppId, shuffleId, partitionId, 0, 1, 3,
         75, expectBlockIds, processBlockIds, shuffleServerClient);
-    HdfsClientReadHandler hdfsClientReadHandler = new HdfsClientReadHandler(testAppId, shuffleId, partitionId, 0, 1, 3,
+    HadoopClientReadHandler hadoopClientReadHandler = new HadoopClientReadHandler(testAppId, shuffleId,
+        partitionId, 0, 1, 3,
         500, expectBlockIds, processBlockIds, REMOTE_STORAGE, conf);
     ClientReadHandler[] handlers = new ClientReadHandler[3];
     handlers[0] = memoryClientReadHandler;
     handlers[1] = localFileClientReadHandler;
-    handlers[2] = hdfsClientReadHandler;
+    handlers[2] = hadoopClientReadHandler;
     ShuffleServerInfo ssi = new ShuffleServerInfo(LOCALHOST, SHUFFLE_SERVER_PORT);
     ComposedClientReadHandler composedClientReadHandler = new ComposedClientReadHandler(
         ssi, handlers);
diff --git a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHdfsMultiStorageRssTest.java b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopMultiStorageRssTest.java
similarity index 95%
rename from integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHdfsMultiStorageRssTest.java
rename to integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopMultiStorageRssTest.java
index 9bbc0e28..2c7e9d3a 100644
--- a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHdfsMultiStorageRssTest.java
+++ b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopMultiStorageRssTest.java
@@ -34,9 +34,9 @@ import org.apache.uniffle.coordinator.CoordinatorConf;
 import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.storage.util.StorageType;
 
-public class RepartitionWithHdfsMultiStorageRssTest extends RepartitionTest {
+public class RepartitionWithHadoopMultiStorageRssTest extends RepartitionTest {
 
-  private static final Logger LOG = LoggerFactory.getLogger(RepartitionWithHdfsMultiStorageRssTest.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RepartitionWithHadoopMultiStorageRssTest.class);
 
   @BeforeAll
   public static void setupServers(@TempDir File tmpDir) throws Exception {
diff --git a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHdfsTest.java b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHadoopTest.java
similarity index 98%
rename from integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHdfsTest.java
rename to integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHadoopTest.java
index 394bb242..1f85d1f4 100644
--- a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHdfsTest.java
+++ b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/ShuffleUnregisterWithHadoopTest.java
@@ -39,7 +39,7 @@ import org.apache.uniffle.storage.util.StorageType;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class ShuffleUnregisterWithHdfsTest extends SparkIntegrationTestBase {
+public class ShuffleUnregisterWithHadoopTest extends SparkIntegrationTestBase {
 
   @BeforeAll
   public static void setupServers() throws Exception {
diff --git a/server/src/main/java/org/apache/uniffle/server/ShuffleServer.java b/server/src/main/java/org/apache/uniffle/server/ShuffleServer.java
index cb9c7484..fe100b62 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServer.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServer.java
@@ -195,7 +195,7 @@ public class ShuffleServer {
     String storageType = shuffleServerConf.getString(RSS_STORAGE_TYPE);
     if (!testMode && (StorageType.LOCALFILE.name().equals(storageType)
             || (StorageType.HDFS.name()).equals(storageType))) {
-      throw new IllegalArgumentException("RSS storage type about LOCALFILE and HDFS should be used in test mode, "
+      throw new IllegalArgumentException("RSS storage type about LOCALFILE and HADOOP should be used in test mode, "
               + "because of the poor performance of these two types.");
     }
     ip = RssUtils.getHostIp();
diff --git a/server/src/main/java/org/apache/uniffle/server/ShuffleServerMetrics.java b/server/src/main/java/org/apache/uniffle/server/ShuffleServerMetrics.java
index 131394dc..7a83cd40 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServerMetrics.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServerMetrics.java
@@ -69,7 +69,7 @@ public class ShuffleServerMetrics {
   private static final String READ_USED_BUFFER_SIZE = "read_used_buffer_size";
   private static final String TOTAL_FAILED_WRITTEN_EVENT_NUM = "total_failed_written_event_num";
   private static final String TOTAL_DROPPED_EVENT_NUM = "total_dropped_event_num";
-  private static final String TOTAL_HDFS_WRITE_DATA = "total_hdfs_write_data";
+  private static final String TOTAL_HADOOP_WRITE_DATA = "total_hadoop_write_data";
   private static final String TOTAL_LOCALFILE_WRITE_DATA = "total_localfile_write_data";
   private static final String TOTAL_REQUIRE_BUFFER_FAILED = "total_require_buffer_failed";
   private static final String TOTAL_REQUIRE_BUFFER_FAILED_FOR_HUGE_PARTITION =
@@ -117,7 +117,7 @@ public class ShuffleServerMetrics {
   public static Counter.Child  counterTotalReadTime;
   public static Counter.Child  counterTotalFailedWrittenEventNum;
   public static Counter.Child  counterTotalDroppedEventNum;
-  public static Counter.Child  counterTotalHdfsWriteDataSize;
+  public static Counter.Child  counterTotalHadoopWriteDataSize;
   public static Counter.Child  counterTotalLocalFileWriteDataSize;
   public static Counter.Child  counterTotalRequireBufferFailed;
   public static Counter.Child  counterTotalRequireBufferFailedForHugePartition;
@@ -239,7 +239,7 @@ public class ShuffleServerMetrics {
     counterTotalReadTime = metricsManager.addLabeledCounter(TOTAL_READ_TIME);
     counterTotalDroppedEventNum = metricsManager.addLabeledCounter(TOTAL_DROPPED_EVENT_NUM);
     counterTotalFailedWrittenEventNum = metricsManager.addLabeledCounter(TOTAL_FAILED_WRITTEN_EVENT_NUM);
-    counterTotalHdfsWriteDataSize = metricsManager.addLabeledCounter(TOTAL_HDFS_WRITE_DATA);
+    counterTotalHadoopWriteDataSize = metricsManager.addLabeledCounter(TOTAL_HADOOP_WRITE_DATA);
     counterTotalLocalFileWriteDataSize = metricsManager.addLabeledCounter(TOTAL_LOCALFILE_WRITE_DATA);
     counterTotalRequireBufferFailed = metricsManager.addLabeledCounter(TOTAL_REQUIRE_BUFFER_FAILED);
     counterTotalRequireBufferFailedForRegularPartition =
diff --git a/server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManager.java b/server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManager.java
similarity index 86%
rename from server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManager.java
rename to server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManager.java
index 2cebe0ca..bdd07afc 100644
--- a/server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManager.java
+++ b/server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManager.java
@@ -44,7 +44,7 @@ import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.server.ShuffleServerMetrics;
 import org.apache.uniffle.server.event.AppPurgeEvent;
 import org.apache.uniffle.server.event.PurgeEvent;
-import org.apache.uniffle.storage.common.HdfsStorage;
+import org.apache.uniffle.storage.common.HadoopStorage;
 import org.apache.uniffle.storage.common.Storage;
 import org.apache.uniffle.storage.factory.ShuffleHandlerFactory;
 import org.apache.uniffle.storage.handler.api.ShuffleDeleteHandler;
@@ -52,15 +52,15 @@ import org.apache.uniffle.storage.request.CreateShuffleDeleteHandlerRequest;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 import org.apache.uniffle.storage.util.StorageType;
 
-public class HdfsStorageManager extends SingleStorageManager {
+public class HadoopStorageManager extends SingleStorageManager {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsStorageManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopStorageManager.class);
 
   private final Configuration hadoopConf;
-  private Map<String, HdfsStorage> appIdToStorages = JavaUtils.newConcurrentMap();
-  private Map<String, HdfsStorage> pathToStorages = JavaUtils.newConcurrentMap();
+  private Map<String, HadoopStorage> appIdToStorages = JavaUtils.newConcurrentMap();
+  private Map<String, HadoopStorage> pathToStorages = JavaUtils.newConcurrentMap();
 
-  HdfsStorageManager(ShuffleServerConf conf) {
+  HadoopStorageManager(ShuffleServerConf conf) {
     super(conf);
     hadoopConf = conf.getHadoopConf();
   }
@@ -68,7 +68,7 @@ public class HdfsStorageManager extends SingleStorageManager {
   @Override
   public void updateWriteMetrics(ShuffleDataFlushEvent event, long writeTime) {
     super.updateWriteMetrics(event, writeTime);
-    ShuffleServerMetrics.counterTotalHdfsWriteDataSize.inc(event.getSize());
+    ShuffleServerMetrics.counterTotalHadoopWriteDataSize.inc(event.getSize());
   }
 
   @Override
@@ -86,7 +86,7 @@ public class HdfsStorageManager extends SingleStorageManager {
   @Override
   public void removeResources(PurgeEvent event) {
     String appId = event.getAppId();
-    HdfsStorage storage = getStorageByAppId(appId);
+    HadoopStorage storage = getStorageByAppId(appId);
     if (storage != null) {
       if (event instanceof AppPurgeEvent) {
         storage.removeHandlers(appId);
@@ -130,8 +130,8 @@ public class HdfsStorageManager extends SingleStorageManager {
           remoteStorageHadoopConf.setStrings(entry.getKey(), entry.getValue());
         }
       }
-      HdfsStorage hdfsStorage = new HdfsStorage(remoteStorage, remoteStorageHadoopConf);
-      return hdfsStorage;
+      HadoopStorage hadoopStorage = new HadoopStorage(remoteStorage, remoteStorageHadoopConf);
+      return hadoopStorage;
     });
     appIdToStorages.computeIfAbsent(appId, key -> pathToStorages.get(remoteStorage));
   }
@@ -146,7 +146,7 @@ public class HdfsStorageManager extends SingleStorageManager {
     return Maps.newHashMap();
   }
 
-  public HdfsStorage getStorageByAppId(String appId) {
+  public HadoopStorage getStorageByAppId(String appId) {
     if (!appIdToStorages.containsKey(appId)) {
       synchronized (this) {
         FileSystem fs;
@@ -156,7 +156,7 @@ public class HdfsStorageManager extends SingleStorageManager {
           for (Path path : appStoragePath) {
             fs = HadoopFilesystemProvider.getFilesystem(path, hadoopConf);
             if (fs.isDirectory(path)) {
-              return new HdfsStorage(path.getParent().toString(), hadoopConf);
+              return new HadoopStorage(path.getParent().toString(), hadoopConf);
             }
           }
         } catch (Exception e) {
@@ -171,12 +171,12 @@ public class HdfsStorageManager extends SingleStorageManager {
   }
 
   @VisibleForTesting
-  public Map<String, HdfsStorage> getAppIdToStorages() {
+  public Map<String, HadoopStorage> getAppIdToStorages() {
     return appIdToStorages;
   }
 
   @VisibleForTesting
-  public Map<String, HdfsStorage> getPathToStorages() {
+  public Map<String, HadoopStorage> getPathToStorages() {
     return pathToStorages;
   }
 }
diff --git a/server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManagerFallbackStrategy.java b/server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManagerFallbackStrategy.java
similarity index 87%
rename from server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManagerFallbackStrategy.java
rename to server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManagerFallbackStrategy.java
index 5f92bb6f..c6c8477a 100644
--- a/server/src/main/java/org/apache/uniffle/server/storage/HdfsStorageManagerFallbackStrategy.java
+++ b/server/src/main/java/org/apache/uniffle/server/storage/HadoopStorageManagerFallbackStrategy.java
@@ -25,11 +25,11 @@ import org.apache.uniffle.server.ShuffleDataFlushEvent;
 import org.apache.uniffle.server.ShuffleServerConf;
 
 
-public class HdfsStorageManagerFallbackStrategy extends AbstractStorageManagerFallbackStrategy {
+public class HadoopStorageManagerFallbackStrategy extends AbstractStorageManagerFallbackStrategy {
   private final Long fallBackTimes;
-  private Set<Class<? extends StorageManager>> excludeTypes = Sets.newHashSet(HdfsStorageManager.class);
+  private Set<Class<? extends StorageManager>> excludeTypes = Sets.newHashSet(HadoopStorageManager.class);
 
-  public HdfsStorageManagerFallbackStrategy(ShuffleServerConf conf) {
+  public HadoopStorageManagerFallbackStrategy(ShuffleServerConf conf) {
     super(conf);
     fallBackTimes = conf.get(ShuffleServerConf.FALLBACK_MAX_FAIL_TIMES);
   }
diff --git a/server/src/main/java/org/apache/uniffle/server/storage/MultiStorageManager.java b/server/src/main/java/org/apache/uniffle/server/storage/MultiStorageManager.java
index 615d0d09..cdf8824c 100644
--- a/server/src/main/java/org/apache/uniffle/server/storage/MultiStorageManager.java
+++ b/server/src/main/java/org/apache/uniffle/server/storage/MultiStorageManager.java
@@ -50,7 +50,7 @@ public class MultiStorageManager implements StorageManager {
 
   MultiStorageManager(ShuffleServerConf conf) {
     warmStorageManager = new LocalStorageManager(conf);
-    coldStorageManager = new HdfsStorageManager(conf);
+    coldStorageManager = new HadoopStorageManager(conf);
 
     try {
       AbstractStorageManagerFallbackStrategy storageManagerFallbackStrategy = loadFallbackStrategy(conf);
@@ -95,7 +95,7 @@ public class MultiStorageManager implements StorageManager {
   public static AbstractStorageManagerFallbackStrategy loadFallbackStrategy(
       ShuffleServerConf conf) throws Exception {
     String name = conf.getString(ShuffleServerConf.MULTISTORAGE_FALLBACK_STRATEGY_CLASS,
-        HdfsStorageManagerFallbackStrategy.class.getCanonicalName());
+        HadoopStorageManagerFallbackStrategy.class.getCanonicalName());
     Class<?> klass = Class.forName(name);
     Constructor<?> constructor;
     AbstractStorageManagerFallbackStrategy instance;
diff --git a/server/src/main/java/org/apache/uniffle/server/storage/StorageManagerFactory.java b/server/src/main/java/org/apache/uniffle/server/storage/StorageManagerFactory.java
index 209a9de4..2990c914 100644
--- a/server/src/main/java/org/apache/uniffle/server/storage/StorageManagerFactory.java
+++ b/server/src/main/java/org/apache/uniffle/server/storage/StorageManagerFactory.java
@@ -35,7 +35,7 @@ public class StorageManagerFactory {
     if (StorageType.LOCALFILE.equals(type) || StorageType.MEMORY_LOCALFILE.equals(type)) {
       return new LocalStorageManager(conf);
     } else if (StorageType.HDFS.equals(type) || StorageType.MEMORY_HDFS.equals(type)) {
-      return new HdfsStorageManager(conf);
+      return new HadoopStorageManager(conf);
     } else if (StorageType.LOCALFILE_HDFS.equals(type)
         || StorageType.MEMORY_LOCALFILE_HDFS.equals(type)) {
       return new MultiStorageManager(conf);
diff --git a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHdfsTest.java b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHadoopTest.java
similarity index 83%
rename from server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHdfsTest.java
rename to server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHadoopTest.java
index 45a69ada..4b3a66e7 100644
--- a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHdfsTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerOnKerberizedHadoopTest.java
@@ -34,11 +34,11 @@ import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 import org.apache.uniffle.common.RemoteStorageInfo;
 import org.apache.uniffle.server.buffer.ShuffleBufferManager;
 import org.apache.uniffle.server.event.AppPurgeEvent;
-import org.apache.uniffle.server.storage.HdfsStorageManager;
+import org.apache.uniffle.server.storage.HadoopStorageManager;
 import org.apache.uniffle.server.storage.StorageManager;
 import org.apache.uniffle.server.storage.StorageManagerFactory;
 import org.apache.uniffle.storage.common.AbstractStorage;
@@ -53,8 +53,8 @@ import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-public class ShuffleFlushManagerOnKerberizedHdfsTest extends KerberizedHdfsBase {
-  private static final Logger LOGGER = LoggerFactory.getLogger(ShuffleFlushManagerOnKerberizedHdfsTest.class);
+public class ShuffleFlushManagerOnKerberizedHadoopTest extends KerberizedHadoopBase {
+  private static final Logger LOGGER = LoggerFactory.getLogger(ShuffleFlushManagerOnKerberizedHadoopTest.class);
 
   private ShuffleServerConf shuffleServerConf = new ShuffleServerConf();
 
@@ -79,8 +79,8 @@ public class ShuffleFlushManagerOnKerberizedHdfsTest extends KerberizedHdfsBase
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = ShuffleFlushManagerOnKerberizedHdfsTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = ShuffleFlushManagerOnKerberizedHadoopTest.class;
+    KerberizedHadoopBase.init();
 
     ShuffleTaskManager shuffleTaskManager = mock(ShuffleTaskManager.class);
     ShuffleBufferManager shuffleBufferManager = mock(ShuffleBufferManager.class);
@@ -88,9 +88,9 @@ public class ShuffleFlushManagerOnKerberizedHdfsTest extends KerberizedHdfsBase
     when(mockShuffleServer.getShuffleTaskManager()).thenReturn(shuffleTaskManager);
     when(mockShuffleServer.getShuffleBufferManager()).thenReturn(shuffleBufferManager);
 
-    String storedPath = kerberizedHdfs.getSchemeAndAuthorityPrefix() + "/alex/rss-data/";
+    String storedPath = kerberizedHadoop.getSchemeAndAuthorityPrefix() + "/alex/rss-data/";
     Map<String, String> confMap = new HashMap<>();
-    for (Map.Entry<String, String> entry : kerberizedHdfs.getConf()) {
+    for (Map.Entry<String, String> entry : kerberizedHadoop.getConf()) {
       confMap.put(entry.getKey(), entry.getValue());
     }
     remoteStorage = new RemoteStorageInfo(
@@ -127,7 +127,7 @@ public class ShuffleFlushManagerOnKerberizedHdfsTest extends KerberizedHdfsBase
     int size = storage.getHandlerSize();
     assertEquals(2, size);
 
-    FileStatus[] fileStatus = kerberizedHdfs.getFileSystem()
+    FileStatus[] fileStatus = kerberizedHadoop.getFileSystem()
         .listStatus(new Path(remoteStorage.getPath() + "/" + appId1 + "/"));
     for (FileStatus fileState : fileStatus) {
       assertEquals("alex", fileState.getOwner());
@@ -135,30 +135,30 @@ public class ShuffleFlushManagerOnKerberizedHdfsTest extends KerberizedHdfsBase
     assertTrue(fileStatus.length > 0);
     manager.removeResources(appId1);
 
-    assertTrue(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
+    assertTrue(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
     storageManager.removeResources(
         new AppPurgeEvent(appId1, "alex", Arrays.asList(1))
     );
-    assertFalse(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
+    assertFalse(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
     try {
-      kerberizedHdfs.getFileSystem().listStatus(new Path(remoteStorage.getPath() + "/" + appId1 + "/"));
+      kerberizedHadoop.getFileSystem().listStatus(new Path(remoteStorage.getPath() + "/" + appId1 + "/"));
       fail("Exception should be thrown");
     } catch (FileNotFoundException fnfe) {
       // expected exception
     }
 
-    assertTrue(kerberizedHdfs.getFileSystem().exists(new Path(remoteStorage.getPath())));
+    assertTrue(kerberizedHadoop.getFileSystem().exists(new Path(remoteStorage.getPath())));
 
     assertEquals(0, manager.getCommittedBlockIds(appId1, 1).getLongCardinality());
     assertEquals(5, manager.getCommittedBlockIds(appId2, 1).getLongCardinality());
     size = storage.getHandlerSize();
     assertEquals(1, size);
     manager.removeResources(appId2);
-    assertTrue(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
+    assertTrue(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
     storageManager.removeResources(
         new AppPurgeEvent(appId2, "alex", Arrays.asList(1))
     );
-    assertFalse(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
+    assertFalse(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
     assertEquals(0, manager.getCommittedBlockIds(appId2, 1).getLongCardinality());
     size = storage.getHandlerSize();
     assertEquals(0, size);
diff --git a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
index 659a9724..4817b812 100644
--- a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
@@ -55,16 +55,16 @@ import org.apache.uniffle.common.util.ChecksumUtils;
 import org.apache.uniffle.common.util.Constants;
 import org.apache.uniffle.server.buffer.ShuffleBufferManager;
 import org.apache.uniffle.server.event.AppPurgeEvent;
-import org.apache.uniffle.server.storage.HdfsStorageManager;
+import org.apache.uniffle.server.storage.HadoopStorageManager;
 import org.apache.uniffle.server.storage.LocalStorageManagerFallbackStrategy;
 import org.apache.uniffle.server.storage.MultiStorageManager;
 import org.apache.uniffle.server.storage.StorageManager;
 import org.apache.uniffle.server.storage.StorageManagerFactory;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.AbstractStorage;
-import org.apache.uniffle.storage.common.HdfsStorage;
+import org.apache.uniffle.storage.common.HadoopStorage;
 import org.apache.uniffle.storage.common.LocalStorage;
-import org.apache.uniffle.storage.handler.impl.HdfsClientReadHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopClientReadHandler;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -75,7 +75,7 @@ import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-public class ShuffleFlushManagerTest extends HdfsTestBase {
+public class ShuffleFlushManagerTest extends HadoopTestBase {
 
   private static AtomicInteger ATOMIC_INT = new AtomicInteger(0);
   private static AtomicLong ATOMIC_LONG = new AtomicLong(0);
@@ -301,11 +301,11 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
     assertTrue(fileStatus.length > 0);
     manager.removeResources(appId1);
 
-    assertTrue(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
+    assertTrue(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
     storageManager.removeResources(
         new AppPurgeEvent(appId1, StringUtils.EMPTY, Arrays.asList(1))
     );
-    assertFalse(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
+    assertFalse(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId1));
     try {
       fs.listStatus(new Path(remoteStorage.getPath() + "/" + appId1 + "/"));
       fail("Exception should be thrown");
@@ -318,11 +318,11 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
     size = storage.getHandlerSize();
     assertEquals(1, size);
     manager.removeResources(appId2);
-    assertTrue(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
+    assertTrue(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
     storageManager.removeResources(
         new AppPurgeEvent(appId2, StringUtils.EMPTY, Arrays.asList(1))
     );
-    assertFalse(((HdfsStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
+    assertFalse(((HadoopStorageManager)storageManager).getAppIdToStorages().containsKey(appId2));
     assertEquals(0, manager.getCommittedBlockIds(appId2, 1).getLongCardinality());
     size = storage.getHandlerSize();
     assertEquals(0, size);
@@ -334,7 +334,7 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
         new AppPurgeEvent(appId2, StringUtils.EMPTY, Lists.newArrayList(1))
     );
     assertFalse(fs.exists(path));
-    HdfsStorage storageByAppId = ((HdfsStorageManager) storageManager).getStorageByAppId(appId2);
+    HadoopStorage storageByAppId = ((HadoopStorageManager) storageManager).getStorageByAppId(appId2);
     assertNull(storageByAppId);
   }
 
@@ -475,7 +475,7 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
       expectBlockIds.addLong(spb.getBlockId());
       remainIds.add(spb.getBlockId());
     }
-    HdfsClientReadHandler handler = new HdfsClientReadHandler(
+    HadoopClientReadHandler handler = new HadoopClientReadHandler(
         appId,
         shuffleId,
         partitionId,
@@ -534,7 +534,7 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
     event = createShuffleDataFlushEvent(appId, 1, 1, 1, null, 100000);
     flushManager.addToFlushQueue(event);
     Thread.sleep(1000);
-    assertTrue(event.getUnderStorage() instanceof HdfsStorage);
+    assertTrue(event.getUnderStorage() instanceof HadoopStorage);
     assertEquals(0, event.getRetryTimes());
 
     // case3: local disk is full or corrupted, fallback to HDFS
@@ -548,7 +548,7 @@ public class ShuffleFlushManagerTest extends HdfsTestBase {
     event = createShuffleDataFlushEvent(appId, 1, 1, 1, null, 100);
     flushManager.addToFlushQueue(event);
     Thread.sleep(1000);
-    assertTrue(event.getUnderStorage() instanceof HdfsStorage);
+    assertTrue(event.getUnderStorage() instanceof HadoopStorage);
     assertEquals(1, event.getRetryTimes());
   }
 
diff --git a/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java b/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
index 76769764..293aae4c 100644
--- a/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
@@ -58,9 +58,9 @@ import org.apache.uniffle.server.buffer.ShuffleBuffer;
 import org.apache.uniffle.server.buffer.ShuffleBufferManager;
 import org.apache.uniffle.server.storage.LocalStorageManager;
 import org.apache.uniffle.server.storage.StorageManager;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.LocalStorage;
-import org.apache.uniffle.storage.handler.impl.HdfsClientReadHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopClientReadHandler;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 import org.apache.uniffle.storage.util.StorageType;
 
@@ -74,7 +74,7 @@ import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-public class ShuffleTaskManagerTest extends HdfsTestBase {
+public class ShuffleTaskManagerTest extends HadoopTestBase {
 
   private static final AtomicInteger ATOMIC_INT = new AtomicInteger(0);
 
@@ -1010,7 +1010,7 @@ public class ShuffleTaskManagerTest extends HdfsTestBase {
       expectBlockIds.addLong(spb.getBlockId());
       remainIds.add(spb.getBlockId());
     }
-    HdfsClientReadHandler handler = new HdfsClientReadHandler(appId, shuffleId, partitionId,
+    HadoopClientReadHandler handler = new HadoopClientReadHandler(appId, shuffleId, partitionId,
         100, 1, 10, 1000, expectBlockIds, processBlockIds, basePath, new Configuration());
 
     ShuffleDataResult sdr = handler.readShuffleData();
diff --git a/server/src/test/java/org/apache/uniffle/server/storage/HdfsStorageManagerTest.java b/server/src/test/java/org/apache/uniffle/server/storage/HadoopStorageManagerTest.java
similarity index 76%
rename from server/src/test/java/org/apache/uniffle/server/storage/HdfsStorageManagerTest.java
rename to server/src/test/java/org/apache/uniffle/server/storage/HadoopStorageManagerTest.java
index 1def59d4..54b0e0ff 100644
--- a/server/src/test/java/org/apache/uniffle/server/storage/HdfsStorageManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/storage/HadoopStorageManagerTest.java
@@ -32,14 +32,14 @@ import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.server.ShuffleServerMetrics;
 import org.apache.uniffle.server.event.AppPurgeEvent;
 import org.apache.uniffle.server.event.ShufflePurgeEvent;
-import org.apache.uniffle.storage.common.HdfsStorage;
+import org.apache.uniffle.storage.common.HadoopStorage;
 import org.apache.uniffle.storage.util.StorageType;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
-public class HdfsStorageManagerTest {
+public class HadoopStorageManagerTest {
 
   @BeforeAll
   public static void prepare() {
@@ -55,24 +55,24 @@ public class HdfsStorageManagerTest {
   public void testRemoveResources() {
     ShuffleServerConf conf = new ShuffleServerConf();
     conf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, StorageType.MEMORY_LOCALFILE_HDFS.name());
-    HdfsStorageManager hdfsStorageManager = new HdfsStorageManager(conf);
+    HadoopStorageManager hadoopStorageManager = new HadoopStorageManager(conf);
     final String remoteStoragePath1 = "hdfs://path1";
     String appId = "testRemoveResources_appId";
-    hdfsStorageManager.registerRemoteStorage(
+    hadoopStorageManager.registerRemoteStorage(
         appId,
         new RemoteStorageInfo(remoteStoragePath1, ImmutableMap.of("k1", "v1", "k2", "v2"))
     );
-    Map<String, HdfsStorage> appStorageMap =  hdfsStorageManager.getAppIdToStorages();
+    Map<String, HadoopStorage> appStorageMap =  hadoopStorageManager.getAppIdToStorages();
 
     // case1
     assertEquals(1, appStorageMap.size());
     ShufflePurgeEvent shufflePurgeEvent = new ShufflePurgeEvent(appId, "", Arrays.asList(1));
-    hdfsStorageManager.removeResources(shufflePurgeEvent);
+    hadoopStorageManager.removeResources(shufflePurgeEvent);
     assertEquals(1, appStorageMap.size());
 
     // case2
     AppPurgeEvent appPurgeEvent = new AppPurgeEvent(appId, "");
-    hdfsStorageManager.removeResources(appPurgeEvent);
+    hadoopStorageManager.removeResources(appPurgeEvent);
     assertEquals(0, appStorageMap.size());
   }
 
@@ -83,37 +83,37 @@ public class HdfsStorageManagerTest {
     conf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, Arrays.asList("test"));
     conf.setLong(ShuffleServerConf.DISK_CAPACITY, 1024L);
     conf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, StorageType.MEMORY_LOCALFILE_HDFS.name());
-    HdfsStorageManager hdfsStorageManager = new HdfsStorageManager(conf);
+    HadoopStorageManager hadoopStorageManager = new HadoopStorageManager(conf);
     final String remoteStoragePath1 = "hdfs://path1";
     final String remoteStoragePath2 = "hdfs://path2";
     final String remoteStoragePath3 = "hdfs://path3";
-    hdfsStorageManager.registerRemoteStorage(
+    hadoopStorageManager.registerRemoteStorage(
         "app1",
         new RemoteStorageInfo(remoteStoragePath1, ImmutableMap.of("k1", "v1", "k2", "v2"))
     );
-    hdfsStorageManager.registerRemoteStorage(
+    hadoopStorageManager.registerRemoteStorage(
         "app2",
         new RemoteStorageInfo(remoteStoragePath2, ImmutableMap.of("k3", "v3"))
     );
-    hdfsStorageManager.registerRemoteStorage(
+    hadoopStorageManager.registerRemoteStorage(
         "app3",
         new RemoteStorageInfo(remoteStoragePath3, Maps.newHashMap())
     );
-    Map<String, HdfsStorage> appStorageMap =  hdfsStorageManager.getAppIdToStorages();
+    Map<String, HadoopStorage> appStorageMap =  hadoopStorageManager.getAppIdToStorages();
     assertEquals(3, appStorageMap.size());
     assertEquals(Sets.newHashSet("app1", "app2", "app3"), appStorageMap.keySet());
-    HdfsStorage hs1 = hdfsStorageManager.getAppIdToStorages().get("app1");
-    assertSame(hdfsStorageManager.getPathToStorages().get(remoteStoragePath1), hs1);
+    HadoopStorage hs1 = hadoopStorageManager.getAppIdToStorages().get("app1");
+    assertSame(hadoopStorageManager.getPathToStorages().get(remoteStoragePath1), hs1);
     assertEquals("v1", hs1.getConf().get("k1"));
     assertEquals("v2", hs1.getConf().get("k2"));
     assertNull(hs1.getConf().get("k3"));
-    HdfsStorage hs2 = hdfsStorageManager.getAppIdToStorages().get("app2");
-    assertSame(hdfsStorageManager.getPathToStorages().get(remoteStoragePath2), hs2);
+    HadoopStorage hs2 = hadoopStorageManager.getAppIdToStorages().get("app2");
+    assertSame(hadoopStorageManager.getPathToStorages().get(remoteStoragePath2), hs2);
     assertEquals("v3", hs2.getConf().get("k3"));
     assertNull(hs2.getConf().get("k1"));
     assertNull(hs2.getConf().get("k2"));
-    HdfsStorage hs3 = hdfsStorageManager.getAppIdToStorages().get("app3");
-    assertSame(hdfsStorageManager.getPathToStorages().get(remoteStoragePath3), hs3);
+    HadoopStorage hs3 = hadoopStorageManager.getAppIdToStorages().get("app3");
+    assertSame(hadoopStorageManager.getPathToStorages().get(remoteStoragePath3), hs3);
     assertNull(hs3.getConf().get("k1"));
     assertNull(hs3.getConf().get("k2"));
     assertNull(hs3.getConf().get("k3"));
diff --git a/server/src/test/java/org/apache/uniffle/server/storage/MultiStorageManagerTest.java b/server/src/test/java/org/apache/uniffle/server/storage/MultiStorageManagerTest.java
index 1c66ad43..106ec423 100644
--- a/server/src/test/java/org/apache/uniffle/server/storage/MultiStorageManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/storage/MultiStorageManagerTest.java
@@ -27,7 +27,7 @@ import org.apache.uniffle.common.RemoteStorageInfo;
 import org.apache.uniffle.common.ShufflePartitionedBlock;
 import org.apache.uniffle.server.ShuffleDataFlushEvent;
 import org.apache.uniffle.server.ShuffleServerConf;
-import org.apache.uniffle.storage.common.HdfsStorage;
+import org.apache.uniffle.storage.common.HadoopStorage;
 import org.apache.uniffle.storage.common.LocalStorage;
 import org.apache.uniffle.storage.common.Storage;
 import org.apache.uniffle.storage.util.StorageType;
@@ -54,7 +54,7 @@ public class MultiStorageManagerTest {
     assertTrue((manager.selectStorage(event) instanceof LocalStorage));
     event = new ShuffleDataFlushEvent(
         1, appId, 1, 1, 1, 1000000, blocks, null, null);
-    assertTrue((manager.selectStorage(event) instanceof HdfsStorage));
+    assertTrue((manager.selectStorage(event) instanceof HadoopStorage));
   }
 
   @Test
@@ -92,7 +92,7 @@ public class MultiStorageManagerTest {
     ShuffleDataFlushEvent event1 = new ShuffleDataFlushEvent(1, appId, 1, 1, 1, 10, blocks, null, null);
     event1.markOwnedByHugePartition();
     storage = manager.selectStorage(event1);
-    assertTrue(storage instanceof HdfsStorage);
+    assertTrue(storage instanceof HadoopStorage);
   }
 
   @Test
@@ -117,7 +117,7 @@ public class MultiStorageManagerTest {
     );
     ShuffleDataFlushEvent hugeEvent = new ShuffleDataFlushEvent(
         1, appId, 1, 1, 1, 10001, blocks, null, null);
-    assertTrue(manager.selectStorage(hugeEvent) instanceof HdfsStorage);
+    assertTrue(manager.selectStorage(hugeEvent) instanceof HadoopStorage);
 
     /**
      * case2: fallback when disk can not write
@@ -130,6 +130,6 @@ public class MultiStorageManagerTest {
     ((LocalStorage)storage).markCorrupted();
     event = new ShuffleDataFlushEvent(
         1, appId, 1, 1, 1, 1000, blocks, null, null);
-    assertTrue((manager.selectStorage(event) instanceof HdfsStorage));
+    assertTrue((manager.selectStorage(event) instanceof HadoopStorage));
   }
 }
diff --git a/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java b/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
index 2446c9af..db545453 100644
--- a/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
@@ -48,7 +48,7 @@ public class StorageManagerFallbackStrategyTest {
   public void testDefaultFallbackStrategy() {
     RotateStorageManagerFallbackStrategy fallbackStrategy = new RotateStorageManagerFallbackStrategy(conf);
     LocalStorageManager warmStorageManager = new LocalStorageManager(conf);
-    HdfsStorageManager coldStorageManager = new HdfsStorageManager(conf);
+    HadoopStorageManager coldStorageManager = new HadoopStorageManager(conf);
     StorageManager current = warmStorageManager;
     String remoteStorage = "test";
     String appId = "testDefaultFallbackStrategy_appId";
@@ -82,10 +82,10 @@ public class StorageManagerFallbackStrategyTest {
   }
 
   @Test
-  public void testHdfsFallbackStrategy() {
-    HdfsStorageManagerFallbackStrategy fallbackStrategy = new HdfsStorageManagerFallbackStrategy(conf);
+  public void testHadoopFallbackStrategy() {
+    HadoopStorageManagerFallbackStrategy fallbackStrategy = new HadoopStorageManagerFallbackStrategy(conf);
     LocalStorageManager warmStorageManager = new LocalStorageManager(conf);
-    HdfsStorageManager coldStorageManager = new HdfsStorageManager(conf);
+    HadoopStorageManager coldStorageManager = new HadoopStorageManager(conf);
     String remoteStorage = "test";
     String appId = "testHdfsFallbackStrategy_appId";
     coldStorageManager.registerRemoteStorage(appId, new RemoteStorageInfo(remoteStorage));
@@ -107,7 +107,7 @@ public class StorageManagerFallbackStrategyTest {
   public void testLocalFallbackStrategy() {
     LocalStorageManagerFallbackStrategy fallbackStrategy = new LocalStorageManagerFallbackStrategy(conf);
     LocalStorageManager warmStorageManager = new LocalStorageManager(conf);
-    HdfsStorageManager coldStorageManager = new HdfsStorageManager(conf);
+    HadoopStorageManager coldStorageManager = new HadoopStorageManager(conf);
     String remoteStorage = "test";
     String appId = "testLocalFallbackStrategy_appId";
     coldStorageManager.registerRemoteStorage(appId, new RemoteStorageInfo(remoteStorage));
diff --git a/storage/src/main/java/org/apache/uniffle/storage/common/DefaultStorageMediaProvider.java b/storage/src/main/java/org/apache/uniffle/storage/common/DefaultStorageMediaProvider.java
index 51d6dd49..7204c575 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/common/DefaultStorageMediaProvider.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/common/DefaultStorageMediaProvider.java
@@ -40,7 +40,7 @@ public class DefaultStorageMediaProvider implements StorageMediaProvider {
   private static final String NUMBERIC_STRING = "0123456789";
   private static final String BLOCK_PATH_FORMAT = "/sys/block/%s/queue/rotational";
   private static final String HDFS = "hdfs";
-  private static final List<String> OBJECT_STORE_SCHEMAS = Arrays.asList("s3", "oss", "cos", "gcs", "obs");
+  private static final List<String> OBJECT_STORE_SCHEMAS = Arrays.asList("s3", "oss", "cos", "gcs", "obs", "daos");
 
   @Override
   public StorageMedia getStorageMediaFor(String baseDir) {
diff --git a/storage/src/main/java/org/apache/uniffle/storage/common/HdfsStorage.java b/storage/src/main/java/org/apache/uniffle/storage/common/HadoopStorage.java
similarity index 86%
rename from storage/src/main/java/org/apache/uniffle/storage/common/HdfsStorage.java
rename to storage/src/main/java/org/apache/uniffle/storage/common/HadoopStorage.java
index 118686a7..214c48a7 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/common/HdfsStorage.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/common/HadoopStorage.java
@@ -27,20 +27,20 @@ import org.slf4j.LoggerFactory;
 import org.apache.uniffle.common.exception.RssException;
 import org.apache.uniffle.storage.handler.api.ServerReadHandler;
 import org.apache.uniffle.storage.handler.api.ShuffleWriteHandler;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
-import org.apache.uniffle.storage.handler.impl.PooledHdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.PooledHadoopShuffleWriteHandler;
 import org.apache.uniffle.storage.request.CreateShuffleReadHandlerRequest;
 import org.apache.uniffle.storage.request.CreateShuffleWriteHandlerRequest;
 
-public class HdfsStorage extends AbstractStorage {
+public class HadoopStorage extends AbstractStorage {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsStorage.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopStorage.class);
 
   private final String storagePath;
   private final Configuration conf;
   private String storageHost;
 
-  public HdfsStorage(String path, Configuration conf) {
+  public HadoopStorage(String path, Configuration conf) {
     this.storagePath = path;
     this.conf = conf;
     try {
@@ -82,7 +82,7 @@ public class HdfsStorage extends AbstractStorage {
     try {
       String user = request.getUser();
       if (request.getMaxFileNumber() == 1) {
-        return new HdfsShuffleWriteHandler(
+        return new HadoopShuffleWriteHandler(
             request.getAppId(),
             request.getShuffleId(),
             request.getStartPartition(),
@@ -93,7 +93,7 @@ public class HdfsStorage extends AbstractStorage {
             user
         );
       } else {
-        return new PooledHdfsShuffleWriteHandler(
+        return new PooledHadoopShuffleWriteHandler(
             request.getAppId(),
             request.getShuffleId(),
             request.getStartPartition(),
@@ -112,7 +112,7 @@ public class HdfsStorage extends AbstractStorage {
 
   @Override
   protected ServerReadHandler newReadHandler(CreateShuffleReadHandlerRequest request) {
-    throw new RssException("Hdfs storage don't support to read from sever");
+    throw new RssException("Hadoop FS storage don't support to read from sever");
   }
 
   @Override
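
A minimal usage sketch (not taken from this patch) of the renamed HadoopStorage against a non-HDFS Hadoop FS-compatible filesystem; the daos://pool0/rss/test URI and the HadoopStorageSketch class are made up for illustration, and the sketch assumes the corresponding Hadoop FileSystem implementation is available on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
import org.apache.uniffle.storage.common.HadoopStorage;

public class HadoopStorageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Any Hadoop FS-compatible scheme can be used here; hdfs:// is no longer assumed.
    Path remotePath = new Path("daos://pool0/rss/test");
    // Resolves the FileSystem implementation registered for the URI scheme.
    FileSystem fs = HadoopFilesystemProvider.getFilesystem(remotePath, conf);
    // The renamed storage class depends only on the Hadoop FS API, not on HDFS itself.
    HadoopStorage storage = new HadoopStorage(remotePath.toString(), conf);
    System.out.println("Initialized " + storage.getClass().getSimpleName() + " for " + fs.getUri());
  }
}
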
diff --git a/storage/src/main/java/org/apache/uniffle/storage/factory/ShuffleHandlerFactory.java b/storage/src/main/java/org/apache/uniffle/storage/factory/ShuffleHandlerFactory.java
index 2039cdc5..92f962fe 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/factory/ShuffleHandlerFactory.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/factory/ShuffleHandlerFactory.java
@@ -34,8 +34,8 @@ import org.apache.uniffle.common.util.RssUtils;
 import org.apache.uniffle.storage.handler.api.ClientReadHandler;
 import org.apache.uniffle.storage.handler.api.ShuffleDeleteHandler;
 import org.apache.uniffle.storage.handler.impl.ComposedClientReadHandler;
-import org.apache.uniffle.storage.handler.impl.HdfsClientReadHandler;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleDeleteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopClientReadHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleDeleteHandler;
 import org.apache.uniffle.storage.handler.impl.LocalFileClientReadHandler;
 import org.apache.uniffle.storage.handler.impl.LocalFileDeleteHandler;
 import org.apache.uniffle.storage.handler.impl.MemoryClientReadHandler;
@@ -86,7 +86,7 @@ public class ShuffleHandlerFactory {
     }
 
     if (StorageType.HDFS == type) {
-      return getHdfsClientReadHandler(request, serverInfo);
+      return getHadoopClientReadHandler(request, serverInfo);
     }
     if (StorageType.LOCALFILE == type) {
       return getLocalfileClientReaderHandler(request, serverInfo);
@@ -103,9 +103,9 @@ public class ShuffleHandlerFactory {
           () -> getLocalfileClientReaderHandler(request, serverInfo)
       );
     }
-    if (StorageType.withHDFS(type)) {
+    if (StorageType.withHadoop(type)) {
       handlers.add(
-          () -> getHdfsClientReadHandler(request, serverInfo)
+          () -> getHadoopClientReadHandler(request, serverInfo)
       );
     }
     if (handlers.isEmpty()) {
@@ -147,8 +147,8 @@ public class ShuffleHandlerFactory {
     );
   }
 
-  private ClientReadHandler getHdfsClientReadHandler(CreateShuffleReadHandlerRequest request, ShuffleServerInfo ssi) {
-    return new HdfsClientReadHandler(
+  private ClientReadHandler getHadoopClientReadHandler(CreateShuffleReadHandlerRequest request, ShuffleServerInfo ssi) {
+    return new HadoopClientReadHandler(
         request.getAppId(),
         request.getShuffleId(),
         request.getPartitionId(),
@@ -168,7 +168,7 @@ public class ShuffleHandlerFactory {
 
   public ShuffleDeleteHandler createShuffleDeleteHandler(CreateShuffleDeleteHandlerRequest request) {
     if (StorageType.HDFS.name().equals(request.getStorageType())) {
-      return new HdfsShuffleDeleteHandler(request.getConf());
+      return new HadoopShuffleDeleteHandler(request.getConf());
     } else if (StorageType.LOCALFILE.name().equals(request.getStorageType())) {
       return new LocalFileDeleteHandler();
     } else {
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandler.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandler.java
similarity index 88%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandler.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandler.java
index 003d03e4..b24f9557 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandler.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandler.java
@@ -38,9 +38,9 @@ import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
 import org.apache.uniffle.common.util.Constants;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
-public class HdfsClientReadHandler extends AbstractClientReadHandler {
+public class HadoopClientReadHandler extends AbstractClientReadHandler {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsClientReadHandler.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopClientReadHandler.class);
 
   protected final int partitionNumPerRange;
   protected final int partitionNum;
@@ -50,13 +50,13 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
   protected Roaring64NavigableMap processBlockIds;
   protected final String storageBasePath;
   protected final Configuration hadoopConf;
-  protected final List<HdfsShuffleReadHandler> readHandlers = Lists.newArrayList();
+  protected final List<HadoopShuffleReadHandler> readHandlers = Lists.newArrayList();
   private int readHandlerIndex;
   private ShuffleDataDistributionType distributionType;
   private Roaring64NavigableMap expectTaskIds;
   private boolean offHeapEnable = false;
 
-  public HdfsClientReadHandler(
+  public HadoopClientReadHandler(
       String appId,
       int shuffleId,
       int partitionId,
@@ -90,7 +90,7 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
   }
 
   // Only for test
-  public HdfsClientReadHandler(
+  public HadoopClientReadHandler(
       String appId,
       int shuffleId,
       int partitionId,
@@ -139,7 +139,7 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
             + partitionId + "] " + status.getPath());
         String filePrefix = getFileNamePrefix(status.getPath().toUri().toString());
         try {
-          HdfsShuffleReadHandler handler = new HdfsShuffleReadHandler(
+          HadoopShuffleReadHandler handler = new HadoopShuffleReadHandler(
               appId, shuffleId, partitionId, filePrefix,
               readBufferSize, expectBlockIds, processBlockIds, hadoopConf,
               distributionType, expectTaskIds, offHeapEnable);
@@ -149,7 +149,7 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
         }
       }
       Collections.shuffle(readHandlers);
-      LOG.info("Reading order of HDFS files with name prefix: {}",
+      LOG.info("Reading order of Hadoop files with name prefix: {}",
           readHandlers.stream().map(x -> x.filePrefix).collect(Collectors.toList())
       );
     }
@@ -169,16 +169,16 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
       return new ShuffleDataResult();
     }
 
-    HdfsShuffleReadHandler hdfsShuffleFileReader = readHandlers.get(readHandlerIndex);
-    ShuffleDataResult shuffleDataResult = hdfsShuffleFileReader.readShuffleData();
+    HadoopShuffleReadHandler hadoopShuffleFileReader = readHandlers.get(readHandlerIndex);
+    ShuffleDataResult shuffleDataResult = hadoopShuffleFileReader.readShuffleData();
 
     while (shuffleDataResult == null) {
       ++readHandlerIndex;
       if (readHandlerIndex >= readHandlers.size()) {
         return new ShuffleDataResult();
       }
-      hdfsShuffleFileReader = readHandlers.get(readHandlerIndex);
-      shuffleDataResult = hdfsShuffleFileReader.readShuffleData();
+      hadoopShuffleFileReader = readHandlers.get(readHandlerIndex);
+      shuffleDataResult = hadoopShuffleFileReader.readShuffleData();
     }
 
     return shuffleDataResult;
@@ -191,12 +191,12 @@ public class HdfsClientReadHandler extends AbstractClientReadHandler {
 
   @Override
   public synchronized void close() {
-    for (HdfsShuffleReadHandler handler : readHandlers) {
+    for (HadoopShuffleReadHandler handler : readHandlers) {
       handler.close();
     }
   }
 
-  protected List<HdfsShuffleReadHandler> getHdfsShuffleFileReadHandlers() {
+  protected List<HadoopShuffleReadHandler> getHdfsShuffleFileReadHandlers() {
     return readHandlers;
   }
 
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileReader.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileReader.java
similarity index 94%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileReader.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileReader.java
index 1c9cb41a..b8f4fe7f 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileReader.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileReader.java
@@ -32,15 +32,15 @@ import org.slf4j.LoggerFactory;
 import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
 import org.apache.uniffle.storage.api.FileReader;
 
-public class HdfsFileReader implements FileReader, Closeable {
+public class HadoopFileReader implements FileReader, Closeable {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsFileReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopFileReader.class);
   private Path path;
   private Configuration hadoopConf;
   private FSDataInputStream fsDataInputStream;
   private FileSystem fileSystem;
 
-  public HdfsFileReader(Path path, Configuration hadoopConf) throws Exception {
+  public HadoopFileReader(Path path, Configuration hadoopConf) throws Exception {
     this.path = path;
     this.hadoopConf = hadoopConf;
     createStream();
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriter.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriter.java
similarity index 94%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriter.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriter.java
index 5e179a6b..fb7f3afe 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriter.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriter.java
@@ -33,9 +33,9 @@ import org.slf4j.LoggerFactory;
 import org.apache.uniffle.storage.api.FileWriter;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 
-public class HdfsFileWriter implements FileWriter, Closeable {
+public class HadoopFileWriter implements FileWriter, Closeable {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsFileWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopFileWriter.class);
 
   private final FileSystem fileSystem;
 
@@ -44,7 +44,7 @@ public class HdfsFileWriter implements FileWriter, Closeable {
   private FSDataOutputStream fsDataOutputStream;
   private long nextOffset;
 
-  public HdfsFileWriter(FileSystem fileSystem, Path path, Configuration hadoopConf) throws IOException {
+  public HadoopFileWriter(FileSystem fileSystem, Path path, Configuration hadoopConf) throws IOException {
     this.path = path;
     this.hadoopConf = hadoopConf;
     this.fileSystem = fileSystem;
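
As a quick orientation for the renamed reader/writer pair, a minimal round-trip could look like the sketch below. It is only a sketch: the URI, path, and payload are illustrative, and the same calls apply to any FileSystem that the Hadoop Configuration can resolve.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import org.apache.uniffle.storage.handler.impl.HadoopFileReader;
    import org.apache.uniffle.storage.handler.impl.HadoopFileWriter;

    public class HadoopFileRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative URI; any Hadoop-compatible scheme resolvable by the conf works the same way.
        Path path = new Path("hdfs://ns1/rss/demo.data");
        FileSystem fs = path.getFileSystem(conf);

        byte[] data = "hello uniffle".getBytes();
        try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
          writer.writeData(data);                        // bytes land at the current offset (0 for a new file)
        }
        try (HadoopFileReader reader = new HadoopFileReader(path, conf)) {
          byte[] readBack = reader.read(0, data.length); // read the same range back
          System.out.println(new String(readBack));
        }
      }
    }
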
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleDeleteHandler.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleDeleteHandler.java
similarity index 80%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleDeleteHandler.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleDeleteHandler.java
index 43994d69..beae8a6b 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleDeleteHandler.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleDeleteHandler.java
@@ -26,13 +26,13 @@ import org.slf4j.LoggerFactory;
 import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
 import org.apache.uniffle.storage.handler.api.ShuffleDeleteHandler;
 
-public class HdfsShuffleDeleteHandler implements ShuffleDeleteHandler {
+public class HadoopShuffleDeleteHandler implements ShuffleDeleteHandler {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsShuffleDeleteHandler.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopShuffleDeleteHandler.class);
 
   private Configuration hadoopConf;
 
-  public HdfsShuffleDeleteHandler(Configuration hadoopConf) {
+  public HadoopShuffleDeleteHandler(Configuration hadoopConf) {
     this.hadoopConf = hadoopConf;
   }
 
@@ -44,7 +44,7 @@ public class HdfsShuffleDeleteHandler implements ShuffleDeleteHandler {
       int times = 0;
       int retryMax = 5;
       long start = System.currentTimeMillis();
-      LOG.info("Try delete shuffle data in HDFS for appId[{}] of user[{}] with {}",appId, user, path);
+      LOG.info("Try delete shuffle data in Hadoop FS for appId[{}] of user[{}] with {}",appId, user, path);
       while (!isSuccess && times < retryMax) {
         try {
           FileSystem fileSystem = HadoopFilesystemProvider.getFilesystem(user, path, hadoopConf);
@@ -61,10 +61,10 @@ public class HdfsShuffleDeleteHandler implements ShuffleDeleteHandler {
         }
       }
       if (isSuccess) {
-        LOG.info("Delete shuffle data in HDFS for appId[" + appId + "] with " + path + " successfully in "
+        LOG.info("Delete shuffle data in Hadoop FS for appId[" + appId + "] with " + path + " successfully in "
             + (System.currentTimeMillis() - start) + " ms");
       } else {
-        LOG.info("Failed to delete shuffle data in HDFS for appId[" + appId + "] with " + path + " in "
+        LOG.info("Failed to delete shuffle data in Hadoop FS for appId[" + appId + "] with " + path + " in "
             + (System.currentTimeMillis() - start) + " ms");
       }
     }
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandler.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandler.java
similarity index 90%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandler.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandler.java
index 56151198..d747a55b 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandler.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandler.java
@@ -35,18 +35,18 @@ import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
 /**
- * HdfsShuffleFileReadHandler is a shuffle-specific file read handler, it contains two HdfsFileReader
+ * HadoopShuffleReadHandler is a shuffle-specific file read handler; it contains two HadoopFileReader
  * instances created by using the index file and its indexed data file.
  */
-public class HdfsShuffleReadHandler extends DataSkippableReadHandler {
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsShuffleReadHandler.class);
+public class HadoopShuffleReadHandler extends DataSkippableReadHandler {
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopShuffleReadHandler.class);
 
   protected final String filePrefix;
-  protected final HdfsFileReader indexReader;
-  protected final HdfsFileReader dataReader;
+  protected final HadoopFileReader indexReader;
+  protected final HadoopFileReader dataReader;
   protected final boolean offHeapEnabled;
 
-  public HdfsShuffleReadHandler(
+  public HadoopShuffleReadHandler(
       String appId,
       int shuffleId,
       int partitionId,
@@ -61,13 +61,13 @@ public class HdfsShuffleReadHandler extends DataSkippableReadHandler {
     super(appId, shuffleId, partitionId, readBufferSize, expectBlockIds, processBlockIds,
         distributionType, expectTaskIds);
     this.filePrefix = filePrefix;
-    this.indexReader = createHdfsReader(ShuffleStorageUtils.generateIndexFileName(filePrefix), conf);
-    this.dataReader = createHdfsReader(ShuffleStorageUtils.generateDataFileName(filePrefix), conf);
+    this.indexReader = createHadoopReader(ShuffleStorageUtils.generateIndexFileName(filePrefix), conf);
+    this.dataReader = createHadoopReader(ShuffleStorageUtils.generateDataFileName(filePrefix), conf);
     this.offHeapEnabled = offHeapEnabled;
   }
 
   // Only for test
-  public HdfsShuffleReadHandler(
+  public HadoopShuffleReadHandler(
       String appId,
       int shuffleId,
       int partitionId,
@@ -183,10 +183,10 @@ public class HdfsShuffleReadHandler extends DataSkippableReadHandler {
     }
   }
 
-  protected HdfsFileReader createHdfsReader(
+  protected HadoopFileReader createHadoopReader(
       String fileName, Configuration hadoopConf) throws Exception {
     Path path = new Path(fileName);
-    return new HdfsFileReader(path, hadoopConf);
+    return new HadoopFileReader(path, hadoopConf);
   }
 
   public List<ShuffleDataSegment> getShuffleDataSegments() {
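
To show how the renamed read handler is driven, here is a rough sketch using the short constructor that the test code later in this diff relies on. The appId, path, buffer size, and block ids are all illustrative, and a real run needs an existing data/index file pair on the target filesystem.

    import org.apache.hadoop.conf.Configuration;
    import org.roaringbitmap.longlong.Roaring64NavigableMap;

    import org.apache.uniffle.common.BufferSegment;
    import org.apache.uniffle.common.ShuffleDataResult;
    import org.apache.uniffle.storage.handler.impl.HadoopShuffleReadHandler;

    public class ReadHandlerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Bitmaps of block ids we expect to read and block ids already processed.
        Roaring64NavigableMap expectBlockIds = Roaring64NavigableMap.bitmapOf(1L, 2L, 3L);
        Roaring64NavigableMap processBlockIds = Roaring64NavigableMap.bitmapOf();

        // The prefix identifies one data/index file pair on the Hadoop-compatible FS.
        HadoopShuffleReadHandler handler = new HadoopShuffleReadHandler(
            "appId", 0, 1, "hdfs://ns1/rss/appId/0/1-1/server_0",
            1024 * 1024, expectBlockIds, processBlockIds, conf);

        ShuffleDataResult result;
        // readShuffleData() returns null once the file pair is exhausted,
        // which is also how HadoopClientReadHandler rotates between handlers.
        while ((result = handler.readShuffleData()) != null) {
          for (BufferSegment segment : result.getBufferSegments()) {
            System.out.println("read block " + segment.getBlockId());
          }
        }
        handler.close();
      }
    }
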
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleWriteHandler.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleWriteHandler.java
similarity index 90%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleWriteHandler.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleWriteHandler.java
index 776115ef..920acef9 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleWriteHandler.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleWriteHandler.java
@@ -37,9 +37,9 @@ import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 import org.apache.uniffle.storage.handler.api.ShuffleWriteHandler;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
-public class HdfsShuffleWriteHandler implements ShuffleWriteHandler {
+public class HadoopShuffleWriteHandler implements ShuffleWriteHandler {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsShuffleWriteHandler.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopShuffleWriteHandler.class);
 
   private Configuration hadoopConf;
   private String basePath;
@@ -51,7 +51,7 @@ public class HdfsShuffleWriteHandler implements ShuffleWriteHandler {
 
   // Only for test cases when using non-kerberized dfs cluster.
   @VisibleForTesting
-  public HdfsShuffleWriteHandler(
+  public HadoopShuffleWriteHandler(
       String appId,
       int shuffleId,
       int startPartition,
@@ -66,7 +66,7 @@ public class HdfsShuffleWriteHandler implements ShuffleWriteHandler {
     initialize();
   }
 
-  public HdfsShuffleWriteHandler(
+  public HadoopShuffleWriteHandler(
       String appId,
       int shuffleId,
       int startPartition,
@@ -113,8 +113,8 @@ public class HdfsShuffleWriteHandler implements ShuffleWriteHandler {
       // change the prefix of file name if write failed before
       String dataFileName = ShuffleStorageUtils.generateDataFileName(fileNamePrefix + "_" + failTimes);
       String indexFileName = ShuffleStorageUtils.generateIndexFileName(fileNamePrefix + "_" + failTimes);
-      try (HdfsFileWriter dataWriter = createWriter(dataFileName);
-           HdfsFileWriter indexWriter = createWriter(indexFileName)) {
+      try (HadoopFileWriter dataWriter = createWriter(dataFileName);
+           HadoopFileWriter indexWriter = createWriter(indexFileName)) {
         for (ShufflePartitionedBlock block : shuffleBlocks) {
           long blockId = block.getBlockId();
           long crc = block.getCrc();
@@ -145,9 +145,9 @@ public class HdfsShuffleWriteHandler implements ShuffleWriteHandler {
   }
 
   @VisibleForTesting
-  public HdfsFileWriter createWriter(String fileName) throws IOException, IllegalStateException {
+  public HadoopFileWriter createWriter(String fileName) throws IOException, IllegalStateException {
     Path path = new Path(basePath, fileName);
-    HdfsFileWriter writer = new HdfsFileWriter(fileSystem, path, hadoopConf);
+    HadoopFileWriter writer = new HadoopFileWriter(fileSystem, path, hadoopConf);
     return writer;
   }
 
diff --git a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandler.java b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandler.java
similarity index 85%
rename from storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandler.java
rename to storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandler.java
index c971e6cb..31113ce5 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandler.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandler.java
@@ -33,15 +33,15 @@ import org.apache.uniffle.storage.handler.api.ShuffleWriteHandler;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
 /**
- * The {@link PooledHdfsShuffleWriteHandler} is a wrapper of underlying multiple
- * {@link HdfsShuffleWriteHandler} to support concurrency control of writing single
+ * The {@link PooledHadoopShuffleWriteHandler} is a wrapper of underlying multiple
+ * {@link HadoopShuffleWriteHandler} to support concurrency control of writing single
  * partition to multi files.
  *
  * By leveraging {@link LinkedBlockingDeque}, it will always write the same file when
- * no race condition, which is good for reducing file numbers for HDFS.
+ * there is no race condition, which is good for reducing the number of files on Hadoop FS.
  */
-public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
-  private static final Logger LOGGER = LoggerFactory.getLogger(PooledHdfsShuffleWriteHandler.class);
+public class PooledHadoopShuffleWriteHandler implements ShuffleWriteHandler {
+  private static final Logger LOGGER = LoggerFactory.getLogger(PooledHadoopShuffleWriteHandler.class);
 
   private final LinkedBlockingDeque<ShuffleWriteHandler> queue;
   private final int maxConcurrency;
@@ -51,14 +51,14 @@ public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
 
   // Only for tests
   @VisibleForTesting
-  public PooledHdfsShuffleWriteHandler(LinkedBlockingDeque<ShuffleWriteHandler> queue) {
+  public PooledHadoopShuffleWriteHandler(LinkedBlockingDeque<ShuffleWriteHandler> queue) {
     this.queue = queue;
     this.maxConcurrency = queue.size();
     this.basePath = StringUtils.EMPTY;
   }
 
   @VisibleForTesting
-  public PooledHdfsShuffleWriteHandler(
+  public PooledHadoopShuffleWriteHandler(
       LinkedBlockingDeque<ShuffleWriteHandler> queue,
       int maxConcurrency,
       Function<Integer, ShuffleWriteHandler> createWriterFunc) {
@@ -68,7 +68,7 @@ public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
     this.createWriterFunc = createWriterFunc;
   }
 
-  public PooledHdfsShuffleWriteHandler(
+  public PooledHadoopShuffleWriteHandler(
       String appId,
       int shuffleId,
       int startPartition,
@@ -85,7 +85,7 @@ public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
 
     this.createWriterFunc = index -> {
       try {
-        return new HdfsShuffleWriteHandler(
+        return new HadoopShuffleWriteHandler(
             appId,
             shuffleId,
             startPartition,
@@ -96,7 +96,7 @@ public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
             user
         );
       } catch (Exception e) {
-        throw new RssException("Errors on initializing Hdfs writer handler.", e);
+        throw new RssException("Errors on initializing Hadoop FS writer handler.", e);
       }
     };
   }
@@ -112,7 +112,7 @@ public class PooledHdfsShuffleWriteHandler implements ShuffleWriteHandler {
     }
 
     if (queue.isEmpty()) {
-      LOGGER.warn("No free hdfs writer handler, it will wait. storage path: {}", basePath);
+      LOGGER.warn("No free Hadoop FS writer handler, it will wait. storage path: {}", basePath);
     }
     ShuffleWriteHandler writeHandler = queue.take();
     try {
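
The javadoc above explains the concurrency control: underlying handlers sit in a LinkedBlockingDeque, a caller takes one, writes, and hands it back, so an uncontended caller keeps reusing the same file. A self-contained sketch of that pattern follows; the Writer interface is a hypothetical stand-in rather than a Uniffle class, and returning the handler to the head of the deque is just one way to get the "same file when uncontended" behaviour.

    import java.util.concurrent.LinkedBlockingDeque;

    public class PooledWritePattern {
      // Hypothetical stand-in for a per-file write handler; not a Uniffle class.
      interface Writer {
        void write(String data) throws Exception;
      }

      private final LinkedBlockingDeque<Writer> pool = new LinkedBlockingDeque<>();

      PooledWritePattern(Writer... writers) {
        for (Writer w : writers) {
          pool.addLast(w);
        }
      }

      void write(String data) throws Exception {
        Writer writer = pool.take();     // blocks when every underlying writer is checked out
        try {
          writer.write(data);
        } finally {
          pool.addFirst(writer);         // return to the head: an uncontended caller
        }                                // keeps reusing the same writer, hence one file
      }

      public static void main(String[] args) throws Exception {
        PooledWritePattern pooled = new PooledWritePattern(
            data -> System.out.println("file-0 <- " + data),
            data -> System.out.println("file-1 <- " + data));
        pooled.write("block-a");
        pooled.write("block-b");         // still file-0 when there is no contention
      }
    }
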
diff --git a/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java b/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
index d268775c..d9599d71 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
@@ -37,12 +37,12 @@ import org.apache.uniffle.common.exception.RssException;
 import org.apache.uniffle.common.util.Constants;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 import org.apache.uniffle.storage.handler.impl.DataFileSegment;
-import org.apache.uniffle.storage.handler.impl.HdfsFileWriter;
+import org.apache.uniffle.storage.handler.impl.HadoopFileWriter;
 
 public class ShuffleStorageUtils {
 
-  static final String HDFS_PATH_SEPARATOR = "/";
-  static final String HDFS_DIRNAME_SEPARATOR = "-";
+  static final String HADOOP_PATH_SEPARATOR = "/";
+  static final String HADOOP_DIRNAME_SEPARATOR = "-";
   private static final Logger LOG = LoggerFactory.getLogger(ShuffleStorageUtils.class);
 
   private ShuffleStorageUtils() {
@@ -107,29 +107,29 @@ public class ShuffleStorageUtils {
 
   public static String getShuffleDataPath(String appId, int shuffleId) {
     return String.join(
-        HDFS_PATH_SEPARATOR,
+        HADOOP_PATH_SEPARATOR,
         appId,
         String.valueOf(shuffleId));
   }
 
   public static String getShuffleDataPath(String appId, int shuffleId, int start, int end) {
     return String.join(
-        HDFS_PATH_SEPARATOR,
+        HADOOP_PATH_SEPARATOR,
         appId,
         String.valueOf(shuffleId),
-        String.join(HDFS_DIRNAME_SEPARATOR, String.valueOf(start), String.valueOf(end)));
+        String.join(HADOOP_DIRNAME_SEPARATOR, String.valueOf(start), String.valueOf(end)));
   }
 
   public static String getCombineDataPath(String appId, int shuffleId) {
     return String.join(
-        HDFS_PATH_SEPARATOR,
+        HADOOP_PATH_SEPARATOR,
         appId,
         String.valueOf(shuffleId),
         "combine");
   }
 
   public static String getFullShuffleDataFolder(String basePath, String subPath) {
-    return String.join(HDFS_PATH_SEPARATOR, basePath, subPath);
+    return String.join(HADOOP_PATH_SEPARATOR, basePath, subPath);
   }
 
   public static String getShuffleDataPathWithRange(
@@ -186,7 +186,7 @@ public class ShuffleStorageUtils {
   }
 
 
-  public static long uploadFile(File file, HdfsFileWriter writer, int bufferSize) throws IOException {
+  public static long uploadFile(File file, HadoopFileWriter writer, int bufferSize) throws IOException {
     try (FileInputStream inputStream = new FileInputStream(file)) {
       return writer.copy(inputStream, bufferSize);
     } catch (IOException e) {
diff --git a/storage/src/main/java/org/apache/uniffle/storage/util/StorageType.java b/storage/src/main/java/org/apache/uniffle/storage/util/StorageType.java
index 37493548..ee420d27 100644
--- a/storage/src/main/java/org/apache/uniffle/storage/util/StorageType.java
+++ b/storage/src/main/java/org/apache/uniffle/storage/util/StorageType.java
@@ -44,7 +44,7 @@ public enum StorageType {
     return (storageType.getVal() & LOCALFILE.getVal()) != 0;
   }
 
-  public static boolean withHDFS(StorageType storageType) {
+  public static boolean withHadoop(StorageType storageType) {
     return (storageType.getVal() & HDFS.getVal()) != 0;
   }
 }
diff --git a/storage/src/test/java/org/apache/uniffle/storage/HdfsShuffleHandlerTestBase.java b/storage/src/test/java/org/apache/uniffle/storage/HadoopShuffleHandlerTestBase.java
similarity index 90%
rename from storage/src/test/java/org/apache/uniffle/storage/HdfsShuffleHandlerTestBase.java
rename to storage/src/test/java/org/apache/uniffle/storage/HadoopShuffleHandlerTestBase.java
index 52d34efc..4abf7738 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/HdfsShuffleHandlerTestBase.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/HadoopShuffleHandlerTestBase.java
@@ -34,19 +34,19 @@ import org.apache.uniffle.common.util.ByteBufUtils;
 import org.apache.uniffle.common.util.ChecksumUtils;
 import org.apache.uniffle.common.util.Constants;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
-import org.apache.uniffle.storage.handler.impl.HdfsFileReader;
-import org.apache.uniffle.storage.handler.impl.HdfsFileWriter;
-import org.apache.uniffle.storage.handler.impl.HdfsShuffleWriteHandler;
+import org.apache.uniffle.storage.handler.impl.HadoopFileReader;
+import org.apache.uniffle.storage.handler.impl.HadoopFileWriter;
+import org.apache.uniffle.storage.handler.impl.HadoopShuffleWriteHandler;
 
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class HdfsShuffleHandlerTestBase {
+public class HadoopShuffleHandlerTestBase {
 
   private static final AtomicLong ATOMIC_LONG = new AtomicLong(0);
 
   public static void writeTestData(
-      HdfsShuffleWriteHandler writeHandler,
+      HadoopShuffleWriteHandler writeHandler,
       int num, int length, long taskAttemptId,
       Map<Long, byte[]> expectedData) throws Exception {
     List<ShufflePartitionedBlock> blocks = Lists.newArrayList();
@@ -63,7 +63,7 @@ public class HdfsShuffleHandlerTestBase {
   }
 
   public static void writeTestData(
-      HdfsFileWriter writer,
+      HadoopFileWriter writer,
       int partitionId,
       int num, int length, long taskAttemptId,
       Map<Long, byte[]> expectedData,
@@ -95,7 +95,7 @@ public class HdfsShuffleHandlerTestBase {
     expectedIndexSegments.put(partitionId, segments);
   }
 
-  public static byte[] writeData(HdfsFileWriter writer, int len) throws IOException {
+  public static byte[] writeData(HadoopFileWriter writer, int len) throws IOException {
     byte[] data = new byte[len];
     new Random().nextBytes(data);
     writer.writeData(data);
@@ -133,10 +133,10 @@ public class HdfsShuffleHandlerTestBase {
     }
   }
 
-  public static HdfsFileReader createHdfsReader(
+  public static HadoopFileReader createHadoopReader(
       String folder, String fileName, Configuration hadoopConf) throws Exception {
     Path path = new Path(folder, fileName);
-    HdfsFileReader reader = new HdfsFileReader(path, hadoopConf);
+    HadoopFileReader reader = new HadoopFileReader(path, hadoopConf);
     return reader;
   }
 
diff --git a/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java b/storage/src/test/java/org/apache/uniffle/storage/HadoopTestBase.java
similarity index 97%
rename from storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java
rename to storage/src/test/java/org/apache/uniffle/storage/HadoopTestBase.java
index acab2e83..3aa79b79 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/HadoopTestBase.java
@@ -32,7 +32,7 @@ import org.junit.jupiter.api.io.TempDir;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class HdfsTestBase implements Serializable {
+public class HadoopTestBase implements Serializable {
 
   public static Configuration conf;
   protected static String HDFS_URI;
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandlerTest.java
similarity index 87%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandlerTest.java
index 1688cf65..157faff1 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsClientReadHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopClientReadHandlerTest.java
@@ -32,24 +32,24 @@ import org.roaringbitmap.longlong.Roaring64NavigableMap;
 import org.apache.uniffle.common.BufferSegment;
 import org.apache.uniffle.common.ShuffleDataResult;
 import org.apache.uniffle.common.ShuffleIndexResult;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
-import static org.apache.uniffle.storage.HdfsShuffleHandlerTestBase.calcExpectedSegmentNum;
-import static org.apache.uniffle.storage.HdfsShuffleHandlerTestBase.checkData;
-import static org.apache.uniffle.storage.HdfsShuffleHandlerTestBase.writeTestData;
+import static org.apache.uniffle.storage.HadoopShuffleHandlerTestBase.calcExpectedSegmentNum;
+import static org.apache.uniffle.storage.HadoopShuffleHandlerTestBase.checkData;
+import static org.apache.uniffle.storage.HadoopShuffleHandlerTestBase.writeTestData;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-public class HdfsClientReadHandlerTest extends HdfsTestBase {
+public class HadoopClientReadHandlerTest extends HadoopTestBase {
 
   public static void createAndRunCases(String clusterPathPrefix, Configuration hadoopConf, String writeUser)
       throws Exception {
     String basePath = clusterPathPrefix + "clientReadTest1";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler(
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler(
             "appId",
             0,
             1,
@@ -80,14 +80,14 @@ public class HdfsClientReadHandlerTest extends HdfsTestBase {
      * This part is to check the fault tolerance of reading HDFS incomplete index file
      */
     String indexFileName = ShuffleStorageUtils.generateIndexFileName("test_0");
-    HdfsFileWriter indexWriter = writeHandler.createWriter(indexFileName);
+    HadoopFileWriter indexWriter = writeHandler.createWriter(indexFileName);
     indexWriter.writeData(ByteBuffer.allocate(4).putInt(169560).array());
     indexWriter.writeData(ByteBuffer.allocate(4).putInt(999).array());
     indexWriter.close();
 
     Roaring64NavigableMap processBlockIds = Roaring64NavigableMap.bitmapOf();
 
-    HdfsShuffleReadHandler indexReader = new HdfsShuffleReadHandler(
+    HadoopShuffleReadHandler indexReader = new HadoopShuffleReadHandler(
         "appId", 0, 1, basePath + "/appId/0/1-1/test_0",
         readBufferSize, expectBlockIds, processBlockIds, hadoopConf);
     try {
@@ -97,7 +97,7 @@ public class HdfsClientReadHandlerTest extends HdfsTestBase {
       fail();
     }
 
-    HdfsClientReadHandler handler = new HdfsClientReadHandler(
+    HadoopClientReadHandler handler = new HadoopClientReadHandler(
         "appId",
         0,
         1,
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileReaderTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileReaderTest.java
similarity index 88%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileReaderTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileReaderTest.java
index 437ed639..a343ff18 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileReaderTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileReaderTest.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Test;
 
 import org.apache.uniffle.common.util.ChecksumUtils;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -33,14 +33,14 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class HdfsFileReaderTest extends HdfsTestBase {
+public class HadoopFileReaderTest extends HadoopTestBase {
 
   @Test
   public void createStreamTest() throws Exception {
     Path path = new Path(HDFS_URI, "createStreamTest");
     fs.create(path);
 
-    try (HdfsFileReader reader = new HdfsFileReader(path, conf)) {
+    try (HadoopFileReader reader = new HadoopFileReader(path, conf)) {
       assertTrue(fs.isFile(path));
       assertEquals(0L, reader.getOffset());
     }
@@ -53,7 +53,7 @@ public class HdfsFileReaderTest extends HdfsTestBase {
     Path path = new Path(HDFS_URI, "createStreamFirstTest");
 
     assertFalse(fs.isFile(path));
-    Throwable ise = assertThrows(IllegalStateException.class, () -> new HdfsFileReader(path, conf));
+    Throwable ise = assertThrows(IllegalStateException.class, () -> new HadoopFileReader(path, conf));
     assertTrue(ise.getMessage().startsWith(HDFS_URI + "createStreamFirstTest don't exist"));
   }
 
@@ -66,11 +66,11 @@ public class HdfsFileReaderTest extends HdfsTestBase {
     new Random().nextBytes(data);
     long crc11 = ChecksumUtils.getCrc32(ByteBuffer.wrap(data, offset, length));
 
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       writer.writeData(data);
     }
     FileBasedShuffleSegment segment = new FileBasedShuffleSegment(23, offset, length, length, 0xdeadbeef, 1);
-    try (HdfsFileReader reader = new HdfsFileReader(path, conf)) {
+    try (HadoopFileReader reader = new HadoopFileReader(path, conf)) {
       byte[] actual = reader.read(segment.getOffset(), segment.getLength());
       long crc22 = ChecksumUtils.getCrc32(actual);
 
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriterTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriterTest.java
similarity index 87%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriterTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriterTest.java
index f816698e..293f619f 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsFileWriterTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopFileWriterTest.java
@@ -27,19 +27,19 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class HdfsFileWriterTest extends HdfsTestBase {
+public class HadoopFileWriterTest extends HadoopTestBase {
 
   @Test
   public void createStreamFirstTest() throws IOException {
     Path path = new Path(HDFS_URI, "createStreamFirstTest");
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertTrue(fs.isFile(path));
       assertEquals(0, writer.nextOffset());
     }
@@ -52,14 +52,14 @@ public class HdfsFileWriterTest extends HdfsTestBase {
 
     // create a file and fill 32 bytes
     Path path = new Path(HDFS_URI, "createStreamAppendTest");
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertEquals(0, writer.nextOffset());
       writer.writeData(data);
       assertEquals(32, writer.nextOffset());
     }
 
     // open existing file using append
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertTrue(fs.isFile(path));
       assertEquals(32, writer.nextOffset());
     }
@@ -67,7 +67,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
     // disable the append support
     conf.setBoolean("dfs.support.append", false);
     assertTrue(fs.isFile(path));
-    Throwable ise = assertThrows(IllegalStateException.class, () -> new HdfsFileWriter(fs, path, conf));
+    Throwable ise = assertThrows(IllegalStateException.class, () -> new HadoopFileWriter(fs, path, conf));
     assertTrue(ise.getMessage().startsWith(path + " exists but append mode is not support!"));
   }
 
@@ -77,7 +77,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
     Path path = new Path(HDFS_URI, "createStreamDirectory");
     fs.mkdirs(path);
 
-    Throwable ise = assertThrows(IllegalStateException.class, () -> new HdfsFileWriter(fs, path, conf));
+    Throwable ise = assertThrows(IllegalStateException.class, () -> new HadoopFileWriter(fs, path, conf));
     assertTrue(ise.getMessage().startsWith(HDFS_URI + "createStreamDirectory is a directory!"));
   }
 
@@ -89,7 +89,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
     buf.put(data);
     Path path = new Path(HDFS_URI, "createStreamTest");
 
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertEquals(0, writer.nextOffset());
       buf.flip();
       writer.writeData(buf.array());
@@ -103,7 +103,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
     new Random().nextBytes(data);
 
     Path path = new Path(HDFS_URI, "writeBufferTest");
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertEquals(0, writer.nextOffset());
       writer.writeData(data);
       assertEquals(32, writer.nextOffset());
@@ -128,7 +128,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
     buf.asIntBuffer().put(data);
 
     Path path = new Path(HDFS_URI, "writeBufferArrayTest");
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       assertEquals(0, writer.nextOffset());
       writer.writeData(buf.array());
       assertEquals(20, writer.nextOffset());
@@ -150,7 +150,7 @@ public class HdfsFileWriterTest extends HdfsTestBase {
         23, 128, 32, 32, 0xdeadbeef, 0);
 
     Path path = new Path(HDFS_URI, "writeSegmentTest");
-    try (HdfsFileWriter writer = new HdfsFileWriter(fs, path, conf)) {
+    try (HadoopFileWriter writer = new HadoopFileWriter(fs, path, conf)) {
       writer.writeIndex(segment);
     }
 
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopHandlerTest.java
similarity index 88%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopHandlerTest.java
index 3f8ea968..87df9dd3 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopHandlerTest.java
@@ -33,17 +33,17 @@ import org.roaringbitmap.longlong.Roaring64NavigableMap;
 import org.apache.uniffle.common.BufferSegment;
 import org.apache.uniffle.common.ShuffleDataResult;
 import org.apache.uniffle.common.ShufflePartitionedBlock;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-public class HdfsHandlerTest extends HdfsTestBase {
+public class HadoopHandlerTest extends HadoopTestBase {
 
   @Test
   public void initTest() throws Exception {
     String basePath = HDFS_URI + "test_base";
-    new HdfsShuffleWriteHandler("appId", 0, 0, 0, basePath, "test", conf);
+    new HadoopShuffleWriteHandler("appId", 0, 0, 0, basePath, "test", conf);
     Path path = new Path(basePath);
     assertTrue(fs.isDirectory(path));
   }
@@ -51,8 +51,8 @@ public class HdfsHandlerTest extends HdfsTestBase {
   @Test
   public void writeTest() throws Exception {
     String basePath = HDFS_URI + "writeTest";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler("appId", 1, 1, 1, basePath, "test", conf);
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler("appId", 1, 1, 1, basePath, "test", conf);
     List<ShufflePartitionedBlock> blocks = new LinkedList<>();
     List<Long> expectedBlockId = new LinkedList<>();
     List<byte[]> expectedData = new LinkedList<>();
@@ -84,7 +84,7 @@ public class HdfsHandlerTest extends HdfsTestBase {
       pos += i * 8;
     }
     writeHandler =
-        new HdfsShuffleWriteHandler("appId", 1, 1, 1, basePath, "test", conf);
+        new HadoopShuffleWriteHandler("appId", 1, 1, 1, basePath, "test", conf);
     writeHandler.write(blocksAppend);
 
     compareDataAndIndex("appId", 1, 1, basePath, expectedData, expectedBlockId);
@@ -103,7 +103,7 @@ public class HdfsHandlerTest extends HdfsTestBase {
       expectBlockIds.addLong(blockId);
     }
     // read directly and compare
-    HdfsClientReadHandler readHandler = new HdfsClientReadHandler(
+    HadoopClientReadHandler readHandler = new HadoopClientReadHandler(
         appId, shuffleId, partitionId, 100, 1, 10,
         10000, expectBlockIds, processBlockIds, basePath, new Configuration());
     try {
@@ -114,7 +114,7 @@ public class HdfsHandlerTest extends HdfsTestBase {
     }
   }
 
-  private List<ByteBuffer> readData(HdfsClientReadHandler handler, Set<Long> blockIds) throws IllegalStateException {
+  private List<ByteBuffer> readData(HadoopClientReadHandler handler, Set<Long> blockIds) throws IllegalStateException {
     ShuffleDataResult sdr = handler.readShuffleData();
     List<BufferSegment> bufferSegments = sdr.getBufferSegments();
     List<ByteBuffer> result = Lists.newArrayList();
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandlerTest.java
similarity index 85%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandlerTest.java
index 1e12aff9..1c4a0d03 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HdfsShuffleReadHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/HadoopShuffleReadHandlerTest.java
@@ -38,20 +38,20 @@ import org.apache.uniffle.common.ShuffleDataResult;
 import org.apache.uniffle.common.ShufflePartitionedBlock;
 import org.apache.uniffle.common.util.ChecksumUtils;
 import org.apache.uniffle.common.util.Constants;
-import org.apache.uniffle.storage.HdfsShuffleHandlerTestBase;
-import org.apache.uniffle.storage.HdfsTestBase;
+import org.apache.uniffle.storage.HadoopShuffleHandlerTestBase;
+import org.apache.uniffle.storage.HadoopTestBase;
 import org.apache.uniffle.storage.common.FileBasedShuffleSegment;
 import org.apache.uniffle.storage.util.ShuffleStorageUtils;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 
-public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
+public class HadoopShuffleReadHandlerTest extends HadoopTestBase {
 
   public static void createAndRunCases(String clusterPathPrefix, Configuration conf, String user) throws Exception {
     String basePath = clusterPathPrefix + "HdfsShuffleFileReadHandlerTest";
-    HdfsShuffleWriteHandler writeHandler =
-        new HdfsShuffleWriteHandler(
+    HadoopShuffleWriteHandler writeHandler =
+        new HadoopShuffleWriteHandler(
             "appId",
             0,
             1,
@@ -67,23 +67,23 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     int totalBlockNum = 0;
     int expectTotalBlockNum = new Random().nextInt(37);
     int blockSize = new Random().nextInt(7) + 1;
-    HdfsShuffleHandlerTestBase.writeTestData(writeHandler, expectTotalBlockNum, blockSize, 0, expectedData);
-    int total = HdfsShuffleHandlerTestBase.calcExpectedSegmentNum(expectTotalBlockNum, blockSize, readBufferSize);
+    HadoopShuffleHandlerTestBase.writeTestData(writeHandler, expectTotalBlockNum, blockSize, 0, expectedData);
+    int total = HadoopShuffleHandlerTestBase.calcExpectedSegmentNum(expectTotalBlockNum, blockSize, readBufferSize);
     Roaring64NavigableMap expectBlockIds = Roaring64NavigableMap.bitmapOf();
     Roaring64NavigableMap processBlockIds =  Roaring64NavigableMap.bitmapOf();
     expectedData.forEach((id, block) -> expectBlockIds.addLong(id));
     String fileNamePrefix = ShuffleStorageUtils.getFullShuffleDataFolder(basePath,
         ShuffleStorageUtils.getShuffleDataPathWithRange("appId",
             0, 1, 1, 10)) + "/test_0";
-    HdfsShuffleReadHandler handler =
-        new HdfsShuffleReadHandler("appId", 0, 1, fileNamePrefix,
+    HadoopShuffleReadHandler handler =
+        new HadoopShuffleReadHandler("appId", 0, 1, fileNamePrefix,
             readBufferSize, expectBlockIds, processBlockIds, conf);
 
     Set<Long> actualBlockIds = Sets.newHashSet();
     for (int i = 0; i < total; ++i) {
       ShuffleDataResult shuffleDataResult = handler.readShuffleData();
       totalBlockNum += shuffleDataResult.getBufferSegments().size();
-      HdfsShuffleHandlerTestBase.checkData(shuffleDataResult, expectedData);
+      HadoopShuffleHandlerTestBase.checkData(shuffleDataResult, expectedData);
       for (BufferSegment bufferSegment : shuffleDataResult.getBufferSegments()) {
         actualBlockIds.add(bufferSegment.getBlockId());
       }
@@ -105,8 +105,8 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
   @Test
   public void testDataInconsistent() throws Exception {
     String basePath = HDFS_URI + "HdfsShuffleFileReadHandlerTest#testDataInconsistent";
-    TestHdfsShuffleWriteHandler writeHandler =
-        new TestHdfsShuffleWriteHandler(
+    TestHadoopShuffleWriteHandler writeHandler =
+        new TestHadoopShuffleWriteHandler(
             "appId",
             0,
             1,
@@ -123,7 +123,7 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     int taskAttemptId = 0;
 
     // write expectTotalBlockNum - 1 complete block
-    HdfsShuffleHandlerTestBase.writeTestData(writeHandler, expectTotalBlockNum - 1,
+    HadoopShuffleHandlerTestBase.writeTestData(writeHandler, expectTotalBlockNum - 1,
         blockSize, taskAttemptId, expectedData);
 
     // write 1 incomplete block , which only write index file
@@ -138,22 +138,22 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     writeHandler.writeIndex(blocks);
 
     int readBufferSize = 13;
-    int total = HdfsShuffleHandlerTestBase.calcExpectedSegmentNum(expectTotalBlockNum, blockSize, readBufferSize);
+    int total = HadoopShuffleHandlerTestBase.calcExpectedSegmentNum(expectTotalBlockNum, blockSize, readBufferSize);
     Roaring64NavigableMap expectBlockIds = Roaring64NavigableMap.bitmapOf();
     Roaring64NavigableMap processBlockIds =  Roaring64NavigableMap.bitmapOf();
     expectedData.forEach((id, block) -> expectBlockIds.addLong(id));
     String fileNamePrefix = ShuffleStorageUtils.getFullShuffleDataFolder(basePath,
         ShuffleStorageUtils.getShuffleDataPathWithRange("appId",
             0, 1, 1, 10)) + "/test_0";
-    HdfsShuffleReadHandler handler =
-        new HdfsShuffleReadHandler("appId", 0, 1, fileNamePrefix,
+    HadoopShuffleReadHandler handler =
+        new HadoopShuffleReadHandler("appId", 0, 1, fileNamePrefix,
             readBufferSize, expectBlockIds, processBlockIds, conf);
 
     Set<Long> actualBlockIds = Sets.newHashSet();
     for (int i = 0; i < total; ++i) {
       ShuffleDataResult shuffleDataResult = handler.readShuffleData();
       totalBlockNum += shuffleDataResult.getBufferSegments().size();
-      HdfsShuffleHandlerTestBase.checkData(shuffleDataResult, expectedData);
+      HadoopShuffleHandlerTestBase.checkData(shuffleDataResult, expectedData);
       for (BufferSegment bufferSegment : shuffleDataResult.getBufferSegments()) {
         actualBlockIds.add(bufferSegment.getBlockId());
       }
@@ -168,7 +168,7 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     assertEquals(expectedData.keySet(), actualBlockIds);
   }
 
-  static class TestHdfsShuffleWriteHandler extends HdfsShuffleWriteHandler {
+  static class TestHadoopShuffleWriteHandler extends HadoopShuffleWriteHandler {
 
     private Configuration hadoopConf;
     private Lock writeLock = new ReentrantLock();
@@ -176,7 +176,7 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     private String fileNamePrefix;
     private int failTimes = 0;
 
-    TestHdfsShuffleWriteHandler(
+    TestHadoopShuffleWriteHandler(
         String appId,
         int shuffleId,
         int startPartition,
@@ -196,7 +196,7 @@ public class HdfsShuffleReadHandlerTest extends HdfsTestBase {
     // only write index file
     public void writeIndex(
         List<ShufflePartitionedBlock> shuffleBlocks) throws IOException, IllegalStateException {
-      HdfsFileWriter indexWriter = null;
+      HadoopFileWriter indexWriter = null;
       writeLock.lock();
       try {
         try {
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsShuffleReadHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopClientReadHandlerTest.java
similarity index 76%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsShuffleReadHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopClientReadHandlerTest.java
index 5e4ea693..e90b61d7 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsShuffleReadHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopClientReadHandlerTest.java
@@ -21,14 +21,14 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
-public class KerberizedHdfsShuffleReadHandlerTest extends KerberizedHdfsBase {
+public class KerberizedHadoopClientReadHandlerTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = KerberizedHdfsShuffleReadHandlerTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = KerberizedHadoopClientReadHandlerTest.class;
+    KerberizedHadoopBase.init();
   }
 
   @BeforeEach
@@ -38,9 +38,9 @@ public class KerberizedHdfsShuffleReadHandlerTest extends KerberizedHdfsBase {
 
   @Test
   public void test() throws Exception {
-    HdfsShuffleReadHandlerTest.createAndRunCases(
-        kerberizedHdfs.getSchemeAndAuthorityPrefix(),
-        kerberizedHdfs.getConf(),
+    HadoopClientReadHandlerTest.createAndRunCases(
+        kerberizedHadoop.getSchemeAndAuthorityPrefix(),
+        kerberizedHadoop.getConf(),
         "alex"
     );
   }
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsClientReadHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopShuffleReadHandlerTest.java
similarity index 76%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsClientReadHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopShuffleReadHandlerTest.java
index 78d1521f..9ebc2226 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHdfsClientReadHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/KerberizedHadoopShuffleReadHandlerTest.java
@@ -21,14 +21,14 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
-public class KerberizedHdfsClientReadHandlerTest extends KerberizedHdfsBase {
+public class KerberizedHadoopShuffleReadHandlerTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = KerberizedHdfsClientReadHandlerTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = KerberizedHadoopShuffleReadHandlerTest.class;
+    KerberizedHadoopBase.init();
   }
 
   @BeforeEach
@@ -38,9 +38,9 @@ public class KerberizedHdfsClientReadHandlerTest extends KerberizedHdfsBase {
 
   @Test
   public void test() throws Exception {
-    HdfsClientReadHandlerTest.createAndRunCases(
-        kerberizedHdfs.getSchemeAndAuthorityPrefix(),
-        kerberizedHdfs.getConf(),
+    HadoopShuffleReadHandlerTest.createAndRunCases(
+        kerberizedHadoop.getSchemeAndAuthorityPrefix(),
+        kerberizedHadoop.getConf(),
         "alex"
     );
   }
diff --git a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandlerTest.java b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandlerTest.java
similarity index 94%
rename from storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandlerTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandlerTest.java
index e3d655b2..4c01b710 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHdfsShuffleWriteHandlerTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/handler/impl/PooledHadoopShuffleWriteHandlerTest.java
@@ -34,7 +34,7 @@ import org.apache.uniffle.storage.handler.api.ShuffleWriteHandler;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class PooledHdfsShuffleWriteHandlerTest {
+public class PooledHadoopShuffleWriteHandlerTest {
 
   static class FakedShuffleWriteHandler implements ShuffleWriteHandler {
     private List<Integer> invokedList;
@@ -69,7 +69,7 @@ public class PooledHdfsShuffleWriteHandlerTest {
     CopyOnWriteArrayList<Integer> invokedList = new CopyOnWriteArrayList<>();
     CopyOnWriteArrayList<Integer> initializedList = new CopyOnWriteArrayList<>();
 
-    PooledHdfsShuffleWriteHandler handler = new PooledHdfsShuffleWriteHandler(
+    PooledHadoopShuffleWriteHandler handler = new PooledHadoopShuffleWriteHandler(
         deque,
         maxConcurrency,
         index -> new FakedShuffleWriteHandler(initializedList, invokedList, index, () -> {
@@ -123,7 +123,7 @@ public class PooledHdfsShuffleWriteHandlerTest {
           })
       );
     }
-    PooledHdfsShuffleWriteHandler handler = new PooledHdfsShuffleWriteHandler(deque);
+    PooledHadoopShuffleWriteHandler handler = new PooledHadoopShuffleWriteHandler(deque);
 
     for (int i = 0; i < 10; i++) {
       handler.write(Collections.emptyList());
@@ -148,7 +148,7 @@ public class PooledHdfsShuffleWriteHandlerTest {
           })
       );
     }
-    PooledHdfsShuffleWriteHandler handler = new PooledHdfsShuffleWriteHandler(deque);
+    PooledHadoopShuffleWriteHandler handler = new PooledHadoopShuffleWriteHandler(deque);
 
     ExecutorService executorService = Executors.newFixedThreadPool(concurrency);
     for (int i = 0; i < concurrency; i++) {
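The pooled write handler renamed in this test serializes writes through a bounded pool of per-file handlers: a caller borrows a handler from the deque, writes, and returns it, so at most the pool's size of writers touch the filesystem at once. Below is a minimal, self-contained sketch of that borrow/write/return pattern; SimpleWriteHandler and PooledWriteHandlerSketch are illustrative stand-ins, not the real Uniffle ShuffleWriteHandler/PooledHadoopShuffleWriteHandler API.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.LinkedBlockingDeque;

    // Illustrative stand-in for a write handler; not the real Uniffle interface.
    interface SimpleWriteHandler {
      void write(List<byte[]> blocks) throws Exception;
    }

    // Borrow/write/return sketch: at most pool.size() handlers are in use at once,
    // and each borrowed handler is used exclusively until it is returned.
    class PooledWriteHandlerSketch implements SimpleWriteHandler {
      private final LinkedBlockingDeque<SimpleWriteHandler> pool;

      PooledWriteHandlerSketch(LinkedBlockingDeque<SimpleWriteHandler> pool) {
        this.pool = pool;
      }

      @Override
      public void write(List<byte[]> blocks) throws Exception {
        SimpleWriteHandler delegate = pool.take();  // blocks until a handler is free
        try {
          delegate.write(blocks);
        } finally {
          pool.addFirst(delegate);                  // hand the handler back for reuse
        }
      }
    }

    public class PooledWriteHandlerExample {
      public static void main(String[] args) throws Exception {
        LinkedBlockingDeque<SimpleWriteHandler> pool = new LinkedBlockingDeque<>();
        int maxConcurrency = 4;
        for (int i = 0; i < maxConcurrency; i++) {
          int id = i;
          pool.add(blocks -> System.out.println("handler " + id + " wrote " + blocks.size() + " blocks"));
        }
        SimpleWriteHandler handler = new PooledWriteHandlerSketch(pool);
        handler.write(new ArrayList<>());
      }
    }

The real PooledHadoopShuffleWriteHandler in the hunk above is constructed either from a pre-filled deque or from a deque plus a max concurrency and a handler factory; the sketch mirrors only the first, simpler shape.
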
diff --git a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHadoopStorageUtilsTest.java
similarity index 85%
rename from storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHadoopStorageUtilsTest.java
index bc68f068..fb79a578 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHadoopStorageUtilsTest.java
@@ -28,16 +28,16 @@ import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
-import org.apache.uniffle.storage.HdfsTestBase;
-import org.apache.uniffle.storage.handler.impl.HdfsFileWriter;
+import org.apache.uniffle.storage.HadoopTestBase;
+import org.apache.uniffle.storage.handler.impl.HadoopFileWriter;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-public class ShuffleHdfsStorageUtilsTest extends HdfsTestBase {
+public class ShuffleHadoopStorageUtilsTest extends HadoopTestBase {
 
   @Test
   public void testUploadFile(@TempDir File tempDir) throws Exception {
-    createAndRunCases(tempDir, fs, HDFS_URI, HdfsTestBase.conf);
+    createAndRunCases(tempDir, fs, HDFS_URI, HadoopTestBase.conf);
   }
 
   public static void createAndRunCases(
@@ -54,7 +54,7 @@ public class ShuffleHdfsStorageUtilsTest extends HdfsTestBase {
     dataOut.close();
     fileOut.close();
     String path = clusterPathPrefix + "test";
-    HdfsFileWriter writer = new HdfsFileWriter(fileSystem, new Path(path), hadoopConf);
+    HadoopFileWriter writer = new HadoopFileWriter(fileSystem, new Path(path), hadoopConf);
     long size = ShuffleStorageUtils.uploadFile(file, writer, 1024);
     assertEquals(2096, size);
     size = ShuffleStorageUtils.uploadFile(file, writer, 100);
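For context, this renamed test stages a local file and streams it into a Hadoop-compatible filesystem through HadoopFileWriter and ShuffleStorageUtils.uploadFile. The following is a minimal sketch of that path, assuming ShuffleStorageUtils lives in org.apache.uniffle.storage.util alongside the test; the local path, target URI, and 1024-byte buffer are illustrative, and resource cleanup is omitted.

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import org.apache.uniffle.storage.handler.impl.HadoopFileWriter;
    import org.apache.uniffle.storage.util.ShuffleStorageUtils;

    public class UploadFileSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative paths; in the test they point at a @TempDir staging file
        // and the test cluster's scheme/authority prefix.
        File localFile = new File("/tmp/shuffle-data.tmp");
        String targetPath = "hdfs://localhost:9000/uniffle/test";

        Configuration hadoopConf = new Configuration();
        FileSystem fileSystem = FileSystem.get(new Path(targetPath).toUri(), hadoopConf);

        // Same calls as in the hunk above: wrap the target path in a HadoopFileWriter
        // and stream the local file into it with the chosen buffer size.
        HadoopFileWriter writer = new HadoopFileWriter(fileSystem, new Path(targetPath), hadoopConf);
        long written = ShuffleStorageUtils.uploadFile(localFile, writer, 1024);
        System.out.println("uploaded " + written + " bytes");
      }
    }

The return value is the number of bytes uploaded; the test above asserts the same count for both buffer sizes it tries, so the buffer size only affects chunking, not the result.
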
diff --git a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHdfsStorageUtilsTest.java b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHadoopStorageUtilsTest.java
similarity index 73%
rename from storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHdfsStorageUtilsTest.java
rename to storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHadoopStorageUtilsTest.java
index d6530ce8..447c7f2a 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHdfsStorageUtilsTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleKerberizedHadoopStorageUtilsTest.java
@@ -23,24 +23,24 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
-import org.apache.uniffle.common.KerberizedHdfsBase;
+import org.apache.uniffle.common.KerberizedHadoopBase;
 
-public class ShuffleKerberizedHdfsStorageUtilsTest extends KerberizedHdfsBase {
+public class ShuffleKerberizedHadoopStorageUtilsTest extends KerberizedHadoopBase {
 
   @BeforeAll
   public static void beforeAll() throws Exception {
-    testRunner = ShuffleKerberizedHdfsStorageUtilsTest.class;
-    KerberizedHdfsBase.init();
+    testRunner = ShuffleKerberizedHadoopStorageUtilsTest.class;
+    KerberizedHadoopBase.init();
   }
 
   @Test
   public void testUploadFile(@TempDir File tempDir) throws Exception {
     initHadoopSecurityContext();
-    ShuffleHdfsStorageUtilsTest.createAndRunCases(
+    ShuffleHadoopStorageUtilsTest.createAndRunCases(
         tempDir,
-        kerberizedHdfs.getFileSystem(),
-        kerberizedHdfs.getSchemeAndAuthorityPrefix(),
-        kerberizedHdfs.getConf()
+        kerberizedHadoop.getFileSystem(),
+        kerberizedHadoop.getSchemeAndAuthorityPrefix(),
+        kerberizedHadoop.getConf()
     );
   }
 }
diff --git a/storage/src/test/java/org/apache/uniffle/storage/util/StorageTypeTest.java b/storage/src/test/java/org/apache/uniffle/storage/util/StorageTypeTest.java
index afaf4ab0..b1cf81cb 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/util/StorageTypeTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/util/StorageTypeTest.java
@@ -33,36 +33,36 @@ public class StorageTypeTest {
     StorageType storageType = StorageType.MEMORY;
     assertTrue(StorageType.withMemory(storageType));
     assertFalse(StorageType.withLocalfile(storageType));
-    assertFalse(StorageType.withHDFS(storageType));
+    assertFalse(StorageType.withHadoop(storageType));
 
     storageType = StorageType.LOCALFILE;
     assertFalse(StorageType.withMemory(storageType));
     assertTrue(StorageType.withLocalfile(storageType));
-    assertFalse(StorageType.withHDFS(storageType));
+    assertFalse(StorageType.withHadoop(storageType));
 
     storageType = StorageType.HDFS;
     assertFalse(StorageType.withMemory(storageType));
     assertFalse(StorageType.withLocalfile(storageType));
-    assertTrue(StorageType.withHDFS(storageType));
+    assertTrue(StorageType.withHadoop(storageType));
 
     storageType = StorageType.MEMORY_HDFS;
     assertTrue(StorageType.withMemory(storageType));
     assertFalse(StorageType.withLocalfile(storageType));
-    assertTrue(StorageType.withHDFS(storageType));
+    assertTrue(StorageType.withHadoop(storageType));
 
     storageType = StorageType.MEMORY_LOCALFILE;
     assertTrue(StorageType.withMemory(storageType));
     assertTrue(StorageType.withLocalfile(storageType));
-    assertFalse(StorageType.withHDFS(storageType));
+    assertFalse(StorageType.withHadoop(storageType));
 
     storageType = StorageType.MEMORY_LOCALFILE_HDFS;
     assertTrue(StorageType.withMemory(storageType));
     assertTrue(StorageType.withLocalfile(storageType));
-    assertTrue(StorageType.withHDFS(storageType));
+    assertTrue(StorageType.withHadoop(storageType));
 
     storageType = StorageType.LOCALFILE_HDFS;
     assertFalse(StorageType.withMemory(storageType));
     assertTrue(StorageType.withLocalfile(storageType));
-    assertTrue(StorageType.withHDFS(storageType));
+    assertTrue(StorageType.withHadoop(storageType));
   }
 }
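
As a usage note on the renamed predicate: call sites branch on which tiers a configured StorageType includes, and withHadoop now reads correctly for HDFS as well as any other Hadoop FS-compatible remote storage. A minimal sketch, assuming StorageType sits in org.apache.uniffle.storage.util as the test above suggests:

    import org.apache.uniffle.storage.util.StorageType;

    public class StorageTypeCheckSketch {
      public static void main(String[] args) {
        // MEMORY_LOCALFILE_HDFS enables all three tiers, so every predicate holds.
        StorageType type = StorageType.MEMORY_LOCALFILE_HDFS;

        System.out.println("memory tier enabled:     " + StorageType.withMemory(type));
        System.out.println("local file tier enabled: " + StorageType.withLocalfile(type));

        if (StorageType.withHadoop(type)) {
          // After the rename, this branch covers HDFS and any other
          // Hadoop FS-compatible filesystem configured as the remote tier.
          System.out.println("remote Hadoop-compatible tier enabled");
        }
      }
    }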