Posted to commits@iotdb.apache.org by ja...@apache.org on 2022/12/06 02:52:22 UTC

[iotdb] branch IDataRegionForQuery created (now 04e0908c6a)

This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a change to branch IDataRegionForQuery
in repository https://gitbox.apache.org/repos/asf/iotdb.git


      at 04e0908c6a [To rel/1.0] Add an interface for query engine which storage engine must provide

This branch includes the following new commits:

     new 04e0908c6a [To rel/1.0] Add an interface for query engine which storage engine must provide

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[iotdb] 01/01: [To rel/1.0] Add an interface for query engine which storage engine must provide

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a commit to branch IDataRegionForQuery
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 04e0908c6a106d90c3ee402d09ec0509173d8501
Author: JackieTien97 <ja...@gmail.com>
AuthorDate: Tue Dec 6 10:51:42 2022 +0800

    [To rel/1.0] Add an interface for query engine which storage engine must provide
---
 .../statemachine/DataRegionStateMachine.java       |  12 +--
 .../apache/iotdb/db/engine/StorageEngineV2.java    |   6 +-
 .../iotdb/db/engine/load/LoadTsFileManager.java    |   2 +-
 .../iotdb/db/engine/snapshot/SnapshotTaker.java    |   8 +-
 .../iotdb/db/engine/storagegroup/DataRegion.java   | 114 ++++++++++-----------
 .../db/engine/storagegroup/DataRegionMetrics.java  |   2 +-
 .../engine/storagegroup/IDataRegionForQuery.java   |  47 +++++++++
 .../engine/storagegroup/TsFileProcessorInfo.java   |   2 +-
 .../db/engine/storagegroup/VirtualDataRegion.java  |  78 ++++++++++++++
 .../iotdb/db/mpp/execution/driver/DataDriver.java  |   4 +-
 .../db/mpp/execution/driver/DataDriverContext.java |   8 +-
 .../fragment/FragmentInstanceManager.java          |   4 +-
 .../last/AbstractUpdateLastCacheOperator.java      |   2 +-
 .../db/mpp/plan/planner/LocalExecutionPlanner.java |   4 +-
 .../org/apache/iotdb/db/rescon/SystemInfo.java     |  10 +-
 .../db/sync/sender/manager/LocalSyncManager.java   |   2 +-
 .../iotdb/db/sync/sender/pipe/TsFilePipe.java      |   2 +-
 .../sync/transport/client/SyncClientFactory.java   |   2 +-
 .../db/engine/snapshot/IoTDBSnapshotTest.java      |   2 +-
 .../db/engine/storagegroup/DataRegionTest.java     |   4 +-
 20 files changed, 219 insertions(+), 96 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java b/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
index b40154288e..d8b72779f1 100644
--- a/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
+++ b/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
@@ -103,7 +103,7 @@ public class DataRegionStateMachine extends BaseStateMachine {
     } catch (Exception e) {
       logger.error(
           "Exception occurs when taking snapshot for {}-{} in {}",
-          region.getStorageGroupName(),
+          region.getDatabaseName(),
           region.getDataRegionId(),
           snapshotDir,
           e);
@@ -119,7 +119,7 @@ public class DataRegionStateMachine extends BaseStateMachine {
     } catch (Exception e) {
       logger.error(
           "Exception occurs when taking snapshot for {}-{} in {}",
-          region.getStorageGroupName(),
+          region.getDatabaseName(),
           region.getDataRegionId(),
           snapshotDir,
           e);
@@ -132,7 +132,7 @@ public class DataRegionStateMachine extends BaseStateMachine {
     DataRegion newRegion =
         new SnapshotLoader(
                 latestSnapshotRootDir.getAbsolutePath(),
-                region.getStorageGroupName(),
+                region.getDatabaseName(),
                 region.getDataRegionId())
             .loadSnapshotForStateMachine();
     if (newRegion == null) {
@@ -316,13 +316,13 @@ public class DataRegionStateMachine extends BaseStateMachine {
     try {
       return new SnapshotLoader(
               latestSnapshotRootDir.getAbsolutePath(),
-              region.getStorageGroupName(),
+              region.getDatabaseName(),
               region.getDataRegionId())
           .getSnapshotFileInfo().stream().map(File::toPath).collect(Collectors.toList());
     } catch (IOException e) {
       logger.error(
           "Meets error when getting snapshot files for {}-{}",
-          region.getStorageGroupName(),
+          region.getDatabaseName(),
           region.getDataRegionId(),
           e);
       return null;
@@ -450,7 +450,7 @@ public class DataRegionStateMachine extends BaseStateMachine {
     String snapshotDir =
         IoTDBDescriptor.getInstance().getConfig().getRatisDataRegionSnapshotDir()
             + File.separator
-            + region.getStorageGroupName()
+            + region.getDatabaseName()
             + "-"
             + region.getDataRegionId();
     try {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java
index e5aa77d06b..e80c823bf1 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java
@@ -508,7 +508,7 @@ public class StorageEngineV2 implements IService {
   public void closeStorageGroupProcessor(String storageGroupPath, boolean isSeq) {
     List<Future<Void>> tasks = new ArrayList<>();
     for (DataRegion dataRegion : dataRegionMap.values()) {
-      if (dataRegion.getStorageGroupName().equals(storageGroupPath)) {
+      if (dataRegion.getDatabaseName().equals(storageGroupPath)) {
         if (isSeq) {
           for (TsFileProcessor tsFileProcessor : dataRegion.getWorkSequenceTsFileProcessors()) {
             tasks.add(
@@ -655,13 +655,13 @@ public class StorageEngineV2 implements IService {
                 .equals(ConsensusFactory.IOT_CONSENSUS)) {
           WALManager.getInstance()
               .deleteWALNode(
-                  region.getStorageGroupName() + FILE_NAME_SEPARATOR + region.getDataRegionId());
+                  region.getDatabaseName() + FILE_NAME_SEPARATOR + region.getDataRegionId());
         }
         SyncService.getInstance().unregisterDataRegion(region.getDataRegionId());
       } catch (Exception e) {
         logger.error(
             "Error occurs when deleting data region {}-{}",
-            region.getStorageGroupName(),
+            region.getDatabaseName(),
             region.getDataRegionId(),
             e);
       } finally {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/load/LoadTsFileManager.java b/server/src/main/java/org/apache/iotdb/db/engine/load/LoadTsFileManager.java
index fa9f7abd9b..4532977a67 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/load/LoadTsFileManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/load/LoadTsFileManager.java
@@ -290,7 +290,7 @@ public class LoadTsFileManager {
     public String toString() {
       return String.join(
           IoTDBConstant.FILE_NAME_SEPARATOR,
-          dataRegion.getStorageGroupName(),
+          dataRegion.getDatabaseName(),
           dataRegion.getDataRegionId(),
           Long.toString(timePartitionSlot.getStartTime()));
     }
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotTaker.java b/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotTaker.java
index c01e5a0c25..f6c27f1fbc 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotTaker.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotTaker.java
@@ -105,14 +105,14 @@ public class SnapshotTaker {
       if (!success) {
         LOGGER.warn(
             "Failed to take snapshot for {}-{}, clean up",
-            dataRegion.getStorageGroupName(),
+            dataRegion.getDatabaseName(),
             dataRegion.getDataRegionId());
         cleanUpWhenFail(finalSnapshotId);
       } else {
         snapshotLogger.logEnd();
         LOGGER.info(
             "Successfully take snapshot for {}-{}, snapshot directory is {}",
-            dataRegion.getStorageGroupName(),
+            dataRegion.getDatabaseName(),
             dataRegion.getDataRegionId(),
             snapshotDir.getParentFile().getAbsolutePath() + File.separator + finalSnapshotId);
       }
@@ -121,7 +121,7 @@ public class SnapshotTaker {
     } catch (Exception e) {
       LOGGER.error(
           "Exception occurs when taking snapshot for {}-{}",
-          dataRegion.getStorageGroupName(),
+          dataRegion.getDatabaseName(),
           dataRegion.getDataRegionId(),
           e);
       return false;
@@ -226,7 +226,7 @@ public class SnapshotTaker {
     }
     stringBuilder.append(IoTDBConstant.SNAPSHOT_FOLDER_NAME);
     stringBuilder.append(File.separator);
-    stringBuilder.append(dataRegion.getStorageGroupName());
+    stringBuilder.append(dataRegion.getDatabaseName());
     stringBuilder.append("-");
     stringBuilder.append(dataRegion.getDataRegionId());
     stringBuilder.append(File.separator);
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
index 8e7eb63e70..0be9f26df5 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
@@ -155,7 +155,7 @@ import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.TSFILE_SUFF
  * <p>When a TsFileProcessor is closed, the closeUnsealedTsFileProcessorCallBack() method will be
  * called as a callback.
  */
-public class DataRegion {
+public class DataRegion implements IDataRegionForQuery {
 
   private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
   private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
@@ -213,7 +213,7 @@ public class DataRegion {
   /** data region id */
   private String dataRegionId;
   /** database name */
-  private String storageGroupName;
+  private String databaseName;
   /** database system directory */
   private File storageGroupSysDir;
   /** manage seqFileList and unSeqFileList */
@@ -273,21 +273,18 @@ public class DataRegion {
    * @param systemDir system dir path
    * @param dataRegionId data region id e.g. 1
    * @param fileFlushPolicy file flush policy
-   * @param storageGroupName database name e.g. root.sg1
+   * @param databaseName database name e.g. root.sg1
    */
   public DataRegion(
-      String systemDir,
-      String dataRegionId,
-      TsFileFlushPolicy fileFlushPolicy,
-      String storageGroupName)
+      String systemDir, String dataRegionId, TsFileFlushPolicy fileFlushPolicy, String databaseName)
       throws DataRegionException {
     this.dataRegionId = dataRegionId;
-    this.storageGroupName = storageGroupName;
+    this.databaseName = databaseName;
     this.fileFlushPolicy = fileFlushPolicy;
 
     storageGroupSysDir = SystemFileFactory.INSTANCE.getFile(systemDir, dataRegionId);
     this.tsFileManager =
-        new TsFileManager(storageGroupName, dataRegionId, storageGroupSysDir.getPath());
+        new TsFileManager(databaseName, dataRegionId, storageGroupSysDir.getPath());
     if (storageGroupSysDir.mkdirs()) {
       logger.info(
           "Database system Directory {} doesn't exist, create it", storageGroupSysDir.getPath());
@@ -297,7 +294,7 @@ public class DataRegion {
 
     // if use id table, we use id table flush time manager
     if (config.isEnableIDTable()) {
-      idTable = IDTableManager.getInstance().getIDTableDirectly(storageGroupName);
+      idTable = IDTableManager.getInstance().getIDTableDirectly(databaseName);
       lastFlushTimeMap = new IDTableLastFlushTimeMap(idTable, tsFileManager);
     } else {
       lastFlushTimeMap = new HashLastFlushTimeMap(tsFileManager);
@@ -309,11 +306,11 @@ public class DataRegion {
         && !StorageEngineV2.getInstance().isAllSgReady()) {
       logger.debug(
           "Skip recovering data region {}[{}] when consensus protocol is ratis and storage engine is not ready.",
-          storageGroupName,
+          databaseName,
           dataRegionId);
       for (String fileFolder : DirectoryManager.getInstance().getAllFilesFolders()) {
         File dataRegionFolder =
-            fsFactory.getFile(fileFolder, storageGroupName + File.separator + dataRegionId);
+            fsFactory.getFile(fileFolder, databaseName + File.separator + dataRegionId);
         if (dataRegionFolder.exists()) {
           File[] timePartitions = dataRegionFolder.listFiles();
           if (timePartitions != null) {
@@ -324,7 +321,7 @@ public class DataRegion {
                 logger.error(
                     "Exception occurs when deleting time partition directory {} for {}-{}",
                     timePartitions,
-                    storageGroupName,
+                    databaseName,
                     dataRegionId,
                     e);
               }
@@ -340,16 +337,17 @@ public class DataRegion {
   }
 
   @TestOnly
-  public DataRegion(String storageGroupName, String id) {
-    this.storageGroupName = storageGroupName;
+  public DataRegion(String databaseName, String id) {
+    this.databaseName = databaseName;
     this.dataRegionId = id;
-    this.tsFileManager = new TsFileManager(storageGroupName, id, "");
+    this.tsFileManager = new TsFileManager(databaseName, id, "");
     this.partitionMaxFileVersions = new HashMap<>();
     partitionMaxFileVersions.put(0L, 0L);
   }
 
-  public String getStorageGroupName() {
-    return storageGroupName;
+  @Override
+  public String getDatabaseName() {
+    return databaseName;
   }
 
   public boolean isReady() {
@@ -409,7 +407,7 @@ public class DataRegion {
         if (lastLogTime + config.getRecoveryLogIntervalInMs() < System.currentTimeMillis()) {
           logger.info(
               "The data region {}[{}] has recovered {}%, please wait a moment.",
-              storageGroupName, dataRegionId, recoveredFilesNum * 1.0 / numOfFilesToRecover);
+              databaseName, dataRegionId, recoveredFilesNum * 1.0 / numOfFilesToRecover);
           lastLogTime = System.currentTimeMillis();
         }
       }
@@ -553,10 +551,9 @@ public class DataRegion {
     initCompaction();
 
     if (StorageEngineV2.getInstance().isAllSgReady()) {
-      logger.info("The data region {}[{}] is created successfully", storageGroupName, dataRegionId);
+      logger.info("The data region {}[{}] is created successfully", databaseName, dataRegionId);
     } else {
-      logger.info(
-          "The data region {}[{}] is recovered successfully", storageGroupName, dataRegionId);
+      logger.info("The data region {}[{}] is recovered successfully", databaseName, dataRegionId);
     }
   }
 
@@ -582,7 +579,7 @@ public class DataRegion {
     }
     timedCompactionScheduleTask =
         IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(
-            ThreadName.COMPACTION_SCHEDULE.getName() + "-" + storageGroupName + "-" + dataRegionId);
+            ThreadName.COMPACTION_SCHEDULE.getName() + "-" + databaseName + "-" + dataRegionId);
     ScheduledExecutorUtil.safelyScheduleWithFixedDelay(
         timedCompactionScheduleTask,
         this::executeCompaction,
@@ -593,7 +590,7 @@ public class DataRegion {
 
   private void recoverCompaction() {
     CompactionRecoverManager compactionRecoverManager =
-        new CompactionRecoverManager(tsFileManager, storageGroupName, dataRegionId);
+        new CompactionRecoverManager(tsFileManager, databaseName, dataRegionId);
     compactionRecoverManager.recoverInnerSpaceCompaction(true);
     compactionRecoverManager.recoverInnerSpaceCompaction(false);
     compactionRecoverManager.recoverCrossSpaceCompaction();
@@ -654,8 +651,7 @@ public class DataRegion {
     List<File> tsFiles = new ArrayList<>();
     List<File> upgradeFiles = new ArrayList<>();
     for (String baseDir : folders) {
-      File fileFolder =
-          fsFactory.getFile(baseDir + File.separator + storageGroupName, dataRegionId);
+      File fileFolder = fsFactory.getFile(baseDir + File.separator + databaseName, dataRegionId);
       if (!fileFolder.exists()) {
         continue;
       }
@@ -735,7 +731,7 @@ public class DataRegion {
           String.format(
               "data region %s[%s] is down, because the time of tsfile %s is larger than system current time, "
                   + "file time is %d while system current time is %d, please check it.",
-              storageGroupName, dataRegionId, tsFile.getAbsolutePath(), fileTime, currentTime));
+              databaseName, dataRegionId, tsFile.getAbsolutePath(), fileTime, currentTime));
     }
   }
 
@@ -1275,7 +1271,7 @@ public class DataRegion {
     String filePath =
         TsFileNameGenerator.generateNewTsFilePathWithMkdir(
             sequence,
-            storageGroupName,
+            databaseName,
             dataRegionId,
             timePartitionId,
             System.currentTimeMillis(),
@@ -1292,7 +1288,7 @@ public class DataRegion {
     if (sequence) {
       tsFileProcessor =
           new TsFileProcessor(
-              storageGroupName + FILE_NAME_SEPARATOR + dataRegionId,
+              databaseName + FILE_NAME_SEPARATOR + dataRegionId,
               fsFactory.getFileWithParent(filePath),
               dataRegionInfo,
               this::closeUnsealedTsFileProcessorCallBack,
@@ -1301,7 +1297,7 @@ public class DataRegion {
     } else {
       tsFileProcessor =
           new TsFileProcessor(
-              storageGroupName + FILE_NAME_SEPARATOR + dataRegionId,
+              databaseName + FILE_NAME_SEPARATOR + dataRegionId,
               fsFactory.getFileWithParent(filePath),
               dataRegionInfo,
               this::closeUnsealedTsFileProcessorCallBack,
@@ -1354,7 +1350,7 @@ public class DataRegion {
           if (System.currentTimeMillis() - startTime > 60_000) {
             logger.warn(
                 "{} has spent {}s to wait for closing one tsfile.",
-                storageGroupName + "-" + this.dataRegionId,
+                databaseName + "-" + this.dataRegionId,
                 (System.currentTimeMillis() - startTime) / 1000);
           }
         }
@@ -1363,7 +1359,7 @@ public class DataRegion {
         logger.error(
             "syncCloseOneTsFileProcessor error occurs while waiting for closing the storage "
                 + "group {}",
-            storageGroupName + "-" + dataRegionId,
+            databaseName + "-" + dataRegionId,
             e);
       }
     }
@@ -1397,7 +1393,7 @@ public class DataRegion {
       if (!workUnsequenceTsFileProcessors.containsKey(tsFileProcessor.getTimeRangeId())) {
         timePartitionIdVersionControllerMap.remove(tsFileProcessor.getTimeRangeId());
       }
-      logger.info("close a sequence tsfile processor {}", storageGroupName + "-" + dataRegionId);
+      logger.info("close a sequence tsfile processor {}", databaseName + "-" + dataRegionId);
     } else {
       closingUnSequenceTsFileProcessor.add(tsFileProcessor);
       tsFileProcessor.asyncClose();
@@ -1419,13 +1415,13 @@ public class DataRegion {
   public void deleteFolder(String systemDir) {
     logger.info(
         "{} will close all files for deleting data folder {}",
-        storageGroupName + "-" + dataRegionId,
+        databaseName + "-" + dataRegionId,
         systemDir);
     writeLock("deleteFolder");
     try {
       File dataRegionSystemFolder =
           SystemFileFactory.INSTANCE.getFile(
-              systemDir + File.separator + storageGroupName, dataRegionId);
+              systemDir + File.separator + databaseName, dataRegionId);
       org.apache.iotdb.commons.utils.FileUtils.deleteDirectoryAndEmptyParent(
           dataRegionSystemFolder);
     } finally {
@@ -1454,7 +1450,7 @@ public class DataRegion {
   /** delete tsfile */
   public void syncDeleteDataFiles() {
     logger.info(
-        "{} will close all files for deleting data files", storageGroupName + "-" + dataRegionId);
+        "{} will close all files for deleting data files", databaseName + "-" + dataRegionId);
     writeLock("syncDeleteDataFiles");
     try {
 
@@ -1477,7 +1473,7 @@ public class DataRegion {
   private void deleteAllSGFolders(List<String> folder) {
     for (String tsfilePath : folder) {
       File dataRegionDataFolder =
-          fsFactory.getFile(tsfilePath, storageGroupName + File.separator + dataRegionId);
+          fsFactory.getFile(tsfilePath, databaseName + File.separator + dataRegionId);
       if (dataRegionDataFolder.exists()) {
         org.apache.iotdb.commons.utils.FileUtils.deleteDirectoryAndEmptyParent(
             dataRegionDataFolder);
@@ -1488,13 +1484,13 @@ public class DataRegion {
   /** Iterate each TsFile and try to lock and remove those out of TTL. */
   public synchronized void checkFilesTTL() {
     if (dataTTL == Long.MAX_VALUE) {
-      logger.debug("{}: TTL not set, ignore the check", storageGroupName + "-" + dataRegionId);
+      logger.debug("{}: TTL not set, ignore the check", databaseName + "-" + dataRegionId);
       return;
     }
     long ttlLowerBound = DateTimeUtils.currentTime() - dataTTL;
     logger.debug(
         "{}: TTL removing files before {}",
-        storageGroupName + "-" + dataRegionId,
+        databaseName + "-" + dataRegionId,
         new Date(ttlLowerBound));
 
     // copy to avoid concurrent modification of deletion
@@ -1545,7 +1541,7 @@ public class DataRegion {
           logger.info(
               "Exceed sequence memtable flush interval, so flush working memtable of time partition {} in database {}[{}]",
               tsFileProcessor.getTimeRangeId(),
-              storageGroupName,
+              databaseName,
               dataRegionId);
           fileFlushPolicy.apply(this, tsFileProcessor, tsFileProcessor.isSequence());
         }
@@ -1568,7 +1564,7 @@ public class DataRegion {
           logger.info(
               "Exceed unsequence memtable flush interval, so flush working memtable of time partition {} in database {}[{}]",
               tsFileProcessor.getTimeRangeId(),
-              storageGroupName,
+              databaseName,
               dataRegionId);
           fileFlushPolicy.apply(this, tsFileProcessor, tsFileProcessor.isSequence());
         }
@@ -1590,7 +1586,7 @@ public class DataRegion {
           if (System.currentTimeMillis() - startTime > 60_000) {
             logger.warn(
                 "{} has spent {}s to wait for closing all TsFiles.",
-                storageGroupName + "-" + this.dataRegionId,
+                databaseName + "-" + this.dataRegionId,
                 (System.currentTimeMillis() - startTime) / 1000);
           }
         }
@@ -1598,7 +1594,7 @@ public class DataRegion {
         logger.error(
             "CloseFileNodeCondition error occurs while waiting for closing the storage "
                 + "group {}",
-            storageGroupName + "-" + dataRegionId,
+            databaseName + "-" + dataRegionId,
             e);
         Thread.currentThread().interrupt();
       }
@@ -1609,8 +1605,7 @@ public class DataRegion {
   public void asyncCloseAllWorkingTsFileProcessors() {
     writeLock("asyncCloseAllWorkingTsFileProcessors");
     try {
-      logger.info(
-          "async force close all files in database: {}", storageGroupName + "-" + dataRegionId);
+      logger.info("async force close all files in database: {}", databaseName + "-" + dataRegionId);
       // to avoid concurrent modification problem, we need a new array list
       for (TsFileProcessor tsFileProcessor :
           new ArrayList<>(workSequenceTsFileProcessors.values())) {
@@ -1630,8 +1625,7 @@ public class DataRegion {
   public void forceCloseAllWorkingTsFileProcessors() throws TsFileProcessorException {
     writeLock("forceCloseAllWorkingTsFileProcessors");
     try {
-      logger.info(
-          "force close all processors in database: {}", storageGroupName + "-" + dataRegionId);
+      logger.info("force close all processors in database: {}", databaseName + "-" + dataRegionId);
       // to avoid concurrent modification problem, we need a new array list
       for (TsFileProcessor tsFileProcessor :
           new ArrayList<>(workSequenceTsFileProcessors.values())) {
@@ -1701,6 +1695,7 @@ public class DataRegion {
   }
 
   /** used for mpp */
+  @Override
   public QueryDataSource query(
       List<PartialPath> pathList, String singleDeviceId, QueryContext context, Filter timeFilter)
       throws QueryProcessException {
@@ -1732,6 +1727,7 @@ public class DataRegion {
   }
 
   /** lock the read lock of the insert lock */
+  @Override
   public void readLock() {
     // apply read lock for SG insert lock to prevent inconsistent with concurrently writing memtable
     insertLock.readLock().lock();
@@ -1740,6 +1736,7 @@ public class DataRegion {
   }
 
   /** unlock the read lock of insert lock */
+  @Override
   public void readUnlock() {
     tsFileManager.readUnlock();
     insertLock.readLock().unlock();
@@ -1946,7 +1943,7 @@ public class DataRegion {
       if (timePartitionStartId <= entry.getKey()
           && entry.getKey() <= timePartitionEndId
           && (timePartitionFilter == null
-              || timePartitionFilter.satisfy(storageGroupName, entry.getKey()))) {
+              || timePartitionFilter.satisfy(databaseName, entry.getKey()))) {
         WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode);
         walFlushListeners.add(walFlushListener);
       }
@@ -1955,7 +1952,7 @@ public class DataRegion {
       if (timePartitionStartId <= entry.getKey()
           && entry.getKey() <= timePartitionEndId
           && (timePartitionFilter == null
-              || timePartitionFilter.satisfy(storageGroupName, entry.getKey()))) {
+              || timePartitionFilter.satisfy(databaseName, entry.getKey()))) {
         WALFlushListener walFlushListener = entry.getValue().logDeleteDataNodeInWAL(deleteDataNode);
         walFlushListeners.add(walFlushListener);
       }
@@ -1970,7 +1967,7 @@ public class DataRegion {
       long deleteEnd,
       TimePartitionFilter timePartitionFilter) {
     if (timePartitionFilter != null
-        && !timePartitionFilter.satisfy(storageGroupName, tsFileResource.getTimePartition())) {
+        && !timePartitionFilter.satisfy(databaseName, tsFileResource.getTimePartition())) {
       return true;
     }
 
@@ -2128,7 +2125,7 @@ public class DataRegion {
     synchronized (closeStorageGroupCondition) {
       closeStorageGroupCondition.notifyAll();
     }
-    logger.info("signal closing database condition in {}", storageGroupName + "-" + dataRegionId);
+    logger.info("signal closing database condition in {}", databaseName + "-" + dataRegionId);
   }
 
   private void executeCompaction() {
@@ -2627,7 +2624,7 @@ public class DataRegion {
         targetFile =
             fsFactory.getFile(
                 DirectoryManager.getInstance().getNextFolderForUnSequenceFile(),
-                storageGroupName
+                databaseName
                     + File.separatorChar
                     + dataRegionId
                     + File.separatorChar
@@ -2649,7 +2646,7 @@ public class DataRegion {
         targetFile =
             fsFactory.getFile(
                 DirectoryManager.getInstance().getNextFolderForSequenceFile(),
-                storageGroupName
+                databaseName
                     + File.separatorChar
                     + dataRegionId
                     + File.separatorChar
@@ -2913,7 +2910,7 @@ public class DataRegion {
    * @return data region path, like root.sg1/0
    */
   public String getStorageGroupPath() {
-    return storageGroupName + File.separator + dataRegionId;
+    return databaseName + File.separator + dataRegionId;
   }
 
   /**
@@ -3005,7 +3002,7 @@ public class DataRegion {
   public void abortCompaction() {
     tsFileManager.setAllowCompaction(false);
     List<AbstractCompactionTask> runningTasks =
-        CompactionTaskManager.getInstance().abortCompaction(storageGroupName + "-" + dataRegionId);
+        CompactionTaskManager.getInstance().abortCompaction(databaseName + "-" + dataRegionId);
     while (CompactionTaskManager.getInstance().isAnyTaskInListStillRunning(runningTasks)) {
       try {
         TimeUnit.MILLISECONDS.sleep(10);
@@ -3029,7 +3026,7 @@ public class DataRegion {
       TimePartitionManager.getInstance()
           .removePartition(new DataRegionId(Integer.valueOf(dataRegionId)), partitionId);
       TsFileProcessor processor = longTsFileProcessorEntry.getValue();
-      if (filter.satisfy(storageGroupName, partitionId)) {
+      if (filter.satisfy(databaseName, partitionId)) {
         processor.syncClose();
         iterator.remove();
         processor.getTsFileResource().remove();
@@ -3046,7 +3043,7 @@ public class DataRegion {
       TimePartitionFilter filter, Iterator<TsFileResource> iterator, boolean sequence) {
     while (iterator.hasNext()) {
       TsFileResource tsFileResource = iterator.next();
-      if (filter.satisfy(storageGroupName, tsFileResource.getTimePartition())) {
+      if (filter.satisfy(databaseName, tsFileResource.getTimePartition())) {
         tsFileResource.remove();
         tsFileManager.remove(tsFileResource, sequence);
         lastFlushTimeMap.removePartition(tsFileResource.getTimePartition());
@@ -3337,7 +3334,7 @@ public class DataRegion {
     }
     // identifier should be same with getTsFileProcessor method
     return WALManager.getInstance()
-        .applyForWALNode(storageGroupName + FILE_NAME_SEPARATOR + dataRegionId);
+        .applyForWALNode(databaseName + FILE_NAME_SEPARATOR + dataRegionId);
   }
 
   /** Wait for this data region successfully deleted */
@@ -3374,6 +3371,7 @@ public class DataRegion {
     return dataRegionInfo.getMemCost();
   }
 
+  @Override
   public long getDataTTL() {
     return dataTTL;
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegionMetrics.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegionMetrics.java
index 194ea37822..40ab8ffbc8 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegionMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegionMetrics.java
@@ -34,7 +34,7 @@ public class DataRegionMetrics implements IMetricSet {
 
   public DataRegionMetrics(DataRegion dataRegion) {
     this.dataRegion = dataRegion;
-    this.storageGroupName = dataRegion.getStorageGroupName();
+    this.storageGroupName = dataRegion.getDatabaseName();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/IDataRegionForQuery.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/IDataRegionForQuery.java
new file mode 100644
index 0000000000..57c3ad436d
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/IDataRegionForQuery.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.storagegroup;
+
+import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.query.QueryProcessException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+import java.util.List;
+
+/** The interface that the storage engine must provide to the query engine */
+public interface IDataRegionForQuery {
+
+  /** lock the read lock for thread safety */
+  void readLock();
+
+  void readUnlock();
+
+  /** Get satisfied QueryDataSource from DataRegion */
+  QueryDataSource query(
+      List<PartialPath> pathList, String singleDeviceId, QueryContext context, Filter timeFilter)
+      throws QueryProcessException;
+
+  /** Get TTL of this DataRegion */
+  long getDataTTL();
+
+  /** Get database name of this DataRegion */
+  String getDatabaseName();
+}
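
For context, a minimal sketch of how query-side code is expected to consume this
interface, mirroring the read-lock/query/read-unlock pattern used by
DataDriver.initQueryDataSource later in this diff. The class and method names below
are hypothetical and are not part of this commit:

    import java.util.List;

    import org.apache.iotdb.commons.path.PartialPath;
    import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
    import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
    import org.apache.iotdb.db.exception.query.QueryProcessException;
    import org.apache.iotdb.db.query.context.QueryContext;
    import org.apache.iotdb.tsfile.read.filter.basic.Filter;

    // Hypothetical caller, for illustration only.
    public class QueryDataSourceExample {

      static QueryDataSource fetch(
          IDataRegionForQuery region,
          List<PartialPath> pathList,
          String singleDeviceId,
          QueryContext context,
          Filter timeFilter)
          throws QueryProcessException {
        // Hold the region's read lock so the returned data source stays consistent
        // with memtables that are being written concurrently.
        region.readLock();
        try {
          return region.query(pathList, singleDeviceId, context, timeFilter);
        } finally {
          region.readUnlock();
        }
      }
    }
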
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
index 80f61bc86a..93fa8035e7 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorInfo.java
@@ -36,7 +36,7 @@ public class TsFileProcessorInfo {
       MetricService.getInstance()
           .addMetricSet(
               new TsFileProcessorInfoMetrics(
-                  dataRegionInfo.getDataRegion().getStorageGroupName(), memCost));
+                  dataRegionInfo.getDataRegion().getDatabaseName(), memCost));
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualDataRegion.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualDataRegion.java
new file mode 100644
index 0000000000..8aa30e5068
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/VirtualDataRegion.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.engine.storagegroup;
+
+import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.query.QueryProcessException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A virtual data region used for queries over time series that don't belong to any data
+ * region.
+ */
+public class VirtualDataRegion implements IDataRegionForQuery {
+
+  private static final String VIRTUAL_DB_NAME = "root.__virtual";
+
+  private static final QueryDataSource EMPTY_QUERY_DATA_SOURCE =
+      new QueryDataSource(Collections.emptyList(), Collections.emptyList());
+
+  public static VirtualDataRegion getInstance() {
+    return VirtualDataRegion.InstanceHolder.INSTANCE;
+  }
+
+  @Override
+  public void readLock() {
+    // do nothing, because this implementation is already thread-safe
+  }
+
+  @Override
+  public void readUnlock() {
+    // do nothing, because this implementation is already thread-safe
+  }
+
+  @Override
+  public QueryDataSource query(
+      List<PartialPath> pathList, String singleDeviceId, QueryContext context, Filter timeFilter)
+      throws QueryProcessException {
+    return EMPTY_QUERY_DATA_SOURCE;
+  }
+
+  @Override
+  public long getDataTTL() {
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  public String getDatabaseName() {
+    return VIRTUAL_DB_NAME;
+  }
+
+  private static class InstanceHolder {
+
+    private InstanceHolder() {}
+
+    private static final VirtualDataRegion INSTANCE = new VirtualDataRegion();
+  }
+}
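
VirtualDataRegion acts as a read-only null object behind the same IDataRegionForQuery
interface: its locks are no-ops, query() returns an empty QueryDataSource, and its TTL
is Long.MAX_VALUE. A minimal sketch of one possible fallback to it when a query path
resolves to no concrete DataRegion; the helper below is hypothetical and is not part
of this commit:

    import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
    import org.apache.iotdb.db.engine.storagegroup.VirtualDataRegion;

    // Hypothetical lookup helper, for illustration only.
    public class RegionLookupExample {

      static IDataRegionForQuery orVirtual(IDataRegionForQuery resolved) {
        // Fall back to the shared singleton, whose query() yields an empty result set.
        return resolved != null ? resolved : VirtualDataRegion.getInstance();
      }
    }
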
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriver.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriver.java
index 340a8f4bcc..3fc0aaa042 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriver.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriver.java
@@ -20,7 +20,7 @@ package org.apache.iotdb.db.mpp.execution.driver;
 
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
+import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.metadata.idtable.IDTable;
@@ -122,7 +122,7 @@ public class DataDriver extends Driver {
    */
   private QueryDataSource initQueryDataSource() throws QueryProcessException {
     DataDriverContext context = (DataDriverContext) driverContext;
-    DataRegion dataRegion = context.getDataRegion();
+    IDataRegionForQuery dataRegion = context.getDataRegion();
     dataRegion.readLock();
     try {
       List<PartialPath> pathList =
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriverContext.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriverContext.java
index f35c0da438..dec4cf656c 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriverContext.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/DataDriverContext.java
@@ -19,7 +19,7 @@
 package org.apache.iotdb.db.mpp.execution.driver;
 
 import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
+import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
 import org.apache.iotdb.db.mpp.execution.fragment.FragmentInstanceContext;
 import org.apache.iotdb.db.mpp.execution.operator.source.DataSourceOperator;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
@@ -30,14 +30,14 @@ import java.util.List;
 public class DataDriverContext extends DriverContext {
   private final List<PartialPath> paths;
   private final Filter timeFilter;
-  private final DataRegion dataRegion;
+  private final IDataRegionForQuery dataRegion;
   private final List<DataSourceOperator> sourceOperators;
 
   public DataDriverContext(
       FragmentInstanceContext fragmentInstanceContext,
       List<PartialPath> paths,
       Filter timeFilter,
-      DataRegion dataRegion,
+      IDataRegionForQuery dataRegion,
       List<DataSourceOperator> sourceOperators) {
     super(fragmentInstanceContext);
     this.paths = paths;
@@ -54,7 +54,7 @@ public class DataDriverContext extends DriverContext {
     return timeFilter;
   }
 
-  public DataRegion getDataRegion() {
+  public IDataRegionForQuery getDataRegion() {
     return dataRegion;
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceManager.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceManager.java
index f3da54b6c4..5e37ac29aa 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceManager.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.mpp.execution.fragment;
 import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
 import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
+import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
 import org.apache.iotdb.db.metadata.schemaregion.ISchemaRegion;
 import org.apache.iotdb.db.mpp.common.FragmentInstanceId;
 import org.apache.iotdb.db.mpp.execution.driver.DataDriver;
@@ -100,7 +100,7 @@ public class FragmentInstanceManager {
   }
 
   public FragmentInstanceInfo execDataQueryFragmentInstance(
-      FragmentInstance instance, DataRegion dataRegion) {
+      FragmentInstance instance, IDataRegionForQuery dataRegion) {
 
     FragmentInstanceId instanceId = instance.getId();
     try (SetThreadName fragmentInstanceName = new SetThreadName(instanceId.getFullId())) {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
index 0b4fad3bda..4850fb45c4 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
@@ -75,7 +75,7 @@ public abstract class AbstractUpdateLastCacheOperator implements ProcessOperator
       databaseName =
           ((DataDriverContext) operatorContext.getInstanceContext().getDriverContext())
               .getDataRegion()
-              .getStorageGroupName();
+              .getDatabaseName();
     }
     return databaseName;
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LocalExecutionPlanner.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LocalExecutionPlanner.java
index a08f9af268..6015e20c71 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LocalExecutionPlanner.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LocalExecutionPlanner.java
@@ -19,7 +19,7 @@
 package org.apache.iotdb.db.mpp.plan.planner;
 
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
+import org.apache.iotdb.db.engine.storagegroup.IDataRegionForQuery;
 import org.apache.iotdb.db.metadata.schemaregion.ISchemaRegion;
 import org.apache.iotdb.db.mpp.exception.MemoryNotEnoughException;
 import org.apache.iotdb.db.mpp.execution.driver.DataDriver;
@@ -61,7 +61,7 @@ public class LocalExecutionPlanner {
       TypeProvider types,
       FragmentInstanceContext instanceContext,
       Filter timeFilter,
-      DataRegion dataRegion)
+      IDataRegionForQuery dataRegion)
       throws MemoryNotEnoughException {
     LocalExecutionPlanContext context =
         new LocalExecutionPlanContext(types, instanceContext, dataRegion.getDataTTL());
diff --git a/server/src/main/java/org/apache/iotdb/db/rescon/SystemInfo.java b/server/src/main/java/org/apache/iotdb/db/rescon/SystemInfo.java
index 0e8f67cd91..fd0214b519 100644
--- a/server/src/main/java/org/apache/iotdb/db/rescon/SystemInfo.java
+++ b/server/src/main/java/org/apache/iotdb/db/rescon/SystemInfo.java
@@ -99,7 +99,7 @@ public class SystemInfo {
     } else {
       logger.info(
           "Change system to reject status. Triggered by: logical SG ({}), mem cost delta ({}), totalSgMemCost ({}), REJECT_THERSHOLD ({})",
-          dataRegionInfo.getDataRegion().getStorageGroupName(),
+          dataRegionInfo.getDataRegion().getDatabaseName(),
           delta,
           totalStorageGroupMemCost,
           REJECT_THERSHOLD);
@@ -140,13 +140,13 @@ public class SystemInfo {
         && totalStorageGroupMemCost < REJECT_THERSHOLD) {
       logger.debug(
           "SG ({}) released memory (delta: {}) but still exceeding flush proportion (totalSgMemCost: {}), call flush.",
-          dataRegionInfo.getDataRegion().getStorageGroupName(),
+          dataRegionInfo.getDataRegion().getDatabaseName(),
           delta,
           totalStorageGroupMemCost);
       if (rejected) {
         logger.info(
             "SG ({}) released memory (delta: {}), set system to normal status (totalSgMemCost: {}).",
-            dataRegionInfo.getDataRegion().getStorageGroupName(),
+            dataRegionInfo.getDataRegion().getDatabaseName(),
             delta,
             totalStorageGroupMemCost);
       }
@@ -155,7 +155,7 @@ public class SystemInfo {
     } else if (totalStorageGroupMemCost >= REJECT_THERSHOLD) {
       logger.warn(
           "SG ({}) released memory (delta: {}), but system is still in reject status (totalSgMemCost: {}).",
-          dataRegionInfo.getDataRegion().getStorageGroupName(),
+          dataRegionInfo.getDataRegion().getDatabaseName(),
           delta,
           totalStorageGroupMemCost);
       logCurrentTotalSGMemory();
@@ -163,7 +163,7 @@ public class SystemInfo {
     } else {
       logger.debug(
           "SG ({}) released memory (delta: {}), system is in normal status (totalSgMemCost: {}).",
-          dataRegionInfo.getDataRegion().getStorageGroupName(),
+          dataRegionInfo.getDataRegion().getDatabaseName(),
           delta,
           totalStorageGroupMemCost);
       logCurrentTotalSGMemory();
diff --git a/server/src/main/java/org/apache/iotdb/db/sync/sender/manager/LocalSyncManager.java b/server/src/main/java/org/apache/iotdb/db/sync/sender/manager/LocalSyncManager.java
index 04e073ac80..a3608e82b6 100644
--- a/server/src/main/java/org/apache/iotdb/db/sync/sender/manager/LocalSyncManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/sync/sender/manager/LocalSyncManager.java
@@ -47,7 +47,7 @@ public class LocalSyncManager implements ISyncManager {
   /** tsfile */
   @Override
   public void syncRealTimeDeletion(Deletion deletion) {
-    syncPipe.collectRealTimeDeletion(deletion, dataRegion.getStorageGroupName(), dataRegionId);
+    syncPipe.collectRealTimeDeletion(deletion, dataRegion.getDatabaseName(), dataRegionId);
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java b/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
index 26852c4429..49fc78de8d 100644
--- a/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
+++ b/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
@@ -157,7 +157,7 @@ public class TsFilePipe implements Pipe {
       logger.info(
           logFormat(
               "init syncManager for %s-%s",
-              dataRegion.getStorageGroupName(), dataRegion.getDataRegionId()));
+              dataRegion.getDatabaseName(), dataRegion.getDataRegionId()));
       getOrCreateSyncManager(dataRegion.getDataRegionId());
     }
     try {
diff --git a/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java b/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
index f8bd4fa0ec..f4bd1759a2 100644
--- a/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
+++ b/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
@@ -40,7 +40,7 @@ public class SyncClientFactory {
       case IoTDB:
         IoTDBPipeSink ioTDBPipeSink = (IoTDBPipeSink) pipeSink;
         return new IoTDBSyncClient(
-            pipe, ioTDBPipeSink.getIp(), ioTDBPipeSink.getPort(), dataRegion.getStorageGroupName());
+            pipe, ioTDBPipeSink.getIp(), ioTDBPipeSink.getPort(), dataRegion.getDatabaseName());
       case ExternalPipe:
       default:
         throw new UnsupportedOperationException();
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/snapshot/IoTDBSnapshotTest.java b/server/src/test/java/org/apache/iotdb/db/engine/snapshot/IoTDBSnapshotTest.java
index 9db5350ada..8c9f5f8ff0 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/snapshot/IoTDBSnapshotTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/snapshot/IoTDBSnapshotTest.java
@@ -207,7 +207,7 @@ public class IoTDBSnapshotTest {
                 + File.separator
                 + "1-1-0-0.tsfile");
     DataRegion region = Mockito.mock(DataRegion.class);
-    Mockito.when(region.getStorageGroupName()).thenReturn("root.test");
+    Mockito.when(region.getDatabaseName()).thenReturn("root.test");
     Mockito.when(region.getDataRegionId()).thenReturn("0");
     File snapshotFile =
         new SnapshotTaker(region).getSnapshotFilePathForTsFile(tsFile, "test-snapshotId");
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
index 11d421e8d8..f20411a87b 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
@@ -824,7 +824,7 @@ public class DataRegionTest {
       List<DataRegion> dataRegions = StorageEngineV2.getInstance().getAllDataRegions();
       List<DataRegion> regionsToBeDeleted = new ArrayList<>();
       for (DataRegion region : dataRegions) {
-        if (region.getStorageGroupName().equals(storageGroup)) {
+        if (region.getDatabaseName().equals(storageGroup)) {
           regionsToBeDeleted.add(region);
         }
       }
@@ -1012,7 +1012,7 @@ public class DataRegionTest {
     List<DataRegion> dataRegions = StorageEngineV2.getInstance().getAllDataRegions();
     List<DataRegion> regionsToBeDeleted = new ArrayList<>();
     for (DataRegion region : dataRegions) {
-      if (region.getStorageGroupName().equals(storageGroup)) {
+      if (region.getDatabaseName().equals(storageGroup)) {
         regionsToBeDeleted.add(region);
       }
     }