You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by zy...@apache.org on 2023/03/21 12:37:37 UTC

[iotdb] branch rc/1.1.0 updated (4aa755de7f -> e62a9eec72)

This is an automated email from the ASF dual-hosted git repository.

zyk pushed a change to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git


    from 4aa755de7f [To rel/1.1] Fix possible npe when closing IdentitySinkOperator
     new d1d896b5b2 [To rel/1.1][IOTDB-5580] Add limitation of time and tsBlock size to MergeSortOperator (#9220)
     new d1397cf969 [IOTDB-5700] Clean temporary files created by UDF query after it finishes (#9376) (#9380)
     new 7dcba80854 [To rel/1.1][IOTDB-5686] Fix devices with the same name but different alignment properties meets error in inner seq compaction
     new bf8a6c6ac7 [IOTDB-5705] Replace data_region_per_processor by data_region_per_data_node (#9386) (#9393)
     new 115a46dc6a [To rel/1.1][IOTDB-5704] change default write parameter for wal part (#9395)
     new e62a9eec72 [TO rel/1.1][IOTDB-5704] fix default comments for previously changed parameters (#9397)

The 6 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../iotdb/confignode/conf/ConfigNodeConfig.java    |  10 +-
 .../confignode/conf/ConfigNodeDescriptor.java      |   4 +-
 .../confignode/manager/ClusterSchemaManager.java   |  19 +-
 .../iotdb/confignode/manager/ConfigManager.java    |   6 +-
 docs/UserGuide/Cluster/Cluster-Maintenance.md      |   2 +-
 docs/zh/UserGuide/Cluster/Cluster-Maintenance.md   |   2 +-
 .../it/cluster/IoTDBClusterNodeGetterIT.java       |   4 +-
 .../confignode/it/utils/ConfigNodeTestUtils.java   |   2 +-
 .../resources/conf/iotdb-common.properties         |  20 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |   8 +-
 .../impl/ReadChunkCompactionPerformer.java         |   7 +-
 .../db/mpp/common/header/ColumnHeaderConstant.java |   2 +-
 .../operator/process/MergeSortOperator.java        |   9 +
 .../config/metadata/ShowVariablesTask.java         |   4 +-
 .../db/mpp/transformation/dag/udf/UDTFContext.java |   3 +
 .../engine/compaction/AbstractCompactionTest.java  |   7 +-
 .../compaction/FastAlignedCrossCompactionTest.java | 139 ++--
 .../FastNonAlignedCrossCompactionTest.java         | 138 ++--
 .../utils/MultiTsFileDeviceIteratorTest.java       | 734 +++++++++++++++++++++
 .../execution/operator/MergeSortOperatorTest.java  |  96 ++-
 .../src/main/thrift/confignode.thrift              |   2 +-
 21 files changed, 1000 insertions(+), 218 deletions(-)


[iotdb] 06/06: [TO rel/1.1][IOTDB-5704] fix default comments for previously changed parameters (#9397)

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit e62a9eec723612bcbae97cfbc7af649f07514291
Author: Zhang.Jinrui <xi...@gmail.com>
AuthorDate: Tue Mar 21 18:04:18 2023 +0800

    [TO rel/1.1][IOTDB-5704] fix default comments for previously changed parameters (#9397)
---
 node-commons/src/assembly/resources/conf/iotdb-common.properties | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/node-commons/src/assembly/resources/conf/iotdb-common.properties b/node-commons/src/assembly/resources/conf/iotdb-common.properties
index d579e25189..0a61caa03c 100644
--- a/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -657,12 +657,12 @@ cluster_name=defaultCluster
 
 # Duration a wal flush operation will wait before calling fsync
 # A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists.
-# Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended.
+# Notice: this value affects write performance significantly, values in the range of 0ms-2000ms are recommended.
 # Datatype: long
 # fsync_wal_delay_in_ms=1000
 
 # Buffer size of each wal node
-# If it's a value smaller than 0, use the default value 16 * 1024 * 1024 bytes (16MB).
+# If it's a value smaller than 0, use the default value 32 * 1024 * 1024 bytes (32MB).
 # Datatype: int
 # wal_buffer_size_in_byte=33554432
 
@@ -672,7 +672,7 @@ cluster_name=defaultCluster
 
 # Size threshold of each wal file
 # When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created.
-# If it's a value smaller than 0, use the default value 10 * 1024 * 1024 (10MB).
+# If it's a value smaller than 0, use the default value 30 * 1024 * 1024 (30MB).
 # Datatype: long
 # wal_file_size_threshold_in_byte=31457280
 


[iotdb] 04/06: [IOTDB-5705] Replace data_region_per_processor by data_region_per_data_node (#9386) (#9393)

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit bf8a6c6ac70583f68214cbefc0f4680a582a3977
Author: YongzaoDan <33...@users.noreply.github.com>
AuthorDate: Tue Mar 21 12:25:15 2023 +0800

    [IOTDB-5705] Replace data_region_per_processor by data_region_per_data_node (#9386) (#9393)
---
 .../iotdb/confignode/conf/ConfigNodeConfig.java       | 10 +++++-----
 .../iotdb/confignode/conf/ConfigNodeDescriptor.java   |  4 ++--
 .../confignode/manager/ClusterSchemaManager.java      | 19 +++++++++----------
 .../iotdb/confignode/manager/ConfigManager.java       |  6 +++---
 docs/UserGuide/Cluster/Cluster-Maintenance.md         |  2 +-
 docs/zh/UserGuide/Cluster/Cluster-Maintenance.md      |  2 +-
 .../it/cluster/IoTDBClusterNodeGetterIT.java          |  4 ++--
 .../confignode/it/utils/ConfigNodeTestUtils.java      |  2 +-
 .../assembly/resources/conf/iotdb-common.properties   |  6 +++---
 .../db/mpp/common/header/ColumnHeaderConstant.java    |  2 +-
 .../execution/config/metadata/ShowVariablesTask.java  |  4 ++--
 thrift-confignode/src/main/thrift/confignode.thrift   |  2 +-
 12 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
index 71cac276b7..b4d0b77c7f 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
@@ -101,7 +101,7 @@ public class ConfigNodeConfig {
   private int defaultDataRegionGroupNumPerDatabase = 2;
 
   /** The maximum number of DataRegions expected to be managed by each DataNode. */
-  private double dataRegionPerProcessor = 1.0;
+  private double dataRegionPerDataNode = 5.0;
 
   /** RegionGroup allocate policy. */
   private RegionBalancer.RegionGroupAllocatePolicy regionGroupAllocatePolicy =
@@ -537,12 +537,12 @@ public class ConfigNodeConfig {
     this.dataRegionConsensusProtocolClass = dataRegionConsensusProtocolClass;
   }
 
-  public double getDataRegionPerProcessor() {
-    return dataRegionPerProcessor;
+  public double getDataRegionPerDataNode() {
+    return dataRegionPerDataNode;
   }
 
-  public void setDataRegionPerProcessor(double dataRegionPerProcessor) {
-    this.dataRegionPerProcessor = dataRegionPerProcessor;
+  public void setDataRegionPerDataNode(double dataRegionPerDataNode) {
+    this.dataRegionPerDataNode = dataRegionPerDataNode;
   }
 
   public RegionBalancer.RegionGroupAllocatePolicy getRegionGroupAllocatePolicy() {
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
index 6e508da618..5ff6618930 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
@@ -250,11 +250,11 @@ public class ConfigNodeDescriptor {
                 "default_data_region_group_num_per_database",
                 String.valueOf(conf.getDefaultDataRegionGroupNumPerDatabase()).trim())));
 
-    conf.setDataRegionPerProcessor(
+    conf.setDataRegionPerDataNode(
         Double.parseDouble(
             properties
                 .getProperty(
-                    "data_region_per_processor", String.valueOf(conf.getDataRegionPerProcessor()))
+                    "data_region_per_data_node", String.valueOf(conf.getDataRegionPerDataNode()))
                 .trim()));
 
     try {
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
index 8bd2458ed9..e6c0529255 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java
@@ -103,7 +103,7 @@ public class ClusterSchemaManager {
 
   private static final ConfigNodeConfig CONF = ConfigNodeDescriptor.getInstance().getConf();
   private static final double SCHEMA_REGION_PER_DATA_NODE = CONF.getSchemaRegionPerDataNode();
-  private static final double DATA_REGION_PER_PROCESSOR = CONF.getDataRegionPerProcessor();
+  private static final double DATA_REGION_PER_DATA_NODE = CONF.getDataRegionPerDataNode();
 
   private final IManager configManager;
   private final ClusterSchemaInfo clusterSchemaInfo;
@@ -377,7 +377,6 @@ public class ClusterSchemaManager {
     }
 
     int dataNodeNum = getNodeManager().getRegisteredDataNodeCount();
-    int totalCpuCoreNum = getNodeManager().getTotalCpuCoreCount();
     int databaseNum = databaseSchemaMap.size();
 
     for (TDatabaseSchema databaseSchema : databaseSchemaMap.values()) {
@@ -390,8 +389,8 @@ public class ClusterSchemaManager {
     AdjustMaxRegionGroupNumPlan adjustMaxRegionGroupNumPlan = new AdjustMaxRegionGroupNumPlan();
     for (TDatabaseSchema databaseSchema : databaseSchemaMap.values()) {
       try {
-        // Adjust maxSchemaRegionGroupNum for each StorageGroup.
-        // All StorageGroups share the DataNodes equally.
+        // Adjust maxSchemaRegionGroupNum for each Database.
+        // All Databases share the DataNodes equally.
         // The allocated SchemaRegionGroups will not be shrunk.
         int allocatedSchemaRegionGroupCount;
         try {
@@ -416,8 +415,8 @@ public class ClusterSchemaManager {
             databaseSchema.getName(),
             maxSchemaRegionGroupNum);
 
-        // Adjust maxDataRegionGroupNum for each StorageGroup.
-        // All StorageGroups divide the total cpu cores equally.
+        // Adjust maxDataRegionGroupNum for each Database.
+        // All Databases share the DataNodes equally.
         // The allocated DataRegionGroups will not be shrunk.
         int allocatedDataRegionGroupCount =
             getPartitionManager()
@@ -425,8 +424,8 @@ public class ClusterSchemaManager {
         int maxDataRegionGroupNum =
             calcMaxRegionGroupNum(
                 databaseSchema.getMinDataRegionGroupNum(),
-                DATA_REGION_PER_PROCESSOR,
-                totalCpuCoreNum,
+                DATA_REGION_PER_DATA_NODE,
+                dataNodeNum,
                 databaseNum,
                 databaseSchema.getDataReplicationFactor(),
                 allocatedDataRegionGroupCount);
@@ -448,7 +447,7 @@ public class ClusterSchemaManager {
       int minRegionGroupNum,
       double resourceWeight,
       int resource,
-      int storageGroupNum,
+      int databaseNum,
       int replicationFactor,
       int allocatedRegionGroupCount) {
     return Math.max(
@@ -461,7 +460,7 @@ public class ClusterSchemaManager {
                 Math.ceil(
                     // The maxRegionGroupNum of the current StorageGroup is expected to be:
                     // (resourceWeight * resource) / (createdStorageGroupNum * replicationFactor)
-                    resourceWeight * resource / (double) (storageGroupNum * replicationFactor)),
+                    resourceWeight * resource / (databaseNum * replicationFactor)),
             // The maxRegionGroupNum should be great or equal to the allocatedRegionGroupCount
             allocatedRegionGroupCount));
   }
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
index 52aa33493f..6a4b8ddb15 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
@@ -456,7 +456,7 @@ public class ConfigManager implements IManager {
     clusterParameters.setTimePartitionInterval(CONF.getTimePartitionInterval());
     clusterParameters.setDataReplicationFactor(CONF.getDataReplicationFactor());
     clusterParameters.setSchemaReplicationFactor(CONF.getSchemaReplicationFactor());
-    clusterParameters.setDataRegionPerProcessor(CONF.getDataRegionPerProcessor());
+    clusterParameters.setDataRegionPerDataNode(CONF.getDataRegionPerDataNode());
     clusterParameters.setSchemaRegionPerDataNode(CONF.getSchemaRegionPerDataNode());
     clusterParameters.setDiskSpaceWarningThreshold(COMMON_CONF.getDiskSpaceWarningThreshold());
     clusterParameters.setReadConsistencyLevel(CONF.getReadConsistencyLevel());
@@ -1071,8 +1071,8 @@ public class ConfigManager implements IManager {
     if (clusterParameters.getSchemaRegionPerDataNode() != CONF.getSchemaRegionPerDataNode()) {
       return errorStatus.setMessage(errorPrefix + "schema_region_per_data_node" + errorSuffix);
     }
-    if (clusterParameters.getDataRegionPerProcessor() != CONF.getDataRegionPerProcessor()) {
-      return errorStatus.setMessage(errorPrefix + "data_region_per_processor" + errorSuffix);
+    if (clusterParameters.getDataRegionPerDataNode() != CONF.getDataRegionPerDataNode()) {
+      return errorStatus.setMessage(errorPrefix + "data_region_per_data_node" + errorSuffix);
     }
 
     if (!clusterParameters.getReadConsistencyLevel().equals(CONF.getReadConsistencyLevel())) {
diff --git a/docs/UserGuide/Cluster/Cluster-Maintenance.md b/docs/UserGuide/Cluster/Cluster-Maintenance.md
index f10a4ed906..8c3ab7c809 100644
--- a/docs/UserGuide/Cluster/Cluster-Maintenance.md
+++ b/docs/UserGuide/Cluster/Cluster-Maintenance.md
@@ -44,7 +44,7 @@ IoTDB> show variables
 |                    DefaultTTL(ms)|                                              9223372036854775807|
 |              ReadConsistencyLevel|                                                           strong|
 |           SchemaRegionPerDataNode|                                                              1.0|
-|            DataRegionPerProcessor|                                                              1.0|
+|             DataRegionPerDataNode|                                                              5.0|
 |           LeastDataRegionGroupNum|                                                                5|
 |                     SeriesSlotNum|                                                            10000|
 |           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
diff --git a/docs/zh/UserGuide/Cluster/Cluster-Maintenance.md b/docs/zh/UserGuide/Cluster/Cluster-Maintenance.md
index ea7dd151d0..84bc486703 100644
--- a/docs/zh/UserGuide/Cluster/Cluster-Maintenance.md
+++ b/docs/zh/UserGuide/Cluster/Cluster-Maintenance.md
@@ -44,7 +44,7 @@ IoTDB> show variables
 |                    DefaultTTL(ms)|                                              9223372036854775807|
 |              ReadConsistencyLevel|                                                           strong|
 |           SchemaRegionPerDataNode|                                                              1.0|
-|            DataRegionPerProcessor|                                                              1.0|
+|             DataRegionPerDataNode|                                                              5.0|
 |           LeastDataRegionGroupNum|                                                                5|
 |                     SeriesSlotNum|                                                            10000|
 |           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java
index 351d69a303..5694dbd25e 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/cluster/IoTDBClusterNodeGetterIT.java
@@ -162,8 +162,8 @@ public class IoTDBClusterNodeGetterIT {
           expectedParameters.getSchemaReplicationFactor(),
           clusterParameters.getSchemaReplicationFactor());
       Assert.assertEquals(
-          expectedParameters.getDataRegionPerProcessor(),
-          clusterParameters.getDataRegionPerProcessor(),
+          expectedParameters.getDataRegionPerDataNode(),
+          clusterParameters.getDataRegionPerDataNode(),
           0.01);
       Assert.assertEquals(
           expectedParameters.getSchemaRegionPerDataNode(),
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
index 735082c5d7..2bca1e15cd 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
@@ -208,7 +208,7 @@ public class ConfigNodeTestUtils {
     clusterParameters.setTimePartitionInterval(604800000);
     clusterParameters.setDataReplicationFactor(1);
     clusterParameters.setSchemaReplicationFactor(1);
-    clusterParameters.setDataRegionPerProcessor(1.0);
+    clusterParameters.setDataRegionPerDataNode(5.0);
     clusterParameters.setSchemaRegionPerDataNode(1.0);
     clusterParameters.setDiskSpaceWarningThreshold(0.05);
     clusterParameters.setReadConsistencyLevel("strong");
diff --git a/node-commons/src/assembly/resources/conf/iotdb-common.properties b/node-commons/src/assembly/resources/conf/iotdb-common.properties
index 3f42740fa1..9f487ca068 100644
--- a/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -103,7 +103,7 @@ cluster_name=defaultCluster
 # This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode.
 # Notice: Since each Database requires at least one SchemaRegionGroup to manage its schema,
 # this parameter doesn't limit the upper bound of cluster SchemaRegions when there are too many Databases.
-# Default is equal to the schema_replication_factor.
+# Default is equal to the schema_replication_factor to ensure each DataNode will have a SchemaRegionGroupLeader.
 # Datatype: Double
 # schema_region_per_data_node=1.0
 
@@ -122,11 +122,11 @@ cluster_name=defaultCluster
 # default_data_region_group_num_per_database=2
 
 # Only take effect when set data_region_group_extension_policy=AUTO.
-# This parameter is the maximum number of DataRegions expected to be managed by each processor.
+# This parameter is the maximum number of DataRegions expected to be managed by each DataNode.
 # Notice: Since each Database requires at least two DataRegionGroups to manage its data,
 # this parameter doesn't limit the upper bound of cluster DataRegions when there are too many Databases.
 # Datatype: Double
-# data_region_per_processor=1.0
+# data_region_per_data_node=5.0
 
 
 # Whether to enable the DataPartition inherit policy.
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/common/header/ColumnHeaderConstant.java b/server/src/main/java/org/apache/iotdb/db/mpp/common/header/ColumnHeaderConstant.java
index baaf9a773c..4a11b2dd99 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/common/header/ColumnHeaderConstant.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/common/header/ColumnHeaderConstant.java
@@ -110,7 +110,7 @@ public class ColumnHeaderConstant {
   public static final String SERIES_SLOT_EXECUTOR_CLASS = "SeriesSlotExecutorClass";
   public static final String DEFAULT_TTL = "DefaultTTL(ms)";
   public static final String SCHEMA_REGION_PER_DATA_NODE = "SchemaRegionPerDataNode";
-  public static final String DATA_REGION_PER_PROCESSOR = "DataRegionPerProcessor";
+  public static final String DATA_REGION_PER_DATA_NODE = "DataRegionPerDataNode";
   public static final String READ_CONSISTENCY_LEVEL = "ReadConsistencyLevel";
   public static final String DISK_SPACE_WARNING_THRESHOLD = "DiskSpaceWarningThreshold";
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/metadata/ShowVariablesTask.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/metadata/ShowVariablesTask.java
index 90e3ffd166..a43715fe9c 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/metadata/ShowVariablesTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/config/metadata/ShowVariablesTask.java
@@ -103,8 +103,8 @@ public class ShowVariablesTask implements IConfigTask {
         new Binary(String.valueOf(clusterParameters.getSchemaRegionPerDataNode())));
     buildTSBlock(
         builder,
-        new Binary(ColumnHeaderConstant.DATA_REGION_PER_PROCESSOR),
-        new Binary(String.valueOf(clusterParameters.getDataRegionPerProcessor())));
+        new Binary(ColumnHeaderConstant.DATA_REGION_PER_DATA_NODE),
+        new Binary(String.valueOf(clusterParameters.getDataRegionPerDataNode())));
     buildTSBlock(
         builder,
         new Binary(ColumnHeaderConstant.SERIES_SLOT_NUM),
diff --git a/thrift-confignode/src/main/thrift/confignode.thrift b/thrift-confignode/src/main/thrift/confignode.thrift
index b29b460d5f..d6858a6555 100644
--- a/thrift-confignode/src/main/thrift/confignode.thrift
+++ b/thrift-confignode/src/main/thrift/confignode.thrift
@@ -332,7 +332,7 @@ struct TClusterParameters {
   8: required i64 defaultTTL
   9: required string readConsistencyLevel
   10: required double schemaRegionPerDataNode
-  11: required double dataRegionPerProcessor
+  11: required double dataRegionPerDataNode
   12: required i32 seriesPartitionSlotNum
   13: required string seriesPartitionExecutorClass
   14: required double diskSpaceWarningThreshold


[iotdb] 05/06: [To rel/1.1][IOTDB-5704] change default write parameter for wal part (#9395)

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 115a46dc6ae0fa932ecff5c7f06a93164093dac6
Author: Zhang.Jinrui <xi...@gmail.com>
AuthorDate: Tue Mar 21 14:49:43 2023 +0800

    [To rel/1.1][IOTDB-5704] change default write parameter for wal part (#9395)
---
 node-commons/src/assembly/resources/conf/iotdb-common.properties | 8 ++++----
 server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java   | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/node-commons/src/assembly/resources/conf/iotdb-common.properties b/node-commons/src/assembly/resources/conf/iotdb-common.properties
index 9f487ca068..d579e25189 100644
--- a/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -659,22 +659,22 @@ cluster_name=defaultCluster
 # A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists.
 # Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended.
 # Datatype: long
-# fsync_wal_delay_in_ms=3
+# fsync_wal_delay_in_ms=1000
 
 # Buffer size of each wal node
 # If it's a value smaller than 0, use the default value 16 * 1024 * 1024 bytes (16MB).
 # Datatype: int
-# wal_buffer_size_in_byte=16777216
+# wal_buffer_size_in_byte=33554432
 
 # Blocking queue capacity of each wal buffer, restricts maximum number of WALEdits cached in the blocking queue.
 # Datatype: int
-# wal_buffer_queue_capacity=50
+# wal_buffer_queue_capacity=500
 
 # Size threshold of each wal file
 # When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created.
 # If it's a value smaller than 0, use the default value 10 * 1024 * 1024 (10MB).
 # Datatype: long
-# wal_file_size_threshold_in_byte=10485760
+# wal_file_size_threshold_in_byte=31457280
 
 # Minimum ratio of effective information in wal files
 # This value should be between 0.0 and 1.0
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 70a0b0f3da..38bc963235 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -183,19 +183,19 @@ public class IoTDBConfig {
   private int maxWalNodesNum = 0;
 
   /** Duration a wal flush operation will wait before calling fsync. Unit: millisecond */
-  private volatile long fsyncWalDelayInMs = 3;
+  private volatile long fsyncWalDelayInMs = 1000;
 
   /** Buffer size of each wal node. Unit: byte */
-  private int walBufferSize = 16 * 1024 * 1024;
+  private int walBufferSize = 32 * 1024 * 1024;
 
   /** Buffer entry size of each wal buffer. Unit: byte */
   private int walBufferEntrySize = 16 * 1024;
 
   /** Blocking queue capacity of each wal buffer */
-  private int walBufferQueueCapacity = 50;
+  private int walBufferQueueCapacity = 500;
 
   /** Size threshold of each wal file. Unit: byte */
-  private volatile long walFileSizeThresholdInByte = 10 * 1024 * 1024L;
+  private volatile long walFileSizeThresholdInByte = 30 * 1024 * 1024L;
 
   /** Size threshold of each checkpoint file. Unit: byte */
   private volatile long checkpointFileSizeThresholdInByte = 3 * 1024 * 1024L;


[iotdb] 02/06: [IOTDB-5700] Clean temporary files created by UDF query after it finishes (#9376) (#9380)

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit d1397cf969d1178a44dcd1c48b9b1314a0338474
Author: Liao Lanyu <14...@qq.com>
AuthorDate: Mon Mar 20 16:15:08 2023 +0800

    [IOTDB-5700] Clean temporary files created by UDF query after it finishes (#9376) (#9380)
---
 .../org/apache/iotdb/db/mpp/transformation/dag/udf/UDTFContext.java    | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/udf/UDTFContext.java b/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/udf/UDTFContext.java
index be40778823..293fc97ac4 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/udf/UDTFContext.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/udf/UDTFContext.java
@@ -22,6 +22,7 @@ package org.apache.iotdb.db.mpp.transformation.dag.udf;
 import org.apache.iotdb.commons.udf.service.UDFClassLoaderManager;
 import org.apache.iotdb.db.mpp.plan.expression.Expression;
 import org.apache.iotdb.db.mpp.plan.expression.multi.FunctionExpression;
+import org.apache.iotdb.db.service.TemporaryQueryDataFileService;
 
 import java.time.ZoneId;
 import java.util.HashMap;
@@ -50,6 +51,8 @@ public class UDTFContext {
       }
     } finally {
       UDFClassLoaderManager.getInstance().finalizeUDFQuery(queryId);
+      // close and delete UDF temp files
+      TemporaryQueryDataFileService.getInstance().deregister(queryId);
     }
   }
 


[iotdb] 03/06: [To rel/1.1][IOTDB-5686] Fix devices with the same name but different alignment properties meets error in inner seq compaction

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 7dcba8085413b95e3c4f398ed91652248f19bf36
Author: 周沛辰 <45...@users.noreply.github.com>
AuthorDate: Tue Mar 21 10:38:07 2023 +0800

    [To rel/1.1][IOTDB-5686] Fix devices with the same name but different alignment properties meets error in inner seq compaction
---
 .../impl/ReadChunkCompactionPerformer.java         |   7 +-
 .../engine/compaction/AbstractCompactionTest.java  |   7 +-
 .../compaction/FastAlignedCrossCompactionTest.java | 139 ++--
 .../FastNonAlignedCrossCompactionTest.java         | 138 ++--
 .../utils/MultiTsFileDeviceIteratorTest.java       | 734 +++++++++++++++++++++
 5 files changed, 883 insertions(+), 142 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java
index e97f2e5fe3..58d2a9e25e 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/execute/performer/impl/ReadChunkCompactionPerformer.java
@@ -170,8 +170,13 @@ public class ReadChunkCompactionPerformer implements ISeqCompactionPerformer {
         deviceIterator.iterateNotAlignedSeries(device, true);
     while (seriesIterator.hasNextSeries()) {
       checkThreadInterrupted();
+      String seriesID = seriesIterator.nextSeries();
+      if (seriesID.equals("")) {
+        // encounter a deleted aligned device, return
+        return;
+      }
       // TODO: we can provide a configuration item to enable concurrent between each series
-      PartialPath p = new PartialPath(device, seriesIterator.nextSeries());
+      PartialPath p = new PartialPath(device, seriesID);
       // TODO: seriesIterator needs to be refactor.
       // This statement must be called before next hasNextSeries() called, or it may be trapped in a
       // dead-loop.
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/compaction/AbstractCompactionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/compaction/AbstractCompactionTest.java
index 049bc778fb..c3c8e6366e 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/compaction/AbstractCompactionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/compaction/AbstractCompactionTest.java
@@ -449,8 +449,8 @@ public class AbstractCompactionTest {
               path,
               FragmentInstanceContext.createFragmentInstanceContextForCompaction(
                   EnvironmentUtils.TEST_QUERY_CONTEXT.getQueryId()),
-              seqResources,
-              unseqResources,
+              tsFileManager.getTsFileList(true),
+              tsFileManager.getTsFileList(false),
               true);
       while (tsBlockReader.hasNextBatch()) {
         TsBlock block = tsBlockReader.nextBatch();
@@ -490,7 +490,8 @@ public class AbstractCompactionTest {
         }
       }
       if (timeseriesData.size() > 0) {
-        // there are still data points left, which are not in the target file
+        // there are still data points left, which are not in the target file. Lost the data after
+        // compaction.
         fail();
       }
     }
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastAlignedCrossCompactionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastAlignedCrossCompactionTest.java
index 08cd53cacd..06e7f59697 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastAlignedCrossCompactionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastAlignedCrossCompactionTest.java
@@ -223,12 +223,13 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
+
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -462,12 +463,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -700,12 +701,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -955,12 +956,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -1303,12 +1304,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -1659,12 +1660,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2025,12 +2026,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2375,12 +2376,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2726,12 +2727,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -3087,12 +3088,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     validateSeqFiles(true);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
@@ -3501,12 +3502,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -3917,12 +3918,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -4333,12 +4334,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -4798,12 +4799,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5211,12 +5212,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5417,12 +5418,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5682,12 +5683,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6065,12 +6066,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6416,12 +6417,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6655,12 +6656,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6789,12 +6790,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6971,12 +6972,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -7187,12 +7188,12 @@ public class FastAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastNonAlignedCrossCompactionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastNonAlignedCrossCompactionTest.java
index afb10bafeb..a91cab2062 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastNonAlignedCrossCompactionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/compaction/FastNonAlignedCrossCompactionTest.java
@@ -244,12 +244,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -479,12 +479,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -713,12 +713,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -965,12 +965,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -1310,12 +1310,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -1662,12 +1662,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2024,12 +2024,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2371,12 +2371,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -2719,12 +2719,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -3077,12 +3077,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -3487,12 +3487,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -3900,12 +3900,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -4313,12 +4313,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -4775,12 +4775,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5185,12 +5185,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5391,12 +5391,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -5656,12 +5656,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6039,12 +6039,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6387,12 +6387,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6626,12 +6626,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6760,12 +6760,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -6942,12 +6942,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
@@ -7151,12 +7151,12 @@ public class FastNonAlignedCrossCompactionTest extends AbstractCompactionTest {
     resource.serialize();
     unseqResources.add(resource);
 
-    Map<PartialPath, List<TimeValuePair>> sourceDatas =
-        readSourceFiles(timeserisPathList, tsDataTypes);
-
     // start compacting
     tsFileManager.addAll(seqResources, true);
     tsFileManager.addAll(unseqResources, false);
+
+    Map<PartialPath, List<TimeValuePair>> sourceDatas =
+        readSourceFiles(timeserisPathList, tsDataTypes);
     CrossSpaceCompactionTask task =
         new CrossSpaceCompactionTask(
             0,
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/compaction/utils/MultiTsFileDeviceIteratorTest.java b/server/src/test/java/org/apache/iotdb/db/engine/compaction/utils/MultiTsFileDeviceIteratorTest.java
index 9ab358f6e4..5e4c60e4ed 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/compaction/utils/MultiTsFileDeviceIteratorTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/compaction/utils/MultiTsFileDeviceIteratorTest.java
@@ -19,16 +19,30 @@
 package org.apache.iotdb.db.engine.compaction.utils;
 
 import org.apache.iotdb.commons.exception.MetadataException;
+import org.apache.iotdb.commons.path.AlignedPath;
+import org.apache.iotdb.commons.path.MeasurementPath;
+import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.compaction.AbstractCompactionTest;
+import org.apache.iotdb.db.engine.compaction.execute.performer.impl.FastCompactionPerformer;
+import org.apache.iotdb.db.engine.compaction.execute.performer.impl.ReadChunkCompactionPerformer;
+import org.apache.iotdb.db.engine.compaction.execute.performer.impl.ReadPointCompactionPerformer;
+import org.apache.iotdb.db.engine.compaction.execute.task.CompactionTaskSummary;
+import org.apache.iotdb.db.engine.compaction.execute.task.InnerSpaceCompactionTask;
+import org.apache.iotdb.db.engine.compaction.execute.task.subtask.FastCompactionTaskSummary;
+import org.apache.iotdb.db.engine.compaction.execute.utils.CompactionUtils;
 import org.apache.iotdb.db.engine.compaction.execute.utils.MultiTsFileDeviceIterator;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.TimeValuePair;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.utils.TsFileGeneratorUtils;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
 
 import org.junit.After;
 import org.junit.Assert;
@@ -37,17 +51,22 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR;
 
 public class MultiTsFileDeviceIteratorTest extends AbstractCompactionTest {
+  private final String oldThreadName = Thread.currentThread().getName();
 
   @Before
   public void setUp()
       throws IOException, WriteProcessException, MetadataException, InterruptedException {
     super.setUp();
     IoTDBDescriptor.getInstance().getConfig().setTargetChunkSize(1024);
+    Thread.currentThread().setName("pool-1-IoTDB-Compaction-1");
   }
 
   @After
@@ -59,6 +78,7 @@ public class MultiTsFileDeviceIteratorTest extends AbstractCompactionTest {
     for (TsFileResource tsFileResource : unseqResources) {
       FileReaderManager.getInstance().closeFileAndRemoveReader(tsFileResource.getTsFilePath());
     }
+    Thread.currentThread().setName(oldThreadName);
   }
 
   @Test
@@ -297,4 +317,718 @@ public class MultiTsFileDeviceIteratorTest extends AbstractCompactionTest {
     Assert.assertEquals(30, deviceNum);
     TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
   }
+
+  /**
+   * Create a device with the nonAligned property. Delete it and create a new device with the same
+   * deviceID but aligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but nonAligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByReadChunkPerformer()
+      throws MetadataException, IOException, WriteProcessException, StorageEngineException,
+          InterruptedException {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, false);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, false, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, false, true);
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new ReadChunkCompactionPerformer(),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertFalse(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    ReadChunkCompactionPerformer performer =
+        new ReadChunkCompactionPerformer(tsFileManager.getTsFileList(true), targetResources.get(0));
+    performer.setSummary(new CompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
+
+  /**
+   * Create a device with the aligned property. Delete it and create a new device with the same
+   * deviceID but nonAligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but aligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByReadChunkPerformer2()
+      throws MetadataException, IOException, WriteProcessException, StorageEngineException,
+          InterruptedException {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, true);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, true, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, true, true);
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new ReadChunkCompactionPerformer(),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertTrue(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    ReadChunkCompactionPerformer performer =
+        new ReadChunkCompactionPerformer(tsFileManager.getTsFileList(true), targetResources.get(0));
+    performer.setSummary(new CompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
+
+  /**
+   * Create a device with the nonAligned property. Delete it and create a new device with the same
+   * deviceID but aligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but nonAligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByReadPointPerformer() throws Exception {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, false);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, false, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, false, true);
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new ReadPointCompactionPerformer(),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertFalse(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    ReadPointCompactionPerformer performer =
+        new ReadPointCompactionPerformer(
+            tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources);
+    performer.setSummary(new CompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
+
+  /**
+   * Create a device with the aligned property. Delete it and create a new device with the same
+   * deviceID but nonAligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but aligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByReadPointPerformer2() throws Exception {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, true);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, true, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, true, true);
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new ReadPointCompactionPerformer(),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertTrue(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    ReadPointCompactionPerformer performer =
+        new ReadPointCompactionPerformer(
+            tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources);
+    performer.setSummary(new CompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
+
+  /**
+   * Create a device with the nonAligned property. Delete it and create a new device with the same
+   * deviceID but aligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but nonAligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByFastPerformer()
+      throws MetadataException, IOException, WriteProcessException, StorageEngineException,
+          InterruptedException {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, false);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, false, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, false, true);
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new FastCompactionPerformer(false),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertFalse(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    FastCompactionPerformer performer =
+        new FastCompactionPerformer(
+            tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources);
+    performer.setSummary(new FastCompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
+
+  /**
+   * Create a device with the aligned property. Delete it and create a new device with the same
+   * deviceID but nonAligned. Compact it. Then delete it and create a new device with the same
+   * deviceID but aligned. Check whether the deviceID and its property can be obtained correctly.
+   */
+  @Test
+  public void getDeletedDevicesWithSameNameFromSeqFilesByFastPerformer2()
+      throws MetadataException, IOException, WriteProcessException, StorageEngineException,
+          InterruptedException {
+    TSFileDescriptor.getInstance().getConfig().setMaxDegreeOfIndexNode(3);
+    int oldAlignedDeviceOffset = TsFileGeneratorUtils.alignDeviceOffset;
+    TsFileGeneratorUtils.alignDeviceOffset = 0;
+    registerTimeseriesInMManger(30, 5, true);
+    createFiles(3, 10, 5, 100, 0, 0, 50, 50, true, true);
+    createFiles(4, 30, 5, 100, 1000, 0, 50, 50, true, true);
+
+    // generate mods file, delete d0 ~ d9 with aligned property
+    List<String> seriesPaths = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 5; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(seriesPaths, seqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    generateModsFile(seriesPaths, unseqResources, Long.MIN_VALUE, Long.MAX_VALUE);
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with nonAligned property
+    createFiles(2, 10, 15, 100, 2000, 2000, 50, 50, false, true);
+    tsFileManager.addAll(seqResources, true);
+
+    List<PartialPath> timeseriesPaths = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      for (int j = 0; j < 15; j++) {
+        if (i < 10) {
+          timeseriesPaths.add(
+              new AlignedPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i,
+                  Collections.singletonList("s" + j),
+                  Collections.singletonList(new MeasurementSchema("s" + j, TSDataType.INT64))));
+        } else {
+          timeseriesPaths.add(
+              new MeasurementPath(
+                  COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j,
+                  TSDataType.INT64));
+        }
+      }
+    }
+    Map<PartialPath, List<TimeValuePair>> sourceData =
+        readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    InnerSpaceCompactionTask task =
+        new InnerSpaceCompactionTask(
+            0,
+            tsFileManager,
+            tsFileManager.getTsFileList(true),
+            true,
+            new FastCompactionPerformer(false),
+            new AtomicInteger(),
+            0L);
+    task.start();
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+
+    // generate mods file, delete d0 ~ d9 with nonAligned property
+    seriesPaths.clear();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 15; j++) {
+        seriesPaths.add(COMPACTION_TEST_SG + PATH_SEPARATOR + "d" + i + PATH_SEPARATOR + "s" + j);
+      }
+    }
+    generateModsFile(
+        seriesPaths, tsFileManager.getTsFileList(true), Long.MIN_VALUE, Long.MAX_VALUE);
+
+    deleteTimeseriesInMManager(seriesPaths);
+
+    // generate d0 ~ d9 with aligned property
+    createFiles(1, 10, 5, 100, 2000, 2000, 50, 50, true, true);
+    tsFileManager.add(seqResources.get(seqResources.size() - 1), true);
+
+    sourceData = readSourceFiles(timeseriesPaths, Collections.emptyList());
+
+    // sort the deviceId in lexicographical order from small to large
+    List<String> deviceIds = new ArrayList<>();
+    for (int i = 0; i < 30; i++) {
+      deviceIds.add("root.testsg.d" + (i + TsFileGeneratorUtils.getAlignDeviceOffset()));
+    }
+    deviceIds.sort(String::compareTo);
+
+    int deviceNum = 0;
+    try (MultiTsFileDeviceIterator multiTsFileDeviceIterator =
+        new MultiTsFileDeviceIterator(tsFileManager.getTsFileList(true))) {
+      while (multiTsFileDeviceIterator.hasNextDevice()) {
+        Pair<String, Boolean> deviceInfo = multiTsFileDeviceIterator.nextDevice();
+        Assert.assertEquals(deviceIds.get(deviceNum), deviceInfo.left);
+        Assert.assertTrue(deviceInfo.right);
+        deviceNum++;
+      }
+    }
+    Assert.assertEquals(30, deviceNum);
+    TsFileGeneratorUtils.alignDeviceOffset = oldAlignedDeviceOffset;
+    List<TsFileResource> targetResources =
+        CompactionFileGeneratorUtils.getInnerCompactionTargetTsFileResources(
+            tsFileManager.getTsFileList(true), true);
+    FastCompactionPerformer performer =
+        new FastCompactionPerformer(
+            tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources);
+    performer.setSummary(new FastCompactionTaskSummary());
+    performer.perform();
+
+    CompactionUtils.moveTargetFile(targetResources, true, COMPACTION_TEST_SG);
+    tsFileManager.replace(
+        tsFileManager.getTsFileList(true), Collections.emptyList(), targetResources, 0, true);
+    tsFileManager.getTsFileList(true).get(0).setStatus(TsFileResourceStatus.CLOSED);
+
+    validateSeqFiles(true);
+    validateTargetDatas(sourceData, Collections.emptyList());
+  }
 }


[iotdb] 01/06: [To rel/1.1][IOTDB-5580] Add limitation of time and tsBlock size to MergeSortOperator (#9220)

Posted by zy...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zyk pushed a commit to branch rc/1.1.0
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit d1d896b5b22c672c276e6324f2efb632530babd4
Author: YangCaiyin <yc...@gmail.com>
AuthorDate: Mon Mar 20 16:02:01 2023 +0800

    [To rel/1.1][IOTDB-5580] Add limitation of time and tsBlock size to MergeSortOperator (#9220)
---
 .../operator/process/MergeSortOperator.java        |  9 ++
 .../execution/operator/MergeSortOperatorTest.java  | 96 ++++++++++++++--------
 2 files changed, 72 insertions(+), 33 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
index a8f413acb6..0fdc3f2630 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
@@ -34,6 +34,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.google.common.util.concurrent.Futures.successfulAsList;
 
@@ -83,6 +84,10 @@ public class MergeSortOperator extends AbstractConsumeAllOperator {
 
   @Override
   public TsBlock next() throws Exception {
+    // start stopwatch
+    long startTime = System.nanoTime();
+    long maxRuntime = operatorContext.getMaxRunTime().roundTo(TimeUnit.NANOSECONDS);
+
     // 1. fill consumed up TsBlock
     if (!prepareInput()) {
       return null;
@@ -130,6 +135,10 @@ public class MergeSortOperator extends AbstractConsumeAllOperator {
         mergeSortKey.rowIndex++;
         mergeSortHeap.push(mergeSortKey);
       }
+      // break if time is out or tsBlockBuilder is full
+      if (System.nanoTime() - startTime > maxRuntime || tsBlockBuilder.isFull()) {
+        break;
+      }
     }
     return tsBlockBuilder.build();
   }
diff --git a/server/src/test/java/org/apache/iotdb/db/mpp/execution/operator/MergeSortOperatorTest.java b/server/src/test/java/org/apache/iotdb/db/mpp/execution/operator/MergeSortOperatorTest.java
index 5826d2c4e7..f91bc21956 100644
--- a/server/src/test/java/org/apache/iotdb/db/mpp/execution/operator/MergeSortOperatorTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/mpp/execution/operator/MergeSortOperatorTest.java
@@ -20,7 +20,6 @@ package org.apache.iotdb.db.mpp.execution.operator;
 
 import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
 import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.exception.IoTDBException;
 import org.apache.iotdb.commons.exception.MetadataException;
 import org.apache.iotdb.commons.path.MeasurementPath;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
@@ -175,6 +174,7 @@ public class MergeSortOperatorTest {
           9, new PlanNodeId("9"), RowBasedTimeJoinOperator.class.getSimpleName());
       driverContext.addOperatorContext(
           10, new PlanNodeId("10"), SingleDeviceViewOperator.class.getSimpleName());
+
       driverContext.addOperatorContext(
           11, new PlanNodeId("11"), MergeSortOperator.class.getSimpleName());
 
@@ -318,17 +318,22 @@ public class MergeSortOperatorTest {
               Arrays.asList(4, 5),
               tsDataTypes);
 
-      return new MergeSortOperator(
-          driverContext.getOperatorContexts().get(10),
-          Arrays.asList(
-              singleDeviceViewOperator1, singleDeviceViewOperator2, singleDeviceViewOperator3),
-          tsDataTypes,
-          MergeSortComparator.getComparator(
+      MergeSortOperator mergeSortOperator =
+          new MergeSortOperator(
+              driverContext.getOperatorContexts().get(10),
               Arrays.asList(
-                  new SortItem(SortKey.TIME, timeOrdering),
-                  new SortItem(SortKey.DEVICE, deviceOrdering)),
-              null,
-              null));
+                  singleDeviceViewOperator1, singleDeviceViewOperator2, singleDeviceViewOperator3),
+              tsDataTypes,
+              MergeSortComparator.getComparator(
+                  Arrays.asList(
+                      new SortItem(SortKey.TIME, timeOrdering),
+                      new SortItem(SortKey.DEVICE, deviceOrdering)),
+                  null,
+                  null));
+      mergeSortOperator
+          .getOperatorContext()
+          .setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
+      return mergeSortOperator;
     } catch (IllegalPathException e) {
       e.printStackTrace();
       fail();
@@ -344,6 +349,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(6, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -391,6 +397,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(6, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -438,6 +445,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(6, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -485,6 +493,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(6, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -789,6 +798,9 @@ public class MergeSortOperatorTest {
                       new SortItem(SortKey.DEVICE, deviceOrdering)),
                   null,
                   null));
+      mergeSortOperator1
+          .getOperatorContext()
+          .setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
       MergeSortOperator mergeSortOperator2 =
           new MergeSortOperator(
               driverContext.getOperatorContexts().get(15),
@@ -800,17 +812,25 @@ public class MergeSortOperatorTest {
                       new SortItem(SortKey.DEVICE, deviceOrdering)),
                   null,
                   null));
+      mergeSortOperator2
+          .getOperatorContext()
+          .setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
 
-      return new MergeSortOperator(
-          driverContext.getOperatorContexts().get(16),
-          Arrays.asList(mergeSortOperator1, mergeSortOperator2),
-          tsDataTypes,
-          MergeSortComparator.getComparator(
-              Arrays.asList(
-                  new SortItem(SortKey.TIME, timeOrdering),
-                  new SortItem(SortKey.DEVICE, deviceOrdering)),
-              null,
-              null));
+      MergeSortOperator mergeSortOperator =
+          new MergeSortOperator(
+              driverContext.getOperatorContexts().get(16),
+              Arrays.asList(mergeSortOperator1, mergeSortOperator2),
+              tsDataTypes,
+              MergeSortComparator.getComparator(
+                  Arrays.asList(
+                      new SortItem(SortKey.TIME, timeOrdering),
+                      new SortItem(SortKey.DEVICE, deviceOrdering)),
+                  null,
+                  null));
+      mergeSortOperator
+          .getOperatorContext()
+          .setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
+      return mergeSortOperator;
     } catch (IllegalPathException e) {
       e.printStackTrace();
       fail();
@@ -826,6 +846,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(3, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -870,6 +891,7 @@ public class MergeSortOperatorTest {
 
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(3, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -913,6 +935,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(3, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -956,6 +979,7 @@ public class MergeSortOperatorTest {
     int count = 0;
     while (mergeSortOperator.isBlocked().isDone() && mergeSortOperator.hasNext()) {
       TsBlock tsBlock = mergeSortOperator.next();
+      if (tsBlock == null) continue;
       assertEquals(3, tsBlock.getValueColumnCount());
       count += tsBlock.getPositionCount();
       for (int i = 0; i < tsBlock.getPositionCount(); i++) {
@@ -1236,16 +1260,21 @@ public class MergeSortOperatorTest {
                   : Arrays.asList(timeJoinOperator3, timeJoinOperator2),
               deviceColumnIndex,
               tsDataTypes);
-      return new MergeSortOperator(
-          driverContext.getOperatorContexts().get(12),
-          Arrays.asList(deviceViewOperator1, deviceViewOperator2),
-          tsDataTypes,
-          MergeSortComparator.getComparator(
-              Arrays.asList(
-                  new SortItem(SortKey.DEVICE, deviceOrdering),
-                  new SortItem(SortKey.TIME, timeOrdering)),
-              null,
-              null));
+      MergeSortOperator mergeSortOperator =
+          new MergeSortOperator(
+              driverContext.getOperatorContexts().get(12),
+              Arrays.asList(deviceViewOperator1, deviceViewOperator2),
+              tsDataTypes,
+              MergeSortComparator.getComparator(
+                  Arrays.asList(
+                      new SortItem(SortKey.DEVICE, deviceOrdering),
+                      new SortItem(SortKey.TIME, timeOrdering)),
+                  null,
+                  null));
+      mergeSortOperator
+          .getOperatorContext()
+          .setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
+      return mergeSortOperator;
     } catch (IllegalPathException e) {
       e.printStackTrace();
       fail();
@@ -1541,6 +1570,7 @@ public class MergeSortOperatorTest {
               ImmutableList.of(sortOperator1, sortOperator2),
               dataTypes,
               comparator);
+      root.getOperatorContext().setMaxRunTime(new Duration(500, TimeUnit.MILLISECONDS));
 
       int index = 0;
       // Time ASC
@@ -1635,12 +1665,12 @@ public class MergeSortOperatorTest {
     }
 
     @Override
-    public Optional<TsBlock> getBatchResult() throws IoTDBException {
+    public Optional<TsBlock> getBatchResult() {
       return Optional.empty();
     }
 
     @Override
-    public Optional<ByteBuffer> getByteBufferBatchResult() throws IoTDBException {
+    public Optional<ByteBuffer> getByteBufferBatchResult() {
       return Optional.empty();
     }