You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by ha...@apache.org on 2022/11/06 05:13:53 UTC

[iotdb] branch master updated: Adjust configuration files (#7909)

This is an automated email from the ASF dual-hosted git repository.

haonan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/master by this push:
     new abbb45b891 Adjust configuration files (#7909)
abbb45b891 is described below

commit abbb45b891dbbd3ca8c30a96fa5b858156117f1b
Author: Jialin Qiao <qj...@mails.tsinghua.edu.cn>
AuthorDate: Sun Nov 6 13:13:47 2022 +0800

    Adjust configuration files (#7909)
---
 .../resources/conf/iotdb-confignode.properties     |   41 +-
 .../iotdb/confignode/conf/ConfigNodeConfig.java    |  167 ++-
 .../confignode/conf/ConfigNodeDescriptor.java      |   91 +-
 .../confignode/conf/ConfigNodeStartupCheck.java    |    6 +-
 .../statemachine/PartitionRegionStateMachine.java  |    3 +-
 .../iotdb/confignode/manager/ConsensusManager.java |   25 +-
 .../iotdb/confignode/manager/ProcedureManager.java |    4 +-
 .../iotdb/confignode/manager/UDFManager.java       |    4 +-
 .../iotdb/confignode/manager/node/NodeManager.java |    2 +-
 .../confignode1conf/iotdb-common.properties        |    2 +-
 .../confignode1conf/iotdb-confignode.properties    |    2 +-
 .../confignode2conf/iotdb-common.properties        |    2 +-
 .../confignode2conf/iotdb-confignode.properties    |    2 +-
 .../confignode3conf/iotdb-common.properties        |    2 +-
 .../confignode3conf/iotdb-confignode.properties    |    2 +-
 docker/src/main/Dockerfile-1c1d                    |    3 +
 docker/src/main/Dockerfile-1c1d-influxdb           |    5 +-
 docs/UserGuide/Process-Data/Triggers.md            |    2 +-
 docs/zh/UserGuide/Process-Data/Triggers.md         |    2 +-
 integration-test/README.md                         |    2 +-
 .../org/apache/iotdb/it/env/ConfigNodeWrapper.java |    2 +-
 .../org/apache/iotdb/it/env/DataNodeWrapper.java   |    2 +-
 .../java/org/apache/iotdb/it/env/MppConfig.java    |    2 +-
 .../resources/conf/iotdb-common.properties         | 1126 +++++++++-----------
 .../apache/iotdb/commons/conf/CommonConfig.java    |    2 +-
 .../apache/iotdb/commons/conf/IoTDBConstant.java   |    4 +-
 .../resources/conf/iotdb-datanode.properties       |   47 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |    6 +-
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |    2 +-
 .../java/org/apache/iotdb/db/service/DataNode.java |    2 +-
 .../impl/DataNodeInternalRPCServiceImpl.java       |    4 +-
 .../datanode1conf/iotdb-datanode.properties        |    2 +-
 .../datanode2conf/iotdb-datanode.properties        |    2 +-
 .../datanode3conf/iotdb-datanode.properties        |    2 +-
 34 files changed, 736 insertions(+), 838 deletions(-)

diff --git a/confignode/src/assembly/resources/conf/iotdb-confignode.properties b/confignode/src/assembly/resources/conf/iotdb-confignode.properties
index 17c91681b8..a1919a054f 100644
--- a/confignode/src/assembly/resources/conf/iotdb-confignode.properties
+++ b/confignode/src/assembly/resources/conf/iotdb-confignode.properties
@@ -21,19 +21,16 @@
 ### Config Node RPC Configuration
 ####################
 
-
-# Used for cluster internal RPC communication.
-# Could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address.
+# Used for RPC communication inside the cluster.
+# Could be set to 127.0.0.1 (for local test) or an ipv4 address.
 # Datatype: String
-cn_internal_address=0.0.0.0
-
+cn_internal_address=127.0.0.1
 
-# Used for cluster internal RPC communication
+# Used for RPC communication inside the cluster.
 # Datatype: int
 cn_internal_port=22277
 
-
-# Used for ConfigNodeGroup's ConsensusLayer interior communication
+# Used for consensus communication among ConfigNodes inside the cluster.
 # Datatype: int
 cn_consensus_port=22278
 
@@ -41,22 +38,16 @@ cn_consensus_port=22278
 ### Target Config Nodes
 ####################
 
-# At least one running ConfigNode should be set for joining the cluster
-# Format: ip:port
-# where the ip should be consistent with the target ConfigNode's confignode_internal_address,
-# and the port should be consistent with the target ConfigNode's confignode_internal_port.
-# For the first ConfigNode to start, target_config_nodes points to its own internal_address:internal_port.
-# For other ConfigNodes that are started or restarted, target_config_nodes points to any running ConfigNode's internal_address:internal_port.
-# Notice: The ip for any target_config_node should never be 0.0.0.0
+# For the first ConfigNode to start, cn_target_config_node_list points to its own cn_internal_address:cn_internal_port.
+# For other ConfigNodes that want to join the cluster, cn_target_config_node_list points to any running ConfigNode's cn_internal_address:cn_internal_port.
+# Format: address:port(,address:port)*   e.g. 127.0.0.1:22277,127.0.0.1:22279
 # Datatype: String
-cn_target_config_nodes=127.0.0.1:22277
-
+cn_target_config_node_list=127.0.0.1:22277
 
 ####################
 ### Directory configuration
 ####################
 
-
 # system dir
 # If this property is unset, system will save the data in the default relative path directory under the confignode folder(i.e., %CONFIGNODE_HOME%/data/confignode/system).
 # If it is absolute, system will save the data in exact location it points to.
@@ -68,7 +59,6 @@ cn_target_config_nodes=127.0.0.1:22277
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # cn_system_dir=data/confignode/system
 
-
 # consensus dir
 # If this property is unset, system will save the data in the default relative path directory under the confignode folder(i.e., %CONFIGNODE_HOME%/data/confignode/consensus).
 # If it is absolute, system will save the data in exact location it points to.
@@ -81,45 +71,34 @@ cn_target_config_nodes=127.0.0.1:22277
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # cn_consensus_dir=data/confignode/consensus
 
-
-
 ####################
 ### thrift rpc configuration
 ####################
 
-
 # this feature is under development, set this as false before it is done.
 # Datatype: boolean
 # cn_rpc_thrift_compression_enable=false
 
-
 # if true, a snappy based compression method will be called before sending data by the network
 # Datatype: boolean
 # this feature is under development, set this as false before it is done.
 # cn_rpc_advanced_compression_enable=false
 
-
 # Datatype: int
 # cn_rpc_max_concurrent_client_num=65535
 
-
 # thrift max frame size, 512MB by default
 # Datatype: int
 # cn_thrift_max_frame_size=536870912
 
-
 # thrift init buffer size
 # Datatype: int
 # cn_thrift_init_buffer_size=1024
 
-
 # Thrift socket and connection timeout between raft nodes, in milliseconds.
 # Datatype: int
 # cn_connection_timeout_ms=20000
 
-
 # selector thread (TAsyncClientManager) nums for async thread in a clientManager
 # Datatype: int
-# cn_selector_thread_nums_of_client_manager=1
-
-
+# cn_selector_thread_nums_of_client_manager=1
\ No newline at end of file
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
index ff06c839e3..3990b02f48 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
@@ -35,7 +35,7 @@ public class ConfigNodeConfig {
   private volatile int configNodeId = -1;
 
   /** could set ip or hostname */
-  private String internalAddress = "0.0.0.0";
+  private String internalAddress = "127.0.0.1";
 
   /** used for communication between data node and config node */
   private int internalPort = 22277;
@@ -146,7 +146,7 @@ public class ConfigNodeConfig {
   /** RatisConsensus protocol, Max size for a single log append request from leader */
   private long dataRegionRatisConsensusLogAppenderBufferSize = 4 * 1024 * 1024L;
 
-  private long partitionRegionRatisConsensusLogAppenderBufferSize = 4 * 1024 * 1024L;
+  private long configNodeRatisConsensusLogAppenderBufferSize = 4 * 1024 * 1024L;
   private long schemaRegionRatisConsensusLogAppenderBufferSize = 4 * 1024 * 1024L;
 
   /**
@@ -155,39 +155,39 @@ public class ConfigNodeConfig {
    */
   private long dataRegionRatisSnapshotTriggerThreshold = 400000L;
 
-  private long partitionRegionRatisSnapshotTriggerThreshold = 400000L;
-  private long partitionRegionOneCopySnapshotTriggerThreshold = 400000L;
+  private long configNodeRatisSnapshotTriggerThreshold = 400000L;
+  private long configNodeSimpleConsensusSnapshotTriggerThreshold = 400000L;
   private long schemaRegionRatisSnapshotTriggerThreshold = 400000L;
 
   /** RatisConsensus protocol, allow flushing Raft Log asynchronously */
   private boolean dataRegionRatisLogUnsafeFlushEnable = false;
 
-  private boolean partitionRegionRatisLogUnsafeFlushEnable = false;
+  private boolean configNodeRatisLogUnsafeFlushEnable = false;
   private boolean schemaRegionRatisLogUnsafeFlushEnable = false;
 
   /** RatisConsensus protocol, max capacity of a single Raft Log segment */
   private long dataRegionRatisLogSegmentSizeMax = 24 * 1024 * 1024L;
 
-  private long partitionRegionRatisLogSegmentSizeMax = 24 * 1024 * 1024L;
+  private long configNodeRatisLogSegmentSizeMax = 24 * 1024 * 1024L;
   private long schemaRegionRatisLogSegmentSizeMax = 24 * 1024 * 1024L;
-  private long partitionRegionOneCopyLogSegmentSizeMax = 24 * 1024 * 1024L;
+  private long configNodeSimpleConsensusLogSegmentSizeMax = 24 * 1024 * 1024L;
 
   /** RatisConsensus protocol, flow control window for ratis grpc log appender */
   private long dataRegionRatisGrpcFlowControlWindow = 4 * 1024 * 1024L;
 
-  private long partitionRegionRatisGrpcFlowControlWindow = 4 * 1024 * 1024L;
+  private long configNodeRatisGrpcFlowControlWindow = 4 * 1024 * 1024L;
   private long schemaRegionRatisGrpcFlowControlWindow = 4 * 1024 * 1024L;
 
   /** RatisConsensus protocol, min election timeout for leader election */
   private long dataRegionRatisRpcLeaderElectionTimeoutMinMs = 2000L;
 
-  private long partitionRegionRatisRpcLeaderElectionTimeoutMinMs = 2000L;
+  private long configNodeRatisRpcLeaderElectionTimeoutMinMs = 2000L;
   private long schemaRegionRatisRpcLeaderElectionTimeoutMinMs = 2000L;
 
   /** RatisConsensus protocol, max election timeout for leader election */
   private long dataRegionRatisRpcLeaderElectionTimeoutMaxMs = 4000L;
 
-  private long partitionRegionRatisRpcLeaderElectionTimeoutMaxMs = 4000L;
+  private long configNodeRatisRpcLeaderElectionTimeoutMaxMs = 4000L;
   private long schemaRegionRatisRpcLeaderElectionTimeoutMaxMs = 4000L;
 
   /** CQ related */
@@ -198,14 +198,14 @@ public class ConfigNodeConfig {
   /** RatisConsensus protocol, request timeout for ratis client */
   private long dataRegionRatisRequestTimeoutMs = 10000L;
 
-  private long partitionRegionRatisRequestTimeoutMs = 10000L;
+  private long configNodeRatisRequestTimeoutMs = 10000L;
   private long schemaRegionRatisRequestTimeoutMs = 10000L;
 
   /** RatisConsensus protocol, exponential back-off retry policy params */
-  private int partitionRegionRatisMaxRetryAttempts = 10;
+  private int configNodeRatisMaxRetryAttempts = 10;
 
-  private long partitionRegionRatisInitialSleepTimeMs = 100;
-  private long partitionRegionRatisMaxSleepTimeMs = 10000;
+  private long configNodeRatisInitialSleepTimeMs = 100;
+  private long configNodeRatisMaxSleepTimeMs = 10000;
 
   private int dataRegionRatisMaxRetryAttempts = 10;
   private long dataRegionRatisInitialSleepTimeMs = 100;
@@ -215,7 +215,7 @@ public class ConfigNodeConfig {
   private long schemaRegionRatisInitialSleepTimeMs = 100;
   private long schemaRegionRatisMaxSleepTimeMs = 10000;
 
-  private long partitionRegionRatisPreserveLogsWhenPurge = 1000;
+  private long configNodeRatisPreserveLogsWhenPurge = 1000;
   private long schemaRegionRatisPreserveLogsWhenPurge = 1000;
   private long dataRegionRatisPreserveLogsWhenPurge = 1000;
 
@@ -600,80 +600,77 @@ public class ConfigNodeConfig {
         dataRegionRatisRpcLeaderElectionTimeoutMaxMs;
   }
 
-  public long getPartitionRegionRatisConsensusLogAppenderBufferSize() {
-    return partitionRegionRatisConsensusLogAppenderBufferSize;
+  public long getConfigNodeRatisConsensusLogAppenderBufferSize() {
+    return configNodeRatisConsensusLogAppenderBufferSize;
   }
 
-  public void setPartitionRegionRatisConsensusLogAppenderBufferSize(
-      long partitionRegionRatisConsensusLogAppenderBufferSize) {
-    this.partitionRegionRatisConsensusLogAppenderBufferSize =
-        partitionRegionRatisConsensusLogAppenderBufferSize;
+  public void setConfigNodeRatisConsensusLogAppenderBufferSize(
+      long configNodeRatisConsensusLogAppenderBufferSize) {
+    this.configNodeRatisConsensusLogAppenderBufferSize =
+        configNodeRatisConsensusLogAppenderBufferSize;
   }
 
-  public long getPartitionRegionRatisSnapshotTriggerThreshold() {
-    return partitionRegionRatisSnapshotTriggerThreshold;
+  public long getConfigNodeRatisSnapshotTriggerThreshold() {
+    return configNodeRatisSnapshotTriggerThreshold;
   }
 
-  public void setPartitionRegionRatisSnapshotTriggerThreshold(
-      long partitionRegionRatisSnapshotTriggerThreshold) {
-    this.partitionRegionRatisSnapshotTriggerThreshold =
-        partitionRegionRatisSnapshotTriggerThreshold;
+  public void setConfigNodeRatisSnapshotTriggerThreshold(
+      long configNodeRatisSnapshotTriggerThreshold) {
+    this.configNodeRatisSnapshotTriggerThreshold = configNodeRatisSnapshotTriggerThreshold;
   }
 
-  public long getPartitionRegionOneCopySnapshotTriggerThreshold() {
-    return partitionRegionOneCopySnapshotTriggerThreshold;
+  public long getConfigNodeSimpleConsensusSnapshotTriggerThreshold() {
+    return configNodeSimpleConsensusSnapshotTriggerThreshold;
   }
 
-  public void setPartitionRegionOneCopySnapshotTriggerThreshold(
-      long partitionRegionOneCopySnapshotTriggerThreshold) {
-    this.partitionRegionOneCopySnapshotTriggerThreshold =
-        partitionRegionOneCopySnapshotTriggerThreshold;
+  public void setConfigNodeSimpleConsensusSnapshotTriggerThreshold(
+      long configNodeSimpleConsensusSnapshotTriggerThreshold) {
+    this.configNodeSimpleConsensusSnapshotTriggerThreshold =
+        configNodeSimpleConsensusSnapshotTriggerThreshold;
   }
 
-  public boolean isPartitionRegionRatisLogUnsafeFlushEnable() {
-    return partitionRegionRatisLogUnsafeFlushEnable;
+  public boolean isConfigNodeRatisLogUnsafeFlushEnable() {
+    return configNodeRatisLogUnsafeFlushEnable;
   }
 
-  public void setPartitionRegionRatisLogUnsafeFlushEnable(
-      boolean partitionRegionRatisLogUnsafeFlushEnable) {
-    this.partitionRegionRatisLogUnsafeFlushEnable = partitionRegionRatisLogUnsafeFlushEnable;
+  public void setConfigNodeRatisLogUnsafeFlushEnable(boolean configNodeRatisLogUnsafeFlushEnable) {
+    this.configNodeRatisLogUnsafeFlushEnable = configNodeRatisLogUnsafeFlushEnable;
   }
 
-  public long getPartitionRegionRatisLogSegmentSizeMax() {
-    return partitionRegionRatisLogSegmentSizeMax;
+  public long getConfigNodeRatisLogSegmentSizeMax() {
+    return configNodeRatisLogSegmentSizeMax;
   }
 
-  public void setPartitionRegionRatisLogSegmentSizeMax(long partitionRegionRatisLogSegmentSizeMax) {
-    this.partitionRegionRatisLogSegmentSizeMax = partitionRegionRatisLogSegmentSizeMax;
+  public void setConfigNodeRatisLogSegmentSizeMax(long configNodeRatisLogSegmentSizeMax) {
+    this.configNodeRatisLogSegmentSizeMax = configNodeRatisLogSegmentSizeMax;
   }
 
-  public long getPartitionRegionRatisGrpcFlowControlWindow() {
-    return partitionRegionRatisGrpcFlowControlWindow;
+  public long getConfigNodeRatisGrpcFlowControlWindow() {
+    return configNodeRatisGrpcFlowControlWindow;
   }
 
-  public void setPartitionRegionRatisGrpcFlowControlWindow(
-      long partitionRegionRatisGrpcFlowControlWindow) {
-    this.partitionRegionRatisGrpcFlowControlWindow = partitionRegionRatisGrpcFlowControlWindow;
+  public void setConfigNodeRatisGrpcFlowControlWindow(long configNodeRatisGrpcFlowControlWindow) {
+    this.configNodeRatisGrpcFlowControlWindow = configNodeRatisGrpcFlowControlWindow;
   }
 
-  public long getPartitionRegionRatisRpcLeaderElectionTimeoutMinMs() {
-    return partitionRegionRatisRpcLeaderElectionTimeoutMinMs;
+  public long getConfigNodeRatisRpcLeaderElectionTimeoutMinMs() {
+    return configNodeRatisRpcLeaderElectionTimeoutMinMs;
   }
 
-  public void setPartitionRegionRatisRpcLeaderElectionTimeoutMinMs(
-      long partitionRegionRatisRpcLeaderElectionTimeoutMinMs) {
-    this.partitionRegionRatisRpcLeaderElectionTimeoutMinMs =
-        partitionRegionRatisRpcLeaderElectionTimeoutMinMs;
+  public void setConfigNodeRatisRpcLeaderElectionTimeoutMinMs(
+      long configNodeRatisRpcLeaderElectionTimeoutMinMs) {
+    this.configNodeRatisRpcLeaderElectionTimeoutMinMs =
+        configNodeRatisRpcLeaderElectionTimeoutMinMs;
   }
 
-  public long getPartitionRegionRatisRpcLeaderElectionTimeoutMaxMs() {
-    return partitionRegionRatisRpcLeaderElectionTimeoutMaxMs;
+  public long getConfigNodeRatisRpcLeaderElectionTimeoutMaxMs() {
+    return configNodeRatisRpcLeaderElectionTimeoutMaxMs;
   }
 
-  public void setPartitionRegionRatisRpcLeaderElectionTimeoutMaxMs(
-      long partitionRegionRatisRpcLeaderElectionTimeoutMaxMs) {
-    this.partitionRegionRatisRpcLeaderElectionTimeoutMaxMs =
-        partitionRegionRatisRpcLeaderElectionTimeoutMaxMs;
+  public void setConfigNodeRatisRpcLeaderElectionTimeoutMaxMs(
+      long configNodeRatisRpcLeaderElectionTimeoutMaxMs) {
+    this.configNodeRatisRpcLeaderElectionTimeoutMaxMs =
+        configNodeRatisRpcLeaderElectionTimeoutMaxMs;
   }
 
   public long getSchemaRegionRatisConsensusLogAppenderBufferSize() {
@@ -712,13 +709,13 @@ public class ConfigNodeConfig {
     this.schemaRegionRatisLogSegmentSizeMax = schemaRegionRatisLogSegmentSizeMax;
   }
 
-  public long getPartitionRegionOneCopyLogSegmentSizeMax() {
-    return partitionRegionOneCopyLogSegmentSizeMax;
+  public long getConfigNodeSimpleConsensusLogSegmentSizeMax() {
+    return configNodeSimpleConsensusLogSegmentSizeMax;
   }
 
-  public void setPartitionRegionOneCopyLogSegmentSizeMax(
-      long partitionRegionOneCopyLogSegmentSizeMax) {
-    this.partitionRegionOneCopyLogSegmentSizeMax = partitionRegionOneCopyLogSegmentSizeMax;
+  public void setConfigNodeSimpleConsensusLogSegmentSizeMax(
+      long configNodeSimpleConsensusLogSegmentSizeMax) {
+    this.configNodeSimpleConsensusLogSegmentSizeMax = configNodeSimpleConsensusLogSegmentSizeMax;
   }
 
   public long getSchemaRegionRatisGrpcFlowControlWindow() {
@@ -774,12 +771,12 @@ public class ConfigNodeConfig {
     this.dataRegionRatisRequestTimeoutMs = dataRegionRatisRequestTimeoutMs;
   }
 
-  public long getPartitionRegionRatisRequestTimeoutMs() {
-    return partitionRegionRatisRequestTimeoutMs;
+  public long getConfigNodeRatisRequestTimeoutMs() {
+    return configNodeRatisRequestTimeoutMs;
   }
 
-  public void setPartitionRegionRatisRequestTimeoutMs(long partitionRegionRatisRequestTimeoutMs) {
-    this.partitionRegionRatisRequestTimeoutMs = partitionRegionRatisRequestTimeoutMs;
+  public void setConfigNodeRatisRequestTimeoutMs(long configNodeRatisRequestTimeoutMs) {
+    this.configNodeRatisRequestTimeoutMs = configNodeRatisRequestTimeoutMs;
   }
 
   public long getSchemaRegionRatisRequestTimeoutMs() {
@@ -790,29 +787,28 @@ public class ConfigNodeConfig {
     this.schemaRegionRatisRequestTimeoutMs = schemaRegionRatisRequestTimeoutMs;
   }
 
-  public int getPartitionRegionRatisMaxRetryAttempts() {
-    return partitionRegionRatisMaxRetryAttempts;
+  public int getConfigNodeRatisMaxRetryAttempts() {
+    return configNodeRatisMaxRetryAttempts;
   }
 
-  public void setPartitionRegionRatisMaxRetryAttempts(int partitionRegionRatisMaxRetryAttempts) {
-    this.partitionRegionRatisMaxRetryAttempts = partitionRegionRatisMaxRetryAttempts;
+  public void setConfigNodeRatisMaxRetryAttempts(int configNodeRatisMaxRetryAttempts) {
+    this.configNodeRatisMaxRetryAttempts = configNodeRatisMaxRetryAttempts;
   }
 
-  public long getPartitionRegionRatisInitialSleepTimeMs() {
-    return partitionRegionRatisInitialSleepTimeMs;
+  public long getConfigNodeRatisInitialSleepTimeMs() {
+    return configNodeRatisInitialSleepTimeMs;
   }
 
-  public void setPartitionRegionRatisInitialSleepTimeMs(
-      long partitionRegionRatisInitialSleepTimeMs) {
-    this.partitionRegionRatisInitialSleepTimeMs = partitionRegionRatisInitialSleepTimeMs;
+  public void setConfigNodeRatisInitialSleepTimeMs(long configNodeRatisInitialSleepTimeMs) {
+    this.configNodeRatisInitialSleepTimeMs = configNodeRatisInitialSleepTimeMs;
   }
 
-  public long getPartitionRegionRatisMaxSleepTimeMs() {
-    return partitionRegionRatisMaxSleepTimeMs;
+  public long getConfigNodeRatisMaxSleepTimeMs() {
+    return configNodeRatisMaxSleepTimeMs;
   }
 
-  public void setPartitionRegionRatisMaxSleepTimeMs(long partitionRegionRatisMaxSleepTimeMs) {
-    this.partitionRegionRatisMaxSleepTimeMs = partitionRegionRatisMaxSleepTimeMs;
+  public void setConfigNodeRatisMaxSleepTimeMs(long configNodeRatisMaxSleepTimeMs) {
+    this.configNodeRatisMaxSleepTimeMs = configNodeRatisMaxSleepTimeMs;
   }
 
   public int getDataRegionRatisMaxRetryAttempts() {
@@ -863,13 +859,12 @@ public class ConfigNodeConfig {
     this.schemaRegionRatisMaxSleepTimeMs = schemaRegionRatisMaxSleepTimeMs;
   }
 
-  public long getPartitionRegionRatisPreserveLogsWhenPurge() {
-    return partitionRegionRatisPreserveLogsWhenPurge;
+  public long getConfigNodeRatisPreserveLogsWhenPurge() {
+    return configNodeRatisPreserveLogsWhenPurge;
   }
 
-  public void setPartitionRegionRatisPreserveLogsWhenPurge(
-      long partitionRegionRatisPreserveLogsWhenPurge) {
-    this.partitionRegionRatisPreserveLogsWhenPurge = partitionRegionRatisPreserveLogsWhenPurge;
+  public void setConfigNodeRatisPreserveLogsWhenPurge(long configNodeRatisPreserveLogsWhenPurge) {
+    this.configNodeRatisPreserveLogsWhenPurge = configNodeRatisPreserveLogsWhenPurge;
   }
 
   public long getSchemaRegionRatisPreserveLogsWhenPurge() {
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
index f31fb6cf42..db98d70fcf 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
@@ -153,8 +153,9 @@ public class ConfigNodeDescriptor {
             properties.getProperty(
                 IoTDBConstant.CN_CONSENSUS_PORT, String.valueOf(conf.getConsensusPort()))));
 
-    // TODO: Enable multiple target_config_nodes
-    String targetConfigNodes = properties.getProperty(IoTDBConstant.CN_TARGET_CONFIG_NODES, null);
+    // TODO: Enable support for multiple entries in target_config_node_list
+    String targetConfigNodes =
+        properties.getProperty(IoTDBConstant.CN_TARGET_CONFIG_NODE_LIST, null);
     if (targetConfigNodes != null) {
       conf.setTargetConfigNode(NodeUrlUtils.parseTEndPointUrl(targetConfigNodes));
     }
@@ -310,11 +311,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_log_appender_buffer_size_max",
                 String.valueOf(conf.getDataRegionRatisConsensusLogAppenderBufferSize()))));
 
-    conf.setPartitionRegionRatisConsensusLogAppenderBufferSize(
+    conf.setConfigNodeRatisConsensusLogAppenderBufferSize(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_log_appender_buffer_size_max",
-                String.valueOf(conf.getPartitionRegionRatisConsensusLogAppenderBufferSize()))));
+                "config_node_ratis_log_appender_buffer_size_max",
+                String.valueOf(conf.getConfigNodeRatisConsensusLogAppenderBufferSize()))));
 
     conf.setSchemaRegionRatisConsensusLogAppenderBufferSize(
         Long.parseLong(
@@ -328,17 +329,17 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_snapshot_trigger_threshold",
                 String.valueOf(conf.getDataRegionRatisSnapshotTriggerThreshold()))));
 
-    conf.setPartitionRegionRatisSnapshotTriggerThreshold(
+    conf.setConfigNodeRatisSnapshotTriggerThreshold(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_snapshot_trigger_threshold",
-                String.valueOf(conf.getPartitionRegionRatisSnapshotTriggerThreshold()))));
+                "config_node_ratis_snapshot_trigger_threshold",
+                String.valueOf(conf.getConfigNodeRatisSnapshotTriggerThreshold()))));
 
-    conf.setPartitionRegionOneCopySnapshotTriggerThreshold(
+    conf.setConfigNodeSimpleConsensusSnapshotTriggerThreshold(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_one_copy_snapshot_trigger_threshold",
-                String.valueOf(conf.getPartitionRegionOneCopySnapshotTriggerThreshold()))));
+                "config_node_simple_consensus_snapshot_trigger_threshold",
+                String.valueOf(conf.getConfigNodeSimpleConsensusSnapshotTriggerThreshold()))));
 
     conf.setSchemaRegionRatisSnapshotTriggerThreshold(
         Long.parseLong(
@@ -352,11 +353,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_log_unsafe_flush_enable",
                 String.valueOf(conf.isDataRegionRatisLogUnsafeFlushEnable()))));
 
-    conf.setPartitionRegionRatisLogUnsafeFlushEnable(
+    conf.setConfigNodeRatisLogUnsafeFlushEnable(
         Boolean.parseBoolean(
             properties.getProperty(
-                "partition_region_ratis_log_unsafe_flush_enable",
-                String.valueOf(conf.isPartitionRegionRatisLogUnsafeFlushEnable()))));
+                "config_node_ratis_log_unsafe_flush_enable",
+                String.valueOf(conf.isConfigNodeRatisLogUnsafeFlushEnable()))));
 
     conf.setSchemaRegionRatisLogUnsafeFlushEnable(
         Boolean.parseBoolean(
@@ -370,11 +371,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_log_segment_size_max_in_byte",
                 String.valueOf(conf.getDataRegionRatisLogSegmentSizeMax()))));
 
-    conf.setPartitionRegionRatisLogSegmentSizeMax(
+    conf.setConfigNodeRatisLogSegmentSizeMax(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_log_segment_size_max_in_byte",
-                String.valueOf(conf.getPartitionRegionRatisLogSegmentSizeMax()))));
+                "config_node_ratis_log_segment_size_max_in_byte",
+                String.valueOf(conf.getConfigNodeRatisLogSegmentSizeMax()))));
 
     conf.setSchemaRegionRatisLogSegmentSizeMax(
         Long.parseLong(
@@ -382,11 +383,11 @@ public class ConfigNodeDescriptor {
                 "schema_region_ratis_log_segment_size_max_in_byte",
                 String.valueOf(conf.getSchemaRegionRatisLogSegmentSizeMax()))));
 
-    conf.setPartitionRegionOneCopyLogSegmentSizeMax(
+    conf.setConfigNodeSimpleConsensusLogSegmentSizeMax(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_one_copy_log_segment_size_max_in_byte",
-                String.valueOf(conf.getPartitionRegionOneCopyLogSegmentSizeMax()))));
+                "config_node_simple_consensus_log_segment_size_max_in_byte",
+                String.valueOf(conf.getConfigNodeSimpleConsensusLogSegmentSizeMax()))));
 
     conf.setDataRegionRatisGrpcFlowControlWindow(
         Long.parseLong(
@@ -394,11 +395,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_grpc_flow_control_window",
                 String.valueOf(conf.getDataRegionRatisGrpcFlowControlWindow()))));
 
-    conf.setPartitionRegionRatisGrpcFlowControlWindow(
+    conf.setConfigNodeRatisGrpcFlowControlWindow(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_grpc_flow_control_window",
-                String.valueOf(conf.getPartitionRegionRatisGrpcFlowControlWindow()))));
+                "config_node_ratis_grpc_flow_control_window",
+                String.valueOf(conf.getConfigNodeRatisGrpcFlowControlWindow()))));
 
     conf.setSchemaRegionRatisGrpcFlowControlWindow(
         Long.parseLong(
@@ -412,11 +413,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_rpc_leader_election_timeout_min_ms",
                 String.valueOf(conf.getDataRegionRatisRpcLeaderElectionTimeoutMinMs()))));
 
-    conf.setPartitionRegionRatisRpcLeaderElectionTimeoutMinMs(
+    conf.setConfigNodeRatisRpcLeaderElectionTimeoutMinMs(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_rpc_leader_election_timeout_min_ms",
-                String.valueOf(conf.getPartitionRegionRatisRpcLeaderElectionTimeoutMinMs()))));
+                "config_node_ratis_rpc_leader_election_timeout_min_ms",
+                String.valueOf(conf.getConfigNodeRatisRpcLeaderElectionTimeoutMinMs()))));
 
     conf.setSchemaRegionRatisRpcLeaderElectionTimeoutMinMs(
         Long.parseLong(
@@ -430,11 +431,11 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_rpc_leader_election_timeout_max_ms",
                 String.valueOf(conf.getDataRegionRatisRpcLeaderElectionTimeoutMaxMs()))));
 
-    conf.setPartitionRegionRatisRpcLeaderElectionTimeoutMaxMs(
+    conf.setConfigNodeRatisRpcLeaderElectionTimeoutMaxMs(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_rpc_leader_election_timeout_max_ms",
-                String.valueOf(conf.getPartitionRegionRatisRpcLeaderElectionTimeoutMaxMs()))));
+                "config_node_ratis_rpc_leader_election_timeout_max_ms",
+                String.valueOf(conf.getConfigNodeRatisRpcLeaderElectionTimeoutMaxMs()))));
 
     conf.setSchemaRegionRatisRpcLeaderElectionTimeoutMaxMs(
         Long.parseLong(
@@ -442,11 +443,11 @@ public class ConfigNodeDescriptor {
                 "schema_region_ratis_rpc_leader_election_timeout_max_ms",
                 String.valueOf(conf.getSchemaRegionRatisRpcLeaderElectionTimeoutMaxMs()))));
 
-    conf.setPartitionRegionRatisRequestTimeoutMs(
+    conf.setConfigNodeRatisRequestTimeoutMs(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_request_timeout_ms",
-                String.valueOf(conf.getPartitionRegionRatisRequestTimeoutMs()))));
+                "config_node_ratis_request_timeout_ms",
+                String.valueOf(conf.getConfigNodeRatisRequestTimeoutMs()))));
     conf.setSchemaRegionRatisRequestTimeoutMs(
         Long.parseLong(
             properties.getProperty(
@@ -458,21 +459,21 @@ public class ConfigNodeDescriptor {
                 "data_region_ratis_request_timeout_ms",
                 String.valueOf(conf.getDataRegionRatisRequestTimeoutMs()))));
 
-    conf.setPartitionRegionRatisMaxRetryAttempts(
+    conf.setConfigNodeRatisMaxRetryAttempts(
         Integer.parseInt(
             properties.getProperty(
-                "partition_region_ratis_max_retry_attempts",
-                String.valueOf(conf.getPartitionRegionRatisMaxRetryAttempts()))));
-    conf.setPartitionRegionRatisInitialSleepTimeMs(
+                "config_node_ratis_max_retry_attempts",
+                String.valueOf(conf.getConfigNodeRatisMaxRetryAttempts()))));
+    conf.setConfigNodeRatisInitialSleepTimeMs(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_initial_sleep_time_ms",
-                String.valueOf(conf.getPartitionRegionRatisInitialSleepTimeMs()))));
-    conf.setPartitionRegionRatisMaxSleepTimeMs(
+                "config_node_ratis_initial_sleep_time_ms",
+                String.valueOf(conf.getConfigNodeRatisInitialSleepTimeMs()))));
+    conf.setConfigNodeRatisMaxSleepTimeMs(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_max_sleep_time_ms",
-                String.valueOf(conf.getPartitionRegionRatisMaxSleepTimeMs()))));
+                "config_node_ratis_max_sleep_time_ms",
+                String.valueOf(conf.getConfigNodeRatisMaxSleepTimeMs()))));
 
     conf.setDataRegionRatisMaxRetryAttempts(
         Integer.parseInt(
@@ -506,11 +507,11 @@ public class ConfigNodeDescriptor {
                 "schema_region_ratis_max_sleep_time_ms",
                 String.valueOf(conf.getSchemaRegionRatisMaxSleepTimeMs()))));
 
-    conf.setPartitionRegionRatisPreserveLogsWhenPurge(
+    conf.setConfigNodeRatisPreserveLogsWhenPurge(
         Long.parseLong(
             properties.getProperty(
-                "partition_region_ratis_preserve_logs_num_when_purge",
-                String.valueOf(conf.getPartitionRegionRatisPreserveLogsWhenPurge()))));
+                "config_node_ratis_preserve_logs_num_when_purge",
+                String.valueOf(conf.getConfigNodeRatisPreserveLogsWhenPurge()))));
 
     conf.setSchemaRegionRatisPreserveLogsWhenPurge(
         Long.parseLong(
@@ -570,7 +571,7 @@ public class ConfigNodeDescriptor {
   /**
    * Check if the current ConfigNode is SeedConfigNode.
    *
-   * @return True if the target_config_nodes points to itself
+   * @return True if the target_config_node_list points to itself
    */
   public boolean isSeedConfigNode() {
     return (conf.getInternalAddress().equals(conf.getTargetConfigNode().getIp())
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
index 0f015e5d11..9b91c9ca84 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
@@ -51,12 +51,12 @@ public class ConfigNodeStartupCheck {
   /** Check whether the global configuration of the cluster is correct */
   private void checkGlobalConfig() throws ConfigurationException {
     // When the ConfigNode consensus protocol is set to SIMPLE_CONSENSUS,
-    // the target_config_nodes needs to point to itself
+    // the target_config_node_list needs to point to itself
     if (CONF.getConfigNodeConsensusProtocolClass().equals(ConsensusFactory.SIMPLE_CONSENSUS)
         && (!CONF.getInternalAddress().equals(CONF.getTargetConfigNode().getIp())
             || CONF.getInternalPort() != CONF.getTargetConfigNode().getPort())) {
       throw new ConfigurationException(
-          IoTDBConstant.CN_TARGET_CONFIG_NODES,
+          IoTDBConstant.CN_TARGET_CONFIG_NODE_LIST,
           CONF.getTargetConfigNode().getIp() + ":" + CONF.getTargetConfigNode().getPort(),
           CONF.getInternalAddress() + ":" + CONF.getInternalPort());
     }
@@ -102,7 +102,7 @@ public class ConfigNodeStartupCheck {
     // The ip of target ConfigNode couldn't be 0.0.0.0
     if (CONF.getTargetConfigNode().getIp().equals("0.0.0.0")) {
       throw new ConfigurationException(
-          "The ip address of any target_config_nodes couldn't be 0.0.0.0");
+          "The ip address of any target_config_node_list couldn't be 0.0.0.0");
     }
   }
 
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java
index d8187b56ee..fc2b8a8210 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java
@@ -73,7 +73,8 @@ public class PartitionRegionStateMachine
   private static final String progressFilePath =
       currentFileDir + File.separator + "log_inprogress_";
   private static final String filePath = currentFileDir + File.separator + "log_";
-  private static final long LOG_FILE_MAX_SIZE = CONF.getPartitionRegionOneCopyLogSegmentSizeMax();
+  private static final long LOG_FILE_MAX_SIZE =
+      CONF.getConfigNodeSimpleConsensusLogSegmentSizeMax();
   private final TEndPoint currentNodeTEndPoint;
 
   public PartitionRegionStateMachine(ConfigManager configManager, ConfigPlanExecutor executor) {
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java
index 9e86e91825..fa6487b0fe 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java
@@ -109,43 +109,42 @@ public class ConsensusManager {
                               .setLeaderLogAppender(
                                   RatisConfig.LeaderLogAppender.newBuilder()
                                       .setBufferByteLimit(
-                                          CONF
-                                              .getPartitionRegionRatisConsensusLogAppenderBufferSize())
+                                          CONF.getConfigNodeRatisConsensusLogAppenderBufferSize())
                                       .build())
                               .setSnapshot(
                                   RatisConfig.Snapshot.newBuilder()
                                       .setAutoTriggerThreshold(
-                                          CONF.getPartitionRegionRatisSnapshotTriggerThreshold())
+                                          CONF.getConfigNodeRatisSnapshotTriggerThreshold())
                                       .build())
                               .setLog(
                                   RatisConfig.Log.newBuilder()
                                       .setUnsafeFlushEnabled(
-                                          CONF.isPartitionRegionRatisLogUnsafeFlushEnable())
+                                          CONF.isConfigNodeRatisLogUnsafeFlushEnable())
                                       .setSegmentCacheSizeMax(
                                           SizeInBytes.valueOf(
-                                              CONF.getPartitionRegionRatisLogSegmentSizeMax()))
+                                              CONF.getConfigNodeRatisLogSegmentSizeMax()))
                                       .build())
                               .setGrpc(
                                   RatisConfig.Grpc.newBuilder()
                                       .setFlowControlWindow(
                                           SizeInBytes.valueOf(
-                                              CONF.getPartitionRegionRatisGrpcFlowControlWindow()))
+                                              CONF.getConfigNodeRatisGrpcFlowControlWindow()))
                                       .build())
                               .setRpc(
                                   RatisConfig.Rpc.newBuilder()
                                       .setTimeoutMin(
                                           TimeDuration.valueOf(
                                               CONF
-                                                  .getPartitionRegionRatisRpcLeaderElectionTimeoutMinMs(),
+                                                  .getConfigNodeRatisRpcLeaderElectionTimeoutMinMs(),
                                               TimeUnit.MILLISECONDS))
                                       .setTimeoutMax(
                                           TimeDuration.valueOf(
                                               CONF
-                                                  .getPartitionRegionRatisRpcLeaderElectionTimeoutMaxMs(),
+                                                  .getConfigNodeRatisRpcLeaderElectionTimeoutMaxMs(),
                                               TimeUnit.MILLISECONDS))
                                       .setRequestTimeout(
                                           TimeDuration.valueOf(
-                                              CONF.getPartitionRegionRatisRequestTimeoutMs(),
+                                              CONF.getConfigNodeRatisRequestTimeoutMs(),
                                               TimeUnit.MILLISECONDS))
                                       .setFirstElectionTimeoutMin(
                                           TimeDuration.valueOf(
@@ -159,13 +158,13 @@ public class ConsensusManager {
                               .setRatisConsensus(
                                   RatisConfig.RatisConsensus.newBuilder()
                                       .setClientRequestTimeoutMillis(
-                                          CONF.getPartitionRegionRatisRequestTimeoutMs())
+                                          CONF.getConfigNodeRatisRequestTimeoutMs())
                                       .setClientMaxRetryAttempt(
-                                          CONF.getPartitionRegionRatisMaxRetryAttempts())
+                                          CONF.getConfigNodeRatisMaxRetryAttempts())
                                       .setClientRetryInitialSleepTimeMs(
-                                          CONF.getPartitionRegionRatisInitialSleepTimeMs())
+                                          CONF.getConfigNodeRatisInitialSleepTimeMs())
                                       .setClientRetryMaxSleepTimeMs(
-                                          CONF.getPartitionRegionRatisMaxSleepTimeMs())
+                                          CONF.getConfigNodeRatisMaxSleepTimeMs())
                                       .build())
                               .build())
                       .setStorageDir(CONF.getConsensusDir())
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
index 50a290793e..4deef73d26 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
@@ -111,7 +111,7 @@ public class ProcedureManager {
     this.planSizeLimit =
         ConfigNodeDescriptor.getInstance()
                 .getConf()
-                .getPartitionRegionRatisConsensusLogAppenderBufferSize()
+                .getConfigNodeRatisConsensusLogAppenderBufferSize()
             - IoTDBConstant.RAFT_LOG_BASIC_SIZE;
   }
 
@@ -366,7 +366,7 @@ public class ProcedureManager {
         return new TSStatus(TSStatusCode.CREATE_TRIGGER_ERROR.getStatusCode())
             .setMessage(
                 String.format(
-                    "Fail to create trigger[%s], the size of Jar is too large, you can increase the value of property 'partition_region_ratis_log_appender_buffer_size_max' on ConfigNode",
+                    "Fail to create trigger[%s], the size of Jar is too large, you can increase the value of property 'config_node_ratis_log_appender_buffer_size_max' on ConfigNode",
                     triggerInformation.getTriggerName()));
       }
     } catch (IOException e) {
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
index 4dcf74d1ce..38d177911c 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
@@ -62,7 +62,7 @@ public class UDFManager {
   private final long planSizeLimit =
       ConfigNodeDescriptor.getInstance()
               .getConf()
-              .getPartitionRegionRatisConsensusLogAppenderBufferSize()
+              .getConfigNodeRatisConsensusLogAppenderBufferSize()
           - IoTDBConstant.RAFT_LOG_BASIC_SIZE;
 
   public UDFManager(ConfigManager configManager, UDFInfo udfInfo) {
@@ -104,7 +104,7 @@ public class UDFManager {
         return new TSStatus(TSStatusCode.CREATE_TRIGGER_ERROR.getStatusCode())
             .setMessage(
                 String.format(
-                    "Fail to create UDF[%s], the size of Jar is too large, you can increase the value of property 'partition_region_ratis_log_appender_buffer_size_max' on ConfigNode",
+                    "Fail to create UDF[%s], the size of Jar is too large, you can increase the value of property 'config_node_ratis_log_appender_buffer_size_max' on ConfigNode",
                     udfName));
       }
 
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
index 1eb840c88a..bbfbf5e6ef 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
@@ -203,7 +203,7 @@ public class NodeManager {
     ratisConfig.setSchemaInitialSleepTime(conf.getSchemaRegionRatisInitialSleepTimeMs());
     ratisConfig.setSchemaMaxSleepTime(conf.getSchemaRegionRatisMaxSleepTimeMs());
 
-    ratisConfig.setSchemaPreserveWhenPurge(conf.getPartitionRegionRatisPreserveLogsWhenPurge());
+    ratisConfig.setSchemaPreserveWhenPurge(conf.getConfigNodeRatisPreserveLogsWhenPurge());
     ratisConfig.setDataPreserveWhenPurge(conf.getDataRegionRatisPreserveLogsWhenPurge());
 
     ratisConfig.setFirstElectionTimeoutMin(conf.getRatisFirstElectionTimeoutMinMs());
diff --git a/confignode/src/test/resources/confignode1conf/iotdb-common.properties b/confignode/src/test/resources/confignode1conf/iotdb-common.properties
index 365fb4f09b..b79e4d8e7a 100644
--- a/confignode/src/test/resources/confignode1conf/iotdb-common.properties
+++ b/confignode/src/test/resources/confignode1conf/iotdb-common.properties
@@ -24,4 +24,4 @@ schema_replication_factor=3
 data_replication_factor=3
 udf_lib_dir=target/confignode1/ext/udf
 trigger_root_dir=target/confignode1/ext/trigger
-partition_region_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
+config_node_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
diff --git a/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties
index 29f2bcccbf..33c4d08a8b 100644
--- a/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties
+++ b/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties
@@ -20,7 +20,7 @@
 cn_internal_address=127.0.0.1
 cn_internal_port=22277
 cn_consensus_port=22278
-cn_target_config_nodes=127.0.0.1:22277
+cn_target_config_node_list=127.0.0.1:22277
 cn_system_dir=target/confignode1/system
 cn_data_dirs=target/confignode1/data
 cn_consensus_dir=target/confignode1/consensus
\ No newline at end of file
diff --git a/confignode/src/test/resources/confignode2conf/iotdb-common.properties b/confignode/src/test/resources/confignode2conf/iotdb-common.properties
index 2efa2b39ed..675d97c842 100644
--- a/confignode/src/test/resources/confignode2conf/iotdb-common.properties
+++ b/confignode/src/test/resources/confignode2conf/iotdb-common.properties
@@ -24,4 +24,4 @@ schema_replication_factor=3
 data_replication_factor=3
 udf_lib_dir=target/confignode2/ext/udf
 trigger_root_dir=target/confignode2/ext/trigger
-partition_region_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
+config_node_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
diff --git a/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties
index 2ce515d909..3db341089a 100644
--- a/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties
+++ b/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties
@@ -20,7 +20,7 @@
 cn_internal_address=127.0.0.1
 cn_internal_port=22279
 cn_consensus_port=22280
-cn_target_config_nodes=127.0.0.1:22277
+cn_target_config_node_list=127.0.0.1:22277
 cn_system_dir=target/confignode2/system
 cn_data_dirs=target/confignode2/data
 cn_consensus_dir=target/confignode2/consensus
\ No newline at end of file
diff --git a/confignode/src/test/resources/confignode3conf/iotdb-common.properties b/confignode/src/test/resources/confignode3conf/iotdb-common.properties
index 30e5391585..b723170581 100644
--- a/confignode/src/test/resources/confignode3conf/iotdb-common.properties
+++ b/confignode/src/test/resources/confignode3conf/iotdb-common.properties
@@ -24,4 +24,4 @@ schema_replication_factor=3
 data_replication_factor=3
 udf_lib_dir=target/confignode3/ext/udf
 trigger_root_dir=target/confignode3/ext/trigger
-partition_region_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
+config_node_ratis_log_appender_buffer_size_max = 14194304
\ No newline at end of file
diff --git a/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties
index 6ff171cfe2..c79e26c78b 100644
--- a/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties
+++ b/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties
@@ -20,7 +20,7 @@
 cn_internal_address=127.0.0.1
 cn_internal_port=22281
 cn_consensus_port=22282
-cn_target_config_nodes=127.0.0.1:22277
+cn_target_config_node_list=127.0.0.1:22277
 cn_system_dir=target/confignode3/system
 cn_data_dirs=target/confignode3/data
 cn_consensus_dir=target/confignode3/consensus
\ No newline at end of file
diff --git a/docker/src/main/Dockerfile-1c1d b/docker/src/main/Dockerfile-1c1d
index 92e007cddd..abc710200f 100644
--- a/docker/src/main/Dockerfile-1c1d
+++ b/docker/src/main/Dockerfile-1c1d
@@ -30,6 +30,9 @@ RUN apt update \
   && rm /apache-iotdb-*-bin.zip \
   && mv /apache-iotdb-* /iotdb \
   && mv /start-1c1d.sh /iotdb/sbin \
+  && sed -i 's/dn_rpc_address=127.0.0.1/dn_rpc_address=0.0.0.0/g' /iotdb/conf/iotdb-datanode.properties \
+  && sed -i 's/dn_internal_address=127.0.0.1/dn_internal_address=0.0.0.0/g' /iotdb/conf/iotdb-datanode.properties \
+  && sed -i 's/cn_internal_address=127.0.0.1/cn_internal_address=0.0.0.0/g' /iotdb/conf/iotdb-confignode.properties \
   && apt remove unzip -y \
   && apt autoremove -y \
   && apt purge --auto-remove -y \
diff --git a/docker/src/main/Dockerfile-1c1d-influxdb b/docker/src/main/Dockerfile-1c1d-influxdb
index a9c64b4c0e..9877cfa334 100644
--- a/docker/src/main/Dockerfile-1c1d-influxdb
+++ b/docker/src/main/Dockerfile-1c1d-influxdb
@@ -30,7 +30,10 @@ RUN apt update \
   && rm /apache-iotdb-*-bin.zip \
   && mv /apache-iotdb-* /iotdb \
   && mv /start-1c1d.sh /iotdb/sbin \
-  && sed -i '/^# enable_influxdb_rpc_service=false/a enable_influxdb_rpc_service=true' /iotdb/sbin/../conf/iotdb-common.properties \
+  && sed -i 's/# enable_influxdb_rpc_service=false/enable_influxdb_rpc_service=true/g' /iotdb/conf/iotdb-common.properties \
+  && sed -i 's/dn_rpc_address=127.0.0.1/dn_rpc_address=0.0.0.0/g' /iotdb/conf/iotdb-datanode.properties \
+  && sed -i 's/dn_internal_address=127.0.0.1/dn_internal_address=0.0.0.0/g' /iotdb/conf/iotdb-datanode.properties \
+  && sed -i 's/cn_internal_address=127.0.0.1/cn_internal_address=0.0.0.0/g' /iotdb/conf/iotdb-confignode.properties \
   && apt remove unzip -y \
   && apt autoremove -y \
   && apt purge --auto-remove -y \
diff --git a/docs/UserGuide/Process-Data/Triggers.md b/docs/UserGuide/Process-Data/Triggers.md
index bc0cfc354f..d9a4b4d04a 100644
--- a/docs/UserGuide/Process-Data/Triggers.md
+++ b/docs/UserGuide/Process-Data/Triggers.md
@@ -459,7 +459,7 @@ During the process of creating and dropping triggers in the cluster, we maintain
 - Please do no register too many triggers in the cluster. Because the trigger information is fully stored in the ConfigNode, and there is a copy of the information in all DataNodes
 - **It is recommended to stop writing when registering triggers**. Registering a trigger is not an atomic operation. When registering a trigger, there will be an intermediate state in which some nodes in the cluster have registered the trigger, and some nodes have not yet registered successfully. To avoid write requests on some nodes being listened to by triggers and not being listened to on some nodes, we recommend not to perform writes when registering triggers.
 - When the node holding the stateful trigger instance goes down, we will try to restore the corresponding instance on another node. During the recovery process, we will call the restore interface of the trigger class once.
-- The trigger JAR package has a size limit, which must be less than min(`partition_region_ratis_log_appender_buffer_size_max`, 2G), where `partition_region_ratis_log_appender_buffer_size_max` is a configuration item. For the specific meaning, please refer to the IOTDB configuration item description.
+- The trigger JAR package has a size limit, which must be less than min(`config_node_ratis_log_appender_buffer_size_max`, 2G), where `config_node_ratis_log_appender_buffer_size_max` is a configuration item. For the specific meaning, please refer to the IOTDB configuration item description.
 - **It is better not to have classes with the same full class name but different function implementations in different JAR packages.** For example, trigger1 and trigger2 correspond to resources trigger1.jar and trigger2.jar respectively. If two JAR packages contain a `org.apache.iotdb.trigger.example.AlertListener` class, when `CREATE TRIGGER` uses this class, the system will randomly load the class in one of the JAR packages, which will eventually leads the inconsistent behavior of trig [...]
 
 ## Configuration Parameters
diff --git a/docs/zh/UserGuide/Process-Data/Triggers.md b/docs/zh/UserGuide/Process-Data/Triggers.md
index 29bbb6d168..0afa019c88 100644
--- a/docs/zh/UserGuide/Process-Data/Triggers.md
+++ b/docs/zh/UserGuide/Process-Data/Triggers.md
@@ -446,7 +446,7 @@ SHOW TRIGGERS
 - **建议注册触发器时停止写入**。注册触发器并不是一个原子操作,注册触发器时,会出现集群内部分节点已经注册了该触发器,部分节点尚未注册成功的中间状态。为了避免部分节点上的写入请求被触发器侦听到,部分节点上没有被侦听到的情况,我们建议注册触发器时不要执行写入。
 - 触发器将作为进程内程序执行,如果您的触发器编写不慎,内存占用过多,由于 IoTDB 并没有办法监控触发器所使用的内存,所以有 OOM 的风险。
 - 持有有状态触发器实例的节点宕机时,我们会尝试在另外的节点上恢复相应实例,在恢复过程中我们会调用一次触发器类的 restore 接口,您可以在该接口中实现恢复触发器所维护的状态的逻辑。
-- 触发器 JAR 包有大小限制,必须小于 min(`partition_region_ratis_log_appender_buffer_size_max`, 2G),其中 `partition_region_ratis_log_appender_buffer_size_max` 是一个配置项,具体含义可以参考 IOTDB 配置项说明。
+- 触发器 JAR 包有大小限制,必须小于 min(`config_node_ratis_log_appender_buffer_size_max`, 2G),其中 `config_node_ratis_log_appender_buffer_size_max` 是一个配置项,具体含义可以参考 IOTDB 配置项说明。
 - **不同的 JAR 包中最好不要有全类名相同但功能实现不一样的类**。例如:触发器 trigger1、trigger2 分别对应资源 trigger1.jar、trigger2.jar。如果两个 JAR 包里都包含一个 `org.apache.iotdb.trigger.example.AlertListener` 类,当 `CREATE TRIGGER` 使用到这个类时,系统会随机加载其中一个 JAR 包中的类,最终导致触发器执行行为不一致以及其他的问题。
 
 ## 配置参数
diff --git a/integration-test/README.md b/integration-test/README.md
index af67714882..27a6eff7fc 100644
--- a/integration-test/README.md
+++ b/integration-test/README.md
@@ -28,7 +28,7 @@ Now integration testing supports two kinds of architecture.
 - `Simple`: A cluster with 1 config node and 1 data node.
 - `Cluster1`: A cluster with 1 config node and 3 data nodes.
 
-## Integration Testing with One Copy Mode
+## Integration Testing with Simple Consensus Mode
 
 Integration testing in `Simple` mode can be run with both maven and IDEs like IntelliJ easily.
 
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java
index f6a08286f5..77636f5418 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java
@@ -52,7 +52,7 @@ public class ConfigNodeWrapper extends AbstractNodeWrapper {
     properties.setProperty(IoTDBConstant.CN_INTERNAL_ADDRESS, super.getIp());
     properties.setProperty(IoTDBConstant.CN_INTERNAL_PORT, String.valueOf(getPort()));
     properties.setProperty(IoTDBConstant.CN_CONSENSUS_PORT, String.valueOf(this.consensusPort));
-    properties.setProperty(IoTDBConstant.CN_TARGET_CONFIG_NODES, this.targetConfigNodes);
+    properties.setProperty(IoTDBConstant.CN_TARGET_CONFIG_NODE_LIST, this.targetConfigNodes);
     properties.setProperty(
         "config_node_consensus_protocol_class",
         "org.apache.iotdb.consensus.simple.SimpleConsensus");
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java
index 55f0e62ea1..15f573a7a6 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java
@@ -63,7 +63,7 @@ public class DataNodeWrapper extends AbstractNodeWrapper {
     properties.setProperty("mqtt_port", String.valueOf(this.mqttPort));
     properties.setProperty("connection_timeout_ms", "30000");
     if (this.targetConfigNode != null) {
-      properties.setProperty(IoTDBConstant.DN_TARGET_CONFIG_NODES, this.targetConfigNode);
+      properties.setProperty(IoTDBConstant.DN_TARGET_CONFIG_NODE_LIST, this.targetConfigNode);
     }
     properties.setProperty("max_tsblock_size_in_bytes", "1024");
     properties.setProperty("page_size_in_byte", "1024");
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java
index 306e4be413..70195ada74 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java
@@ -265,7 +265,7 @@ public class MppConfig implements BaseConfig {
   @Override
   public BaseConfig setRatisSnapshotTriggerThreshold(int ratisSnapshotTriggerThreshold) {
     confignodeProperties.setProperty(
-        "partition_region_ratis_snapshot_trigger_threshold",
+        "config_node_ratis_snapshot_trigger_threshold",
         String.valueOf(ratisSnapshotTriggerThreshold));
     return this;
   }
diff --git a/node-commons/src/assembly/resources/conf/iotdb-common.properties b/node-commons/src/assembly/resources/conf/iotdb-common.properties
index 8e78af7200..13a36b72b7 100644
--- a/node-commons/src/assembly/resources/conf/iotdb-common.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -15,77 +15,311 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-#
 
 ####################
-### Thrift Configuration
+### Replication configuration
 ####################
 
-# Datatype: boolean
-# rpc_thrift_compression_enable=false
+# ConfigNode consensus protocol type.
+# This parameter is unmodifiable after ConfigNode starts for the first time.
+# These consensus protocols are currently supported:
+# 1. org.apache.iotdb.consensus.ratis.RatisConsensus
+# Datatype: String
+# config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus
 
-# if true, a snappy based compression method will be called before sending data by the network
+# Default number of schema replicas
+# Can not be changed after the first start
+# Datatype: int
+# schema_replication_factor=1
+
+# SchemaRegion consensus protocol type.
+# This parameter is unmodifiable after ConfigNode starts for the first time.
+# These consensus protocols are currently supported:
+# 1. org.apache.iotdb.consensus.simple.SimpleConsensus
+# 2. org.apache.iotdb.consensus.ratis.RatisConsensus
+# Datatype: String
+# schema_region_consensus_protocol_class=org.apache.iotdb.consensus.simple.SimpleConsensus
+
+# Default number of data replicas
+# Can not be changed after the first start
+# Datatype: int
+# data_replication_factor=1
+
+# DataRegion consensus protocol type.
+# This parameter is unmodifiable after ConfigNode starts for the first time.
+# These consensus protocols are currently supported:
+# 1. org.apache.iotdb.consensus.simple.SimpleConsensus
+# 2. org.apache.iotdb.consensus.multileader.MultiLeaderConsensus
+# 3. org.apache.iotdb.consensus.ratis.RatisConsensus
+# Datatype: String
+# data_region_consensus_protocol_class=org.apache.iotdb.consensus.simple.SimpleConsensus
+
+####################
+### Partition (Load balancing) configuration
+####################
+
+# All parameters in Partition configuration are unmodifiable after ConfigNode starts for the first time.
+# And these parameters should be consistent within the ConfigNodeGroup.
+# Number of SeriesPartitionSlots per StorageGroup
+# Datatype: int
+# series_partition_slot_num=10000
+
+# SeriesPartitionSlot executor class
+# These hashing algorithms are currently supported:
+# 1. BKDRHashExecutor(Default)
+# 2. APHashExecutor
+# 3. JSHashExecutor
+# 4. SDBMHashExecutor
+# Also, if you want to implement your own SeriesPartition executor, you can inherit the SeriesPartitionExecutor class and
+# modify this parameter to correspond to your Java class
+# Datatype: String
+# series_partition_executor_class=org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor
+
+# The maximum number of SchemaRegion expected to be managed by each DataNode.
+# Notice: Since each StorageGroup requires at least one SchemaRegion to manage its schema,
+# this parameter doesn't limit the number of SchemaRegions when there are too many StorageGroups.
+# Datatype: Double
+# schema_region_per_data_node=1.0
+
+# The maximum number of DataRegion expected to be managed by each processor.
+# Notice: Since each StorageGroup requires at least two DataRegions to manage its data,
+# this parameter doesn't limit the number of DataRegions when there are too many StorageGroups.
+# Datatype: Double
+# data_region_per_processor=0.5
+
+# Region allocate strategy
+# These allocate strategies are currently supported:
+# 1. GREEDY(Default, when region is allocated, always choose the dataNode that has been allocated the minimum regions)
+# 2. COPY_SET(Random replication according to weight calculated from number of regions on all online dataNodes, suitable for large clusters)
+# Datatype: String
+# region_allocate_strategy=GREEDY
+
+# The routing policy of read/write requests
+# These routing policies are currently supported:
+# 1. leader(Default, routing to leader replica)
+# 2. greedy(Routing to the replica with the lowest load, might cause inconsistent reads)
+# Datatype: string
+# routing_policy=leader
+
+####################
+### Cluster management
+####################
+
+# Time partition interval in milliseconds, default is equal to one week
+# Can not be changed after the first start
+# Datatype: long
+# time_partition_interval_for_routing=604800000
+
+# The heartbeat interval in milliseconds, default is 1000ms
+# Datatype: long
+# heartbeat_interval_in_ms=1000
+
+# Disk remaining threshold at which DataNode is set to ReadOnly status
+# Datatype: double(percentage)
+# disk_space_warning_threshold=0.05
+
+####################
+### Memory Control Configuration
+####################
+
+# Whether to enable memory control
 # Datatype: boolean
-# this feature is under development, set this as false before it is done.
-# rpc_advanced_compression_enable=false
+# enable_mem_control=true
 
+# Memory Allocation Ratio: Write, Read, Schema, Consensus and Free Memory.
+# The parameter form is a:b:c:d:e, where a, b, c, d and e are integers. for example: 1:1:1:1:1 , 6:2:1:1:1
+# If you have high level of writing pressure and low level of reading pressure, please adjust it to for example 6:1:1:1:2
+# write_read_schema_free_memory_proportion=3:3:1:1:2
+
+# Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, PartitionCache and LastCache.
+# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 1:1:1:1 , 6:2:1:1
+# In cluster mode, we recommend 5:3:1:1. In standalone mode, we recommend 8:1:0:1
+# schema_memory_allocate_proportion=5:3:1:1
+
+# Memory allocation ratio in StorageEngine: MemTable, Compaction
+# The parameter form is a:b, where a and b are integers. For example: 8:2 , 7:3
+# storage_engine_memory_proportion=8:2
+
+# Max number of concurrent writing time partitions in one storage group
+# This parameter is used to control total memTable number when memory control is disabled
+# The max number of memTable is 4 * concurrent_writing_time_partition * storage group number
+# Datatype: long
+# concurrent_writing_time_partition=1
+
+# primitive array size (length of each array) in array pool
 # Datatype: int
-# rpc_selector_thread_count=1
+# primitive_array_size=32
+
+# size proportion for chunk metadata maintains in memory when writing tsfile
+# Datatype: double
+# chunk_metadata_size_proportion=0.1
+
+# Ratio of write memory for invoking flush disk, 0.4 by default
+# If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2
+# Datatype: double
+# flush_proportion=0.4
+
+# Ratio of write memory allocated for buffered arrays, 0.6 by default
+# Datatype: double
+# buffered_arrays_memory_proportion=0.6
+
+# Ratio of write memory for rejecting insertion, 0.8 by default
+# If you have extremely high write load (like batch=1000) and the physical memory size is large enough,
+# it can be set higher than the default value like 0.9
+# Datatype: double
+# reject_proportion=0.8
 
+# If memory cost of data region increased more than proportion of allocated memory for write, report to system. The default value is 0.001
+# Datatype: double
+# write_memory_variation_report_proportion=0.001
+
+# When an inserting is rejected, waiting period (in ms) to check system again, 50 by default.
+# If the insertion has been rejected and the read load is low, it can be set larger.
 # Datatype: int
-# rpc_min_concurrent_client_num=1
+# check_period_when_insert_blocked=50
 
+# size of ioTaskQueue. The default value is 10
 # Datatype: int
-# rpc_max_concurrent_client_num=65535
+# io_task_queue_size_for_flushing=10
 
-# thrift max frame size, 512MB by default
+# If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory
+# Datatype: bool
+# enable_query_memory_estimation=true
+
+####################
+### Schema Engine Configuration
+####################
+
+# cache size for SchemaRegion.
+# This cache is used to improve insert speed where all path check and TSDataType will be cached in SchemaRegion with corresponding Path.
 # Datatype: int
-# thrift_max_frame_size=536870912
+# schema_region_device_node_cache_size=10000
 
-# thrift init buffer size
+# thread pool size for read operation in DataNode's coordinator.
 # Datatype: int
-# thrift_init_buffer_size=1024
+# coordinator_read_executor_size=20
 
-# Thrift socket and connection timeout between raft nodes, in milliseconds.
+# thread pool size for write operation in DataNode's coordinator.
 # Datatype: int
-# connection_timeout_ms=20000
+# coordinator_write_executor_size=50
 
-# The maximum number of clients that can be idle for a node's InternalService.
-# When the number of idle clients on a node exceeds this number, newly returned clients will be released
+# cache size for partition.
+# This cache is used to improve partition fetch from config node.
 # Datatype: int
-# core_connection_for_internal_service=100
+# partition_cache_size=1000
 
-# The maximum number of clients that can be applied for a node's InternalService
+# Size of log buffer in each metadata operation plan(in byte).
+# If the size of a metadata operation plan is larger than this parameter, then it will be rejected by SchemaRegion
+# If it sets a value smaller than 0, use the default value 1024*1024
 # Datatype: int
-# max_connection_for_internal_service=100
+# mlog_buffer_size=1048576
 
-# selector thread (TAsyncClientManager) nums for async thread in a clientManager
+# The cycle when metadata log is periodically forced to be written to disk(in milliseconds)
+# If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshment
+# Set this parameter to 0 may slow down the operation on slow disk.
+# sync_mlog_period_in_ms=100
+
+# interval num for tag and attribute records when force flushing to disk
+# When a certain amount of tag and attribute records is reached, they will be force flushed to disk
+# It is possible to lose at most tag_attribute_flush_interval records
+# tag_attribute_flush_interval=1000
+
+# max size for tag and attribute of one time series
+# the unit is byte
 # Datatype: int
-# selector_thread_count_of_client_manager=1
+# tag_attribute_total_size=700
 
 ####################
-### Procedure Configuration
+### Configurations for creating schema automatically
 ####################
 
+# Whether creating schema automatically is enabled
+# If true, then create storage group and timeseries automatically when not exists in insertion
+# Or else, user need to create storage group and timeseries before insertion.
+# Datatype: boolean
+# enable_auto_create_schema=true
 
-# Default number of worker thread count
+# Storage group level when creating schema automatically is enabled
+# e.g. root.sg0.d1.s2
+#      we will set root.sg0 as the storage group if storage group level is 1
 # Datatype: int
-# procedure_core_worker_thread_count=4
+# default_storage_group_level=1
 
+# ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT
 
-# Default time interval of completed procedure cleaner work in, time unit is second
-# Datatype: int
-# procedure_completed_clean_interval=30
+# register time series as which type when receiving boolean string "true" or "false"
+# Datatype: TSDataType
+# boolean_string_infer_type=BOOLEAN
 
+# register time series as which type when receiving an integer string "67"
+# Datatype: TSDataType
+# integer_string_infer_type=FLOAT
 
-# Default ttl of completed procedure, time unit is second
-# Datatype: int
-# procedure_completed_evict_ttl=800
+# register time series as which type when receiving an integer string and using float may lose precision
+# num > 2 ^ 24
+# Datatype: TSDataType
+# long_string_infer_type=DOUBLE
+
+# register time series as which type when receiving a floating number string "6.7"
+# Datatype: TSDataType
+# floating_string_infer_type=FLOAT
+
+# register time series as which type when receiving the Literal NaN. Values can be DOUBLE, FLOAT or TEXT
+# Datatype: TSDataType
+# nan_string_infer_type=DOUBLE
+
+# BOOLEAN encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_boolean_encoding=RLE
+
+# INT32 encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_int32_encoding=RLE
+
+# INT64 encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_int64_encoding=RLE
+
+# FLOAT encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_float_encoding=GORILLA
+
+# DOUBLE encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_double_encoding=GORILLA
+
+# TEXT encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
+# default_text_encoding=PLAIN
 
 ####################
-### MPP Data Exchange Configuration
+### Query Configurations
 ####################
 
+# The read consistency level
+# These consistency levels are currently supported:
+# 1. strong(Default, read from the leader replica)
+# 2. weak(Read from a random replica)
+# Datatype: string
+# read_consistency_level=strong
+
+# whether to cache meta data(BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not.
+# Datatype: boolean
+# meta_data_cache_enable=true
+
+# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.
+# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50
+# chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50
+
+# Whether to enable LAST cache
+# Datatype: boolean
+# enable_last_cache=true
+
+# allowed max numbers of deduplicated path in one query
+# it's just an advised value, the real limitation will be the smaller one between this and the one we calculated
+# Datatype: int
+# max_deduplicated_path_num=1000
+
 # Datatype: int
 # mpp_data_exchange_core_pool_size=10
 
@@ -95,48 +329,74 @@
 # Datatype: int
 # mpp_data_exchange_keep_alive_time_in_ms=1000
 
-####################
-### Continuous Query Configuration
-####################
+# the default time period that is used in fill query, -1 by default means infinite past time
+# Datatype: int, Unit: ms
+# default_fill_interval=-1
 
-# Maximum number of continuous query tasks that can be pending for execution. When <= 0, the value is
-# 64 by default.
-# Datatype: int
-# max_pending_continuous_query_tasks=64
+# Datatype: float
+# group_by_fill_cache_size_in_mb=1.0
 
-# The size of log buffer for every CQ management operation plan. If the size of a CQ
-# management operation plan is larger than this parameter, the CQ management operation plan
-# will be rejected by CQManager.
-# Datatype: int
-# cqlog_buffer_size=1048576
+# the max execution time of a DriverTask
+# Datatype: int, Unit: ms
+# driver_task_execution_time_slice_in_ms=100
 
-# The number of threads in the scheduled thread pool that submit continuous query tasks periodically
+# the max capacity of a TsBlock
+# Datatype: int, Unit: byte
+# max_tsblock_size_in_bytes=1048576
+
+# the max number of lines in a single TsBlock
 # Datatype: int
-# continuous_query_submit_thread_count=2
+# max_tsblock_line_numbers=1000
 
-# The minimum value of the continuous query execution time interval
-# Datatype: long(duration)
-# continuous_query_min_every_interval_in_ms=1000
+# time cost(ms) threshold for slow query
+# Datatype: long
+# slow_query_threshold=5000
+
+# Is external sort enabled
+# Datatype: boolean
+# enable_external_sort=true
 
+# The maximum number of simultaneous chunk reading for a single time series.
+# If the num of simultaneous chunk reading is greater than external_sort_threshold, external sorting is used.
+# When external_sort_threshold increases, the number of chunks sorted at the same time in memory may increase and this will occupy more memory.
+# When external_sort_threshold decreases, triggering external sorting will increase the time-consuming.
+# Datatype: int
+# external_sort_threshold=1000
 
 ####################
-### Mlog Buffer Configuration
+### Storage Engine Configuration
 ####################
 
-# Size of log buffer in each metadata operation plan(in byte).
-# If the size of a metadata operation plan is larger than this parameter, then it will be rejected by SchemaRegion
-# If it sets a value smaller than 0, use the default value 1024*1024
+# Use this value to set timestamp precision as "ms", "us" or "ns".
+# Once the precision has been set, it can not be changed.
+# Datatype: String
+# timestamp_precision=ms
+
+# Default TTL for storage groups that are not set TTL by statements, If not set (default),
+# the TTL will be unlimited.
+# Notice: if this property is changed, previously created storage groups which are not set TTL will also be affected.
+# And negative values are accepted, which means you can only insert future data.
+# Datatype: long
+# Unit: ms
+# default_ttl_in_ms=36000000
+
+# When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default.
+# If the insertion has been rejected and the read load is low, it can be set larger
 # Datatype: int
-# mlog_buffer_size=1048576
+# max_waiting_time_when_insert_blocked=10000
 
-# The cycle when metadata log is periodically forced to be written to disk(in milliseconds)
-# If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshment
-# Set this parameter to 0 may slow down the operation on slow disk.
-# sync_mlog_period_in_ms=100
+# Add a switch to drop out-of-order data
+# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
+# Datatype: boolean
+# enable_discard_out_of_order_data=false
 
-####################
-### Storage Engine Configuration
-####################
+# whether enable data partition. If disabled, all data belongs to partition 0
+# Datatype: boolean
+# enable_partition=true
+
+# time range for partitioning data inside each data region, the unit is millisecond, default is equal to one week
+# Datatype: long
+# time_partition_interval_for_storage=604800000
 
 # What will the system do when unrecoverable error occurs.
 # Datatype: String
@@ -145,6 +405,7 @@
 # 2. SHUTDOWN: the system will be shutdown.
 # handle_system_error=CHANGE_TO_READ_ONLY
 
+# Only take effects when enable_mem_control is false.
 # When a memTable's size (in byte) exceeds this, the memtable is flushed to disk. The default threshold is 1 GB.
 # Datatype: long
 # memtable_size_threshold=1073741824
@@ -190,178 +451,50 @@
 # avg_series_point_number_threshold=100000
 
 # How many threads can concurrently flush. When <= 0, use CPU core number.
-# Datatype: int
-# flush_thread_count=0
-
-# How many threads can concurrently execute query statement. When <= 0, use CPU core number.
-# Datatype: int
-# query_thread_count=0
-
-# How many threads can concurrently read data for raw data query. When <= 0, use CPU core number.
-# Datatype: int
-# sub_rawQuery_thread_count=8
-
-# Blocking queue size for read task in raw data query. Must >= 1.
-# Datatype: int
-# raw_query_blocking_queue_capacity=5
-
-# whether take over the memory management by IoTDB rather than JVM when serializing memtable as bytes in memory
-# (i.e., whether use ChunkBufferPool), value true, false
-# Datatype: boolean
-# chunk_buffer_pool_enable=false
-
-# The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.)
-# Datatype: int
-# batch_size=100000
-
-
-# In one insert (one device, one timestamp, multiple measurements),
-# if enable partial insert, one measurement failure will not impact other measurements
-# Datatype: boolean
-# enable_partial_insert=true
-
-# the interval to log recover progress of each vsg when starting iotdb
-# Datatype: int
-# recovery_log_interval_in_ms=5000
-
-# When the insert plan column count reaches the specified threshold, which means that the plan is relatively large. At this time, may be enabled multithreading.
-# If the tablet is small, the time of each insertion is short.
-# If we enable multithreading, we also need to consider the switching loss between threads,
-# so we need to judge the size of the tablet.
-# Datatype: int
-# insert_multi_tablet_enable_multithreading_column_threshold=10
-
-####################
-### Upgrade Configurations
-####################
-
-# When there exists old version(0.9.x/v1) data, how many thread will be set up to perform upgrade tasks, 1 by default.
-# Set to 1 when less than or equal to 0.
-# Datatype: int
-# upgrade_thread_count=1
-
-####################
-### Schema Engine Configuration
-####################
-
-# cache size for SchemaRegion.
-# This cache is used to improve insert speed where all path check and TSDataType will be cached in SchemaRegion with corresponding Path.
-# Datatype: int
-# schema_region_device_node_cache_size=10000
-
-# thread pool size for read operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_read_executor_size=20
-
-# thread pool size for write operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_write_executor_size=50
-
-# cache size for partition.
-# This cache is used to improve partition fetch from config node.
-# Datatype: int
-# partition_cache_size=1000
-
-# interval num for tag and attribute records when force flushing to disk
-# When a certain amount of tag and attribute records is reached, they will be force flushed to disk
-# It is possible to lose at most tag_attribute_flush_interval records
-# tag_attribute_flush_interval=1000
-
-
-####################
-### Cache Configuration
-####################
-
-# whether to cache meta data(BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not.
-# Datatype: boolean
-# meta_data_cache_enable=true
-
-# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.
-# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50
-# chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50
-
-# Whether to enable LAST cache
-# Datatype: boolean
-# enable_last_cache=true
-
-
-####################
-### Memory Control Configuration
-####################
-
-# Whether to enable memory control
-# Datatype: boolean
-# enable_mem_control=true
-
-# Memory Allocation Ratio: Write, Read, Schema, Consensus and Free Memory.
-# The parameter form is a:b:c:d:e, where a, b, c, d and e are integers. for example: 1:1:1:1:1 , 6:2:1:1:1
-# If you have high level of writing pressure and low level of reading pressure, please adjust it to for example 6:1:1:1:2
-# write_read_schema_free_memory_proportion=3:3:1:1:2
-
-# Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, PartitionCache and LastCache.
-# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 1:1:1:1 , 6:2:1:1
-# In cluster mode, we recommend 5:3:1:1. In standalone mode, we recommend 8:1:0:1
-# schema_memory_allocate_proportion=5:3:1:1
-
-# Memory allocation ratio in StorageEngine: MemTable, Compaction
-# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 8:2 , 7:3
-# storage_engine_memory_proportion=8:2
-
-# Max number of concurrent writing time partitions in one storage group
-# This parameter is used to control total memTable number when memory control is disabled
-# The max number of memTable is 4 * concurrent_writing_time_partition * storage group number
-# Datatype: long
-# concurrent_writing_time_partition=1
-
-# primitive array size (length of each array) in array pool
-# Datatype: int
-# primitive_array_size=32
-
-# size proportion for chunk metadata maintains in memory when writing tsfile
-# Datatype: double
-# chunk_metadata_size_proportion=0.1
-
-# Ratio of write memory for invoking flush disk, 0.4 by default
-# If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2
-# Datatype: double
-# flush_proportion=0.4
-
-# Ratio of write memory allocated for buffered arrays, 0.6 by default
-# Datatype: double
-# buffered_arrays_memory_proportion=0.6
-
-# Ratio of write memory for rejecting insertion, 0.8 by default
-# If you have extremely high write load (like batch=1000) and the physical memory size is large enough,
-# it can be set higher than the default value like 0.9
-# Datatype: double
-# reject_proportion=0.8
+# Datatype: int
+# flush_thread_count=0
 
-# If memory cost of data region increased more than proportion of allocated memory for write, report to system. The default value is 0.001
-# Datatype: double
-# write_memory_variation_report_proportion=0.001
+# How many threads can concurrently execute query statement. When <= 0, use CPU core number.
+# Datatype: int
+# query_thread_count=0
 
-# When an inserting is rejected, waiting period (in ms) to check system again, 50 by default.
-# If the insertion has been rejected and the read load is low, it can be set larger.
+# How many threads can concurrently read data for raw data query. When <= 0, use CPU core number.
 # Datatype: int
-# check_period_when_insert_blocked=50
+# sub_rawQuery_thread_count=8
 
-# size of ioTaskQueue. The default value is 10
+# Blocking queue size for read task in raw data query. Must >= 1.
 # Datatype: int
-# io_task_queue_size_for_flushing=10
+# raw_query_blocking_queue_capacity=5
 
-# If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory
-# Datatype: bool
-# enable_query_memory_estimation=true
+# whether take over the memory management by IoTDB rather than JVM when serializing memtable as bytes in memory
+# (i.e., whether use ChunkBufferPool), value true, false
+# Datatype: boolean
+# chunk_buffer_pool_enable=false
 
+# The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.)
+# Datatype: int
+# batch_size=100000
 
-####################
-### Select-Into Configuration
-####################
+# In one insert (one device, one timestamp, multiple measurements),
+# if enable partial insert, one measurement failure will not impact other measurements
+# Datatype: boolean
+# enable_partial_insert=true
 
-# The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements.
-# When <= 0, use 10000.
+# the interval to log recover progress of each vsg when starting iotdb
 # Datatype: int
-# select_into_insert_tablet_plan_row_limit=10000
+# recovery_log_interval_in_ms=5000
+
+# When the insert plan column count reaches the specified threshold, which means that the plan is relatively large. At this time, may be enabled multithreading.
+# If the tablet is small, the time of each insertion is short.
+# If we enable multithreading, we also need to consider the switching loss between threads,
+# so we need to judge the size of the tablet.
+# Datatype: int
+# insert_multi_tablet_enable_multithreading_column_threshold=10
+
+# When there exists old version(0.9.x/v1) data, how many thread will be set up to perform upgrade tasks, 1 by default.
+# Set to 1 when less than or equal to 0.
+# Datatype: int
+# upgrade_thread_count=1
 
 ####################
 ### Compaction Configurations
@@ -480,7 +613,6 @@
 # Datatype: int
 # sub_compaction_thread_count=4
 
-
 ####################
 ### Write Ahead Log Configuration
 ####################
@@ -555,221 +687,76 @@
 # Datatype: long
 # multi_leader_cache_window_time_in_ms=-1
 
-
 ####################
-### Timestamp Precision
+### TsFile Configurations
 ####################
 
-# Use this value to set timestamp precision as "ms", "us" or "ns".
-# Once the precision has been set, it can not be changed.
-# Datatype: String
-timestamp_precision=ms
-
-# Default TTL for storage groups that are not set TTL by statements, If not set (default),
-# the TTL will be unlimited.
-# Notice: if this property is changed, previous created storage group which are not set TTL will
-# also be affected. And negative values are accepted, which means you can only insert future
-# data.
-# Datatype: long
-# Unit: ms
-# default_ttl_in_ms=36000000
-
-####################
-### Tlog Size Configuration
-####################
-# max size for tag and attribute of one time series
-# the unit is byte
 # Datatype: int
-# tag_attribute_total_size=700
-
-####################
-### Out of Order Data Configuration
-####################
-
-# Add a switch to drop ouf-of-order data
-# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
-# Datatype: boolean
-# enable_discard_out_of_order_data=false
-
-####################
-### Client Configuration
-####################
+# group_size_in_byte=134217728
 
-# The maximum session idle time. unit: ms
-# Idle sessions are the ones that performs neither query or non-query operations for a period of time
-# Set to 0 to disable session timeout
+# The memory size for each series writer to pack page, default value is 64KB
 # Datatype: int
-# session_timeout_threshold=0
-
-####################
-### Insert Control
-####################
+# page_size_in_byte=65536
 
-# When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default.
-# If the insertion has been rejected and the read load is low, it can be set larger
+# The maximum number of data points in a page, default 1024*1024
 # Datatype: int
-# max_waiting_time_when_insert_blocked=10000
-
-####################
-### Query Configurations
-####################
+# max_number_of_points_in_page=1048576
 
-# allowed max numbers of deduplicated path in one query
-# it's just an advised value, the real limitation will be the smaller one between this and the one we calculated
+# The threshold for pattern matching in regex
 # Datatype: int
-# max_deduplicated_path_num=1000
-
-# the default time period that used in fill query, -1 by default means infinite past time
-# Datatype: int, Unit: ms
-# default_fill_interval=-1
-
-# the max execution time of a DriverTask
-# Datatype: int, Unit: ms
-# driver_task_execution_time_slice_in_ms=100
-
-# the max capacity of a TsBlock
-# Datatype: int, Unit: byte
-# max_tsblock_size_in_bytes=1048576
+# pattern_matching_threshold=1000000
 
-# the max number of lines in a single TsBlock
+# Max size limitation of input string
 # Datatype: int
-# max_tsblock_line_numbers=1000
-
-# time cost(ms) threshold for slow query
-# Datatype: long
-# slow_query_threshold=5000
-
-####################
-### External sort Configuration
-####################
-# Is external sort enable
-# Datatype: boolean
-# enable_external_sort=true
+# max_string_length=128
 
-# The maximum number of simultaneous chunk reading for a single time series.
-# If the num of simultaneous chunk reading is greater than external_sort_threshold, external sorting is used.
-# When external_sort_threshold increases, the number of chunks sorted at the same time in memory may increase and this will occupy more memory.
-# When external_sort_threshold decreases, triggering external sorting will increase the time-consuming.
+# Floating-point precision
 # Datatype: int
-# external_sort_threshold=1000
-
-####################
-### PIPE Configuration
-####################
-
-# White IP list of Sync client.
-# Please use the form of network segment to present the range of IP, for example: 192.168.0.0/16
-# If there are more than one IP segment, please separate them by commas
-# The default is to allow all IP to sync
-# Datatype: String
-# ip_white_list=0.0.0.0/0
-
-
-# The maximum number of retry when syncing a file to receiver fails.
-# max_number_of_sync_file_retry=5
+# float_precision=2
 
-####################
-### Configurations for watermark module
-####################
-# Datatype: boolean
-# watermark_module_opened=false
-# Datatype: String
-# watermark_secret_key=IoTDB*2019@Beijing
-# Datatype: String
-# watermark_bit_string=100101110100
-# Datatype: String
-# watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
+# Encoder configuration
+# Encoder of time series, supports TS_2DIFF, PLAIN and RLE(run-length encoding), REGULAR and default value is TS_2DIFF
+# time_encoder=TS_2DIFF
 
-####################
-### Configurations for creating schema automatically
-####################
+# Encoder of value series. default value is PLAIN.
+# For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG.
+# value_encoder=PLAIN
 
-# Whether creating schema automatically is enabled
-# If true, then create storage group and timeseries automatically when not exists in insertion
-# Or else, user need to create storage group and timeseries before insertion.
-# Datatype: boolean
-# enable_auto_create_schema=true
+# Compression configuration
+# Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY
+# compressor=SNAPPY
 
-# Storage group level when creating schema automatically is enabled
-# e.g. root.sg0.d1.s2
-#      we will set root.sg0 as the storage group if storage group level is 1
+# Maximum degree of a metadataIndex node, default value is 256
 # Datatype: int
-# default_storage_group_level=1
-
-# ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT
-
-# register time series as which type when receiving boolean string "true" or "false"
-# Datatype: TSDataType
-# boolean_string_infer_type=BOOLEAN
-
-# register time series as which type when receiving an integer string "67"
-# Datatype: TSDataType
-# integer_string_infer_type=FLOAT
-
-# register time series as which type when receiving an integer string and using float may lose precision
-# num > 2 ^ 24
-# Datatype: TSDataType
-# long_string_infer_type=DOUBLE
-
-# register time series as which type when receiving a floating number string "6.7"
-# Datatype: TSDataType
-# floating_string_infer_type=FLOAT
-
-# register time series as which type when receiving the Literal NaN. Values can be DOUBLE, FLOAT or TEXT
-# Datatype: TSDataType
-# nan_string_infer_type=DOUBLE
-
-# BOOLEAN encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_boolean_encoding=RLE
-
-# INT32 encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_int32_encoding=RLE
-
-# INT64 encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_int64_encoding=RLE
+# max_degree_of_index_node=256
 
-# FLOAT encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_float_encoding=GORILLA
+# time interval in minute for calculating query frequency
+# Datatype: int
+# frequency_interval_in_minute=1
 
-# DOUBLE encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_double_encoding=GORILLA
+# Signal-noise-ratio (SNR) of FREQ encoding
+# Datatype: double
+# freq_snr=40.0
 
-# TEXT encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_text_encoding=PLAIN
+# Block size of FREQ encoding
+# Datatype: integer
+# freq_block_size=1024
 
 ####################
-### MQTT Broker Configuration
+### Watermark Configuration
 ####################
 
-# whether to enable the mqtt service.
 # Datatype: boolean
-# enable_mqtt_service=false
+# watermark_module_opened=false
 
-# the mqtt service binding host.
 # Datatype: String
-# mqtt_host=0.0.0.0
-
-# the mqtt service binding port.
-# Datatype: int
-# mqtt_port=1883
-
-# the handler pool size for handing the mqtt messages.
-# Datatype: int
-# mqtt_handler_pool_size=1
+# watermark_secret_key=IoTDB*2019@Beijing
 
-# the mqtt message payload formatter.
 # Datatype: String
-# mqtt_payload_formatter=json
+# watermark_bit_string=100101110100
 
-# max length of mqtt message in byte
-# Datatype: int
-# mqtt_max_message_size=1048576
+# Datatype: String
+# watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
 
 ####################
 ### Authorization Configuration
@@ -804,7 +791,6 @@ timestamp_precision=ms
 # Datatype: int
 # author_cache_expire_time=30
 
-
 ####################
 ### UDF Configuration
 ####################
@@ -862,8 +848,6 @@ timestamp_precision=ms
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # udf_temporary_lib_dir=ext/udf_temporary
 
-
-
 ####################
 ### Trigger Configuration
 ####################
@@ -916,230 +900,172 @@ timestamp_precision=ms
 # Datatype: int
 # stateful_trigger_retry_num_when_not_found=3
 
-
-####################
-### Time Partition Configuration
-####################
-
-# whether enable data partition. If disabled, all data belongs to partition 0
-# Datatype: boolean
-# enable_partition=true
-
-# time range for partitioning data inside each data region, the unit is millisecond, default is equal to one week
-# Datatype: long
-# time_partition_interval_for_storage=604800000
-
-####################
-### Influx DB RPC Service Configuration
-####################
-# Datatype: boolean
-# enable_influxdb_rpc_service=false
-
-# Datatype: int
-# influxdb_rpc_port=8086
-
-####################
-### Group By Fill Configuration
-####################
-# Datatype: float
-# group_by_fill_cache_size_in_mb=1.0
-
-####################
-### Trigger Forward
-####################
 # Number of queues per forwarding trigger
-trigger_forward_max_queue_number=8
+# trigger_forward_max_queue_number=8
+
 # The length of one of the queues per forwarding trigger
-trigger_forward_max_size_per_queue=2000
+# trigger_forward_max_size_per_queue=2000
+
 # Trigger forwarding data size per batch
-trigger_forward_batch_size=50
+# trigger_forward_batch_size=50
+
 # Trigger HTTP forward pool size
-trigger_forward_http_pool_size=200
+# trigger_forward_http_pool_size=200
+
 # Trigger HTTP forward pool max connection for per route
-trigger_forward_http_pool_max_per_route=20
+# trigger_forward_http_pool_max_per_route=20
+
 # Trigger MQTT forward pool size
-trigger_forward_mqtt_pool_size=4
+# trigger_forward_mqtt_pool_size=4
 
 ####################
-### Cluster configuration
+### Select-Into Configuration
 ####################
 
+# The maximum number of rows that can be processed in insert-tablet-plan when executing select-into statements.
+# When <= 0, use 10000.
+# Datatype: int
+# select_into_insert_tablet_plan_row_limit=10000
+
 ####################
-### Region configuration
+### Continuous Query Configuration
 ####################
 
-# ConfigNode consensus protocol type.
-# This parameter is unmodifiable after ConfigNode starts for the first time.
-# These consensus protocols are currently supported:
-# 1. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
-# Datatype: String
-# config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus
-
-# SchemaRegion consensus protocol type.
-# This parameter is unmodifiable after ConfigNode starts for the first time.
-# These consensus protocols are currently supported:
-# 1. org.apache.iotdb.consensus.simple.SimpleConsensus(Consensus patterns optimized specifically for single replica)
-# 2. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
-# Datatype: String
-# schema_region_consensus_protocol_class=org.apache.iotdb.consensus.simple.SimpleConsensus
-
-# The maximum number of SchemaRegion expected to be managed by each DataNode.
-# Notice: Since each StorageGroup requires at least one SchemaRegion to manage its schema,
-# this parameter doesn't limit the number of SchemaRegions when there are too many StorageGroups.
-# Datatype: Double
-# schema_region_per_data_node=1.0
-
-# DataRegion consensus protocol type.
-# This parameter is unmodifiable after ConfigNode starts for the first time.
-# These consensus protocols are currently supported:
-# 1. org.apache.iotdb.consensus.simple.SimpleConsensus(Consensus patterns optimized specifically for single replica)
-# 2. org.apache.iotdb.consensus.multileader.MultiLeaderConsensus(weak consistency, high performance)
-# 3. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
-# Datatype: String
-# data_region_consensus_protocol_class=org.apache.iotdb.consensus.simple.SimpleConsensus
-
-# The maximum number of DataRegion expected to be managed by each processor.
-# Notice: Since each StorageGroup requires at least two DataRegions to manage its data,
-# this parameter doesn't limit the number of DataRegions when there are too many StorageGroups.
-# Datatype: Double
-# data_region_per_processor=0.5
-
-# Region allocate strategy
-# These allocate strategies are currently supported:
-# 1. GREEDY(Default, when region is allocated, always choose the dataNode that has bean allocated the least regions)
-# 2. COPY_SET(Random replication according to wight calculated from number of regions on all online dataNodes, suitable for large clusters)
-# Datatype: String
-# region_allocate_strategy=GREEDY
-
-# All parameters in PartitionSlot configuration is unmodifiable after ConfigNode starts for the first time.
-# And these parameters should be consistent within the ConfigNodeGroup.
-# Number of SeriesPartitionSlots per StorageGroup
+# Maximum number of continuous query tasks that can be pending for execution. When <= 0, the value is
+# 64 by default.
 # Datatype: int
-# series_partition_slot_num=10000
+# max_pending_continuous_query_tasks=64
 
+# The size of log buffer for every CQ management operation plan. If the size of a CQ
+# management operation plan is larger than this parameter, the CQ management operation plan
+# will be rejected by CQManager.
+# Datatype: int
+# cqlog_buffer_size=1048576
 
-# SeriesPartitionSlot executor class
-# These hashing algorithms are currently supported:
-# 1. BKDRHashExecutor(Default)
-# 2. APHashExecutor
-# 3. JSHashExecutor
-# 4. SDBMHashExecutor
-# Also, if you want to implement your own SeriesPartition executor, you can inherit the SeriesPartitionExecutor class and
-# modify this parameter to correspond to your Java class
-# Datatype: String
-# series_partition_executor_class=org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor
+# The number of threads in the scheduled thread pool that submit continuous query tasks periodically
+# Datatype: int
+# continuous_query_submit_thread_count=2
 
-# The routing policy of read/write requests
-# These routing policy are currently supported:
-# 1. leader(Default, routing to leader replica)
-# 2. greedy(Routing to replica with the lowest load, might cause read un-consistent)
-# Datatype: string
-# routing_policy=leader
+# The minimum value of the continuous query execution time interval
+# Datatype: long(duration)
+# continuous_query_min_every_interval_in_ms=1000
 
 ####################
-### StorageGroup initial configuration
+### PIPE Configuration
 ####################
 
-# All parameters in StorageGroup configuration is unmodifiable after ConfigNode starts for the first time.
-# And these parameters should be consistent within the ConfigNodeGroup.
+# White IP list of Sync client.
+# Please use the form of network segment to present the range of IP, for example: 192.168.0.0/16
+# If there are more than one IP segment, please separate them by commas
+# The default is to allow all IP to sync
+# Datatype: String
+# ip_white_list=0.0.0.0/0
 
+# The maximum number of retry when syncing a file to receiver fails.
+# max_number_of_sync_file_retry=5
 
-# Default TTL for storage groups that are not set TTL by statements, in ms. If not set (default),
-# the TTL will be unlimited.
-# Notice: if this property is changed, previous created storage group which are not set TTL will
-# also be affected. And negative values are accepted, which means you can only insert future data.
-# Datatype: long
-# default_ttl=36000000
+####################
+### Connection Configuration
+####################
 
+# The maximum session idle time. unit: ms
+# Idle sessions are the ones that perform neither query nor non-query operations for a period of time
+# Set to 0 to disable session timeout
+# Datatype: int
+# session_timeout_threshold=0
 
-# Time partition interval in milliseconds, default is equal to one week
-# Datatype: long
-# time_partition_interval_for_routing=604800000
+# Datatype: boolean
+# rpc_thrift_compression_enable=false
 
+# if true, a snappy based compression method will be called before sending data by the network
+# Datatype: boolean
+# this feature is under development, set this as false before it is done.
+# rpc_advanced_compression_enable=false
 
-# Default number of SchemaRegion replicas
 # Datatype: int
-# schema_replication_factor=1
-
+# rpc_selector_thread_count=1
 
-# Default number of DataRegion replicas
 # Datatype: int
-# data_replication_factor=1
-
-####################
-### Read configuration
-####################
-
+# rpc_min_concurrent_client_num=1
 
-# The read consistency level
-# These consistency levels are currently supported:
-# 1. strong(Default, read from the leader replica)
-# 2. weak(Read from a random replica)
-# Datatype: string
-# read_consistency_level=strong
+# Datatype: int
+# rpc_max_concurrent_client_num=65535
 
-####################
-### Heartbeat configuration
-####################
+# thrift max frame size, 512MB by default
+# Datatype: int
+# thrift_max_frame_size=536870912
 
+# thrift init buffer size
+# Datatype: int
+# thrift_init_buffer_size=1024
 
-# The heartbeat interval in milliseconds, default is 1000ms
-# Datatype: long
-# heartbeat_interval_in_ms=1000
+# Thrift socket and connection timeout between raft nodes, in milliseconds.
+# Datatype: int
+# connection_timeout_ms=20000
 
+# The maximum number of clients that can be idle for a node's InternalService.
+# When the number of idle clients on a node exceeds this number, newly returned clients will be released
+# Datatype: int
+# core_connection_for_internal_service=100
 
+# The maximum number of clients that can be applied for a node's InternalService
+# Datatype: int
+# max_connection_for_internal_service=100
 
+# selector thread (TAsyncClientManager) nums for async thread in a clientManager
+# Datatype: int
+# selector_thread_count_of_client_manager=1
 
 ####################
 ### RatisConsensus Configuration
 ####################
 
 # max payload size for a single log-sync-RPC from leader to follower
-# partition_region_ratis_log_appender_buffer_size_max=4194304
+# config_node_ratis_log_appender_buffer_size_max=4194304
 # schema_region_ratis_log_appender_buffer_size_max=4194304
 # data_region_ratis_log_appender_buffer_size_max=4194304
 
 # trigger a snapshot when snapshot_trigger_threshold logs are written
-# partition_region_ratis_snapshot_trigger_threshold=400000
+# config_node_ratis_snapshot_trigger_threshold=400000
 # schema_region_ratis_snapshot_trigger_threshold=400000
 # data_region_ratis_snapshot_trigger_threshold=400000
-# partition_region_one_copy_snapshot_trigger_threshold=400000
+# config_node_simple_consensus_snapshot_trigger_threshold=400000
 
 # allow flushing Raft Log asynchronously
-# partition_region_ratis_log_unsafe_flush_enable=false
+# config_node_ratis_log_unsafe_flush_enable=false
 # schema_region_ratis_log_unsafe_flush_enable=false
 # data_region_ratis_log_unsafe_flush_enable=false
 
 # max capacity of a single Log segment file (in byte, by default 24MB)
-# partition_region_ratis_log_segment_size_max_in_byte=25165824
+# config_node_ratis_log_segment_size_max_in_byte=25165824
 # schema_region_ratis_log_segment_size_max_in_byte=25165824
 # data_region_ratis_log_segment_size_max_in_byte=25165824
-# partition_region_one_copy_log_segment_size_max_in_byte=25165824
+# config_node_simple_consensus_log_segment_size_max_in_byte=25165824
 
 # flow control window for ratis grpc log appender
-# partition_region_ratis_grpc_flow_control_window=4194304
+# config_node_ratis_grpc_flow_control_window=4194304
 # schema_region_ratis_grpc_flow_control_window=4194304
 # data_region_ratis_grpc_flow_control_window=4194304
 
 # min election timeout for leader election
-# partition_region_ratis_rpc_leader_election_timeout_min_ms=2000
+# config_node_ratis_rpc_leader_election_timeout_min_ms=2000
 # schema_region_ratis_rpc_leader_election_timeout_min_ms=2000
 # data_region_ratis_rpc_leader_election_timeout_min_ms=2000
 
 # max election timeout for leader election
-# partition_region_ratis_rpc_leader_election_timeout_max_ms=4000
+# config_node_ratis_rpc_leader_election_timeout_max_ms=4000
 # schema_region_ratis_rpc_leader_election_timeout_max_ms=4000
 # data_region_ratis_rpc_leader_election_timeout_max_ms=4000
 
 # ratis client retry threshold
-# partition_region_ratis_request_timeout_ms=10000
+# config_node_ratis_request_timeout_ms=10000
 # schema_region_ratis_request_timeout_ms=10000
 # data_region_ratis_request_timeout_ms=10000
 
 # currently we use exponential back-off retry policy for ratis
-# partition_region_ratis_max_retry_attempts=10
-# partition_region_ratis_initial_sleep_time_ms=100
-# partition_region_ratis_max_sleep_time_ms=10000
+# config_node_ratis_max_retry_attempts=10
+# config_node_ratis_initial_sleep_time_ms=100
+# config_node_ratis_max_sleep_time_ms=10000
 # schema_region_ratis_max_retry_attempts=10
 # schema_region_ratis_initial_sleep_time_ms=100
 # schema_region_ratis_max_sleep_time_ms=10000
@@ -1148,7 +1074,7 @@ trigger_forward_mqtt_pool_size=4
 # data_region_ratis_max_sleep_time_ms=10000
 
 # preserve certain logs when take snapshot and purge
-# partition_region_ratis_preserve_logs_num_when_purge=1000
+# config_node_ratis_preserve_logs_num_when_purge=1000
 # schema_region_ratis_preserve_logs_num_when_purge=1000
 # data_region_ratis_preserve_logs_num_when_purge=1000
 
@@ -1157,67 +1083,55 @@ trigger_forward_mqtt_pool_size=4
 # ratis_first_election_timeout_max_ms=150
 
 ####################
-### Disk Monitor
+### Procedure Configuration
 ####################
 
+# Default number of worker thread count
+# Datatype: int
+# procedure_core_worker_thread_count=4
 
-# Disk remaining threshold at which DataNode is set to ReadOnly status
-# Datatype: double(percentage)
-# disk_space_warning_threshold=5.0
+# Default time interval of completed procedure cleaner work in, time unit is second
+# Datatype: int
+# procedure_completed_clean_interval=30
+
+# Default ttl of completed procedure, time unit is second
+# Datatype: int
+# procedure_completed_evict_ttl=800
 
 ####################
-### Configurations for tsfile-format
+### MQTT Broker Configuration
 ####################
 
-# Datatype: int
-# group_size_in_byte=134217728
+# whether to enable the mqtt service.
+# Datatype: boolean
+# enable_mqtt_service=false
 
-# The memory size for each series writer to pack page, default value is 64KB
-# Datatype: int
-# page_size_in_byte=65536
+# the mqtt service binding host.
+# Datatype: String
+# mqtt_host=0.0.0.0
 
-# The maximum number of data points in a page, default 1024*1024
+# the mqtt service binding port.
 # Datatype: int
-# max_number_of_points_in_page=1048576
+# mqtt_port=1883
 
-# The threshold for pattern matching in regex
+# the handler pool size for handling the mqtt messages.
 # Datatype: int
-# pattern_matching_threshold=1000000
+# mqtt_handler_pool_size=1
 
-# Max size limitation of input string
-# Datatype: int
-# max_string_length=128
+# the mqtt message payload formatter.
+# Datatype: String
+# mqtt_payload_formatter=json
 
-# Floating-point precision
+# max length of mqtt message in byte
 # Datatype: int
-# float_precision=2
-
-# Encoder configuration
-# Encoder of time series, supports TS_2DIFF, PLAIN and RLE(run-length encoding), REGULAR and default value is TS_2DIFF
-# time_encoder=TS_2DIFF
-
-# Encoder of value series. default value is PLAIN.
-# For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG.
-# value_encoder=PLAIN
+# mqtt_max_message_size=1048576
 
-# Compression configuration
-# Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY
-# compressor=SNAPPY
+####################
+### InfluxDB RPC Service Configuration
+####################
 
-# Maximum degree of a metadataIndex node, default value is 256
-# Datatype: int
-# max_degree_of_index_node=256
+# Datatype: boolean
+# enable_influxdb_rpc_service=false
 
-# time interval in minute for calculating query frequency
 # Datatype: int
-# frequency_interval_in_minute=1
-
-# Signal-noise-ratio (SNR) of FREQ encoding
-# Datatype: double
-# freq_snr=40.0
-
-# Block size of FREQ encoding
-# Datatype: integer
-# freq_block_size=1024
-
-
+# influxdb_rpc_port=8086
\ No newline at end of file
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java b/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
index 21a935d5db..f3f1062bb8 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
@@ -117,7 +117,7 @@ public class CommonConfig {
   private volatile String statusReason = null;
 
   /** Disk Monitor */
-  private double diskSpaceWarningThreshold = 5.0;
+  private double diskSpaceWarningThreshold = 0.05;
 
   CommonConfig() {}
 
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java b/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
index d5a26f4982..32adf711ca 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/conf/IoTDBConstant.java
@@ -60,8 +60,8 @@ public class IoTDBConstant {
   public static final String DN_INTERNAL_PORT = "dn_internal_port";
   public static final String CN_CONSENSUS_PORT = "cn_consensus_port";
 
-  public static final String CN_TARGET_CONFIG_NODES = "cn_target_config_nodes";
-  public static final String DN_TARGET_CONFIG_NODES = "dn_target_config_nodes";
+  public static final String CN_TARGET_CONFIG_NODE_LIST = "cn_target_config_node_list";
+  public static final String DN_TARGET_CONFIG_NODE_LIST = "dn_target_config_node_list";
 
   // when running the program in IDE, we can not get the version info using
   // getImplementationVersion()
diff --git a/server/src/assembly/resources/conf/iotdb-datanode.properties b/server/src/assembly/resources/conf/iotdb-datanode.properties
index b90c723699..0fab8965a6 100644
--- a/server/src/assembly/resources/conf/iotdb-datanode.properties
+++ b/server/src/assembly/resources/conf/iotdb-datanode.properties
@@ -21,51 +21,54 @@
 ### Data Node RPC Configuration
 ####################
 
-# could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address
-# if enable redirection in session, rpc_address should be the ip which session can connect.
+# Used for connection of IoTDB native clients(Session)
+# Could set 127.0.0.1(for local test) or ipv4 address
 # Datatype: String
-dn_rpc_address=0.0.0.0
+dn_rpc_address=127.0.0.1
 
+# Used for connection of IoTDB native clients(Session)
+# Bind with dn_rpc_address
 # Datatype: int
 dn_rpc_port=6667
 
-# Datatype: int
-dn_mpp_data_exchange_port=8777
-
+# Used for communication inside cluster.
+# could set 127.0.0.1(for local test) or ipv4 address.
 # Datatype: String
-# used for communication between cluster nodes.
-# could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address.
-dn_internal_address=0.0.0.0
+dn_internal_address=127.0.0.1
 
+# Used for communication inside cluster.
+# Bind with dn_internal_address
 # Datatype: int
-# port for coordinator's communication between cluster nodes.
 dn_internal_port=9003
 
+# Port for data exchange among DataNodes inside cluster
+# Bind with dn_internal_address
 # Datatype: int
-# port for consensus's communication for data region between cluster nodes.
-dn_data_region_consensus_port=40010
+dn_mpp_data_exchange_port=8777
 
+# port for consensus's communication for schema region inside cluster.
+# Bind with dn_internal_address
 # Datatype: int
-# port for consensus's communication for schema region between cluster nodes.
 dn_schema_region_consensus_port=50010
 
+# port for consensus's communication for data region inside cluster.
+# Bind with dn_internal_address
+# Datatype: int
+dn_data_region_consensus_port=40010
+
 # Datatype: long
 # The time of data node waiting for the next retry to join into the cluster.
-dn_join_cluster_retry_interval_ms=5000
+# dn_join_cluster_retry_interval_ms=5000
 
 ####################
 ### Target Config Nodes
 ####################
 
-# At least one running ConfigNode should be set for joining the cluster
-# Format: ip:port
-# where the ip should be consistent with the target ConfigNode's confignode_internal_address,
-# and the port should be consistent with the target ConfigNode's confignode_internal_port.
-# When successfully connecting to the ConfigNodeGroup, DataNode will get all online
-# config nodes and store them in memory.
+# For the first ConfigNode to start, cn_target_config_node_list points to its own cn_internal_address:cn_internal_port.
+# For other ConfigNodes that want to join the cluster, target_config_node_list points to any running ConfigNode's cn_internal_address:cn_internal_port.
+# Format: address:port(,address:port)*   e.g. 127.0.0.1:22277,127.0.0.1:22279
 # Datatype: String
-# Notice: The ip for any target_config_node should never be 0.0.0.0
-dn_target_config_nodes=127.0.0.1:22277
+dn_target_config_node_list=127.0.0.1:22277
 
 
 ####################
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 725e6761a8..94a6703709 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -88,7 +88,7 @@ public class IoTDBConfig {
   private boolean enableMQTTService = false;
 
   /** the mqtt service binding host. */
-  private String mqttHost = "0.0.0.0";
+  private String mqttHost = "127.0.0.1";
 
   /** the mqtt service binding port. */
   private int mqttPort = 1883;
@@ -103,7 +103,7 @@ public class IoTDBConfig {
   private int mqttMaxMessageSize = 1048576;
 
   /** Rpc binding address. */
-  private String rpcAddress = "0.0.0.0";
+  private String rpcAddress = "127.0.0.1";
 
   /** whether to use thrift compression. */
   private boolean rpcThriftCompressionEnable = false;
@@ -865,7 +865,7 @@ public class IoTDBConfig {
   private int schemaFileLogSize = 16384;
 
   /** Internal address for data node */
-  private String internalAddress = "0.0.0.0";
+  private String internalAddress = "127.0.0.1";
 
   /** Internal port for coordinator */
   private int internalPort = 9003;
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 4ec662803d..814bf550e2 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -1837,7 +1837,7 @@ public class IoTDBDescriptor {
   }
 
   public void loadClusterProps(Properties properties) {
-    String configNodeUrls = properties.getProperty(IoTDBConstant.DN_TARGET_CONFIG_NODES);
+    String configNodeUrls = properties.getProperty(IoTDBConstant.DN_TARGET_CONFIG_NODE_LIST);
     if (configNodeUrls != null) {
       try {
         conf.setTargetConfigNodeList(NodeUrlUtils.parseTEndPointUrls(configNodeUrls));
diff --git a/server/src/main/java/org/apache/iotdb/db/service/DataNode.java b/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
index 774f84074c..56981ac5ec 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
@@ -135,7 +135,7 @@ public class DataNode implements DataNodeMBean {
     for (TEndPoint endPoint : config.getTargetConfigNodeList()) {
       if (endPoint.getIp().equals("0.0.0.0")) {
         throw new ConfigurationException(
-            "The ip address of any target_config_nodes couldn't be 0.0.0.0");
+            "The ip address of any target_config_node_list couldn't be 0.0.0.0");
       }
     }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
index 8e49715cbc..4f5269e211 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
@@ -1037,8 +1037,8 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface
             .value();
 
     if (freeDisk != 0 && totalDisk != 0) {
-      double freeDiskRatio = (double) freeDisk * 100 / totalDisk;
-      loadSample.setDiskUsageRate(100.0 - freeDiskRatio);
+      double freeDiskRatio = (double) freeDisk / totalDisk;
+      loadSample.setDiskUsageRate(1.0 - freeDiskRatio);
       // Reset NodeStatus if necessary
       if (freeDiskRatio < commonConfig.getDiskSpaceWarningThreshold()) {
         commonConfig.setNodeStatus(NodeStatus.ReadOnly);
diff --git a/server/src/test/resources/datanode1conf/iotdb-datanode.properties b/server/src/test/resources/datanode1conf/iotdb-datanode.properties
index dc82c3b8be..e1d46f1e5a 100644
--- a/server/src/test/resources/datanode1conf/iotdb-datanode.properties
+++ b/server/src/test/resources/datanode1conf/iotdb-datanode.properties
@@ -26,7 +26,7 @@ dn_internal_port=9003
 dn_data_region_consensus_port=40010
 dn_schema_region_consensus_port=50030
 
-dn_target_config_nodes=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
+dn_target_config_node_list=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
 
 dn_system_dir=target/datanode1/system
 dn_data_dirs=target/datanode1/data
diff --git a/server/src/test/resources/datanode2conf/iotdb-datanode.properties b/server/src/test/resources/datanode2conf/iotdb-datanode.properties
index a8fdd288e7..151dfd93da 100644
--- a/server/src/test/resources/datanode2conf/iotdb-datanode.properties
+++ b/server/src/test/resources/datanode2conf/iotdb-datanode.properties
@@ -26,7 +26,7 @@ dn_internal_port=9005
 dn_data_region_consensus_port=40012
 dn_schema_region_consensus_port=50032
 
-dn_target_config_nodes=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
+dn_target_config_node_list=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
 
 dn_system_dir=target/datanode2/system
 dn_data_dirs=target/datanode2/data
diff --git a/server/src/test/resources/datanode3conf/iotdb-datanode.properties b/server/src/test/resources/datanode3conf/iotdb-datanode.properties
index 8755ca81c6..d1eb32f9ff 100644
--- a/server/src/test/resources/datanode3conf/iotdb-datanode.properties
+++ b/server/src/test/resources/datanode3conf/iotdb-datanode.properties
@@ -26,7 +26,7 @@ dn_internal_port=9007
 dn_data_region_consensus_port=40014
 dn_schema_region_consensus_port=50034
 
-dn_target_config_nodes=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
+dn_target_config_node_list=127.0.0.1:22277,127.0.0.1:22279,127.0.0.1:22281
 
 dn_system_dir=target/datanode3/system
 dn_data_dirs=target/datanode3/data