Posted to commits@iotdb.apache.org by ha...@apache.org on 2022/10/28 11:27:02 UTC

[iotdb] 01/01: Refactor the configuration file

This is an automated email from the ASF dual-hosted git repository.

haonan pushed a commit to branch config
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 0e0ac5591dd5ef325799791016f76e4f3489c06f
Author: HTHou <hh...@outlook.com>
AuthorDate: Fri Oct 28 19:23:47 2022 +0800

    Refactor the configuration file
---
 confignode/src/assembly/confignode.xml             |    4 +
 .../resources/conf/iotdb-confignode.properties     |  301 +-----
 .../confignode/conf/ConfigNodeDescriptor.java      |  324 +++---
 distribution/src/assembly/all.xml                  |    4 +
 distribution/src/assembly/confignode.xml           |    4 +
 distribution/src/assembly/datanode.xml             |    4 +
 .../server/CustomizedJsonPayloadFormatter.java     |    2 +-
 .../apache/iotdb/integration/env/ClusterNode.java  |    2 +-
 .../resources/conf/iotdb-common.properties         |  996 +++++++++---------
 .../apache/iotdb/commons/conf/CommonConfig.java    |    2 +
 .../resources/conf/schema-rocksdb.properties       |    8 +-
 .../assembly/resources/conf/schema-tag.properties  |    2 +-
 .../resources/conf/iotdb-datanode.properties       | 1066 +-------------------
 server/src/assembly/server.xml                     |    4 +
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |    4 +-
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |   48 +-
 .../exception/query/PathNumOverLimitException.java |    2 +-
 .../exception/sql/PathNumOverLimitException.java   |    2 +-
 .../apache/iotdb/db/metadata/tag/TagLogFile.java   |    2 +-
 .../apache/iotdb/db/conf/IoTDBDescriptorTest.java  |    6 +-
 20 files changed, 809 insertions(+), 1978 deletions(-)

diff --git a/confignode/src/assembly/confignode.xml b/confignode/src/assembly/confignode.xml
index 4531f5b04d..73d52a2be0 100644
--- a/confignode/src/assembly/confignode.xml
+++ b/confignode/src/assembly/confignode.xml
@@ -47,5 +47,9 @@
             <source>${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf/iotdb-confignode-metric.yml</source>
             <destName>conf/iotdb-confignode-metric.yml</destName>
         </file>
+        <file>
+            <source>${maven.multiModuleProjectDirectory}/node-commons/src/assembly/resources/conf/iotdb-common.properties</source>
+            <destName>conf/iotdb-common.properties</destName>
+        </file>
     </files>
 </assembly>
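
The assembly change above bundles node-commons' iotdb-common.properties into the ConfigNode's conf/ directory. For reference, a minimal sketch of how a node could locate and read that bundled file at startup, assuming the conf directory is derived from a CONFIGNODE_HOME system property (the class below is illustrative, not part of this commit):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Properties;

    public final class CommonConfigLocator {
      // Resolve <CONFIGNODE_HOME>/conf/iotdb-common.properties and load it if present.
      static Properties loadCommonProperties() throws IOException {
        String home = System.getProperty("CONFIGNODE_HOME", ".");
        Path commonFile = Paths.get(home, "conf", "iotdb-common.properties");
        Properties props = new Properties();
        if (Files.exists(commonFile)) {
          try (InputStream in = Files.newInputStream(commonFile)) {
            props.load(in);
          }
        }
        return props;
      }
    }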
diff --git a/confignode/src/assembly/resources/conf/iotdb-confignode.properties b/confignode/src/assembly/resources/conf/iotdb-confignode.properties
index a6a4f10ccf..9d7bc5d51c 100644
--- a/confignode/src/assembly/resources/conf/iotdb-confignode.properties
+++ b/confignode/src/assembly/resources/conf/iotdb-confignode.properties
@@ -18,7 +18,7 @@
 #
 
 ####################
-### Startup configuration
+### Config Node RPC Configuration
 ####################
 
 
@@ -37,6 +37,9 @@ internal_port=22277
 # Datatype: int
 consensus_port=22278
 
+####################
+### Target Config Nodes
+####################
 
 # At least one running ConfigNode should be set for joining the cluster
 # Format: ip:port
@@ -49,143 +52,6 @@ consensus_port=22278
 target_config_nodes=127.0.0.1:22277
 
 
-####################
-### Region configuration
-####################
-
-
-# SchemaRegion consensus protocol type.
-# This parameter is unmodifiable after ConfigNode starts for the first time.
-# These consensus protocols are currently supported:
-# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica)
-# 2. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
-# Datatype: String
-# schema_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus
-
-# The maximum number of SchemaRegion expected to be managed by each DataNode.
-# Notice: Since each StorageGroup requires at least one SchemaRegion to manage its schema,
-# this parameter doesn't limit the number of SchemaRegions when there are too many StorageGroups.
-# Datatype: Double
-# schema_region_per_data_node=1.0
-
-# DataRegion consensus protocol type.
-# This parameter is unmodifiable after ConfigNode starts for the first time.
-# These consensus protocols are currently supported:
-# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica)
-# 2. org.apache.iotdb.consensus.multileader.MultiLeaderConsensus(weak consistency, high performance)
-# 3. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
-# Datatype: String
-# data_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus
-
-# The maximum number of DataRegion expected to be managed by each processor.
-# Notice: Since each StorageGroup requires at least two DataRegions to manage its data,
-# this parameter doesn't limit the number of DataRegions when there are too many StorageGroups.
-# Datatype: Double
-# data_region_per_processor=0.5
-
-# Region allocate strategy
-# These allocate strategies are currently supported:
-# 1. GREEDY(Default, when region is allocated, always choose the dataNode that has bean allocated the least regions)
-# 2. COPY_SET(Random replication according to wight calculated from number of regions on all online dataNodes, suitable for large clusters)
-# Datatype: String
-# region_allocate_strategy=GREEDY
-
-####################
-### PartitionSlot configuration
-####################
-
-# All parameters in PartitionSlot configuration is unmodifiable after ConfigNode starts for the first time.
-# And these parameters should be consistent within the ConfigNodeGroup.
-
-
-# Number of SeriesPartitionSlots per StorageGroup
-# Datatype: int
-# series_partition_slot_num=10000
-
-
-# SeriesPartitionSlot executor class
-# These hashing algorithms are currently supported:
-# 1. BKDRHashExecutor(Default)
-# 2. APHashExecutor
-# 3. JSHashExecutor
-# 4. SDBMHashExecutor
-# Also, if you want to implement your own SeriesPartition executor, you can inherit the SeriesPartitionExecutor class and
-# modify this parameter to correspond to your Java class
-# Datatype: String
-# series_partition_executor_class=org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor
-
-
-####################
-### StorageGroup configuration
-####################
-
-# All parameters in StorageGroup configuration is unmodifiable after ConfigNode starts for the first time.
-# And these parameters should be consistent within the ConfigNodeGroup.
-
-
-# Default TTL for storage groups that are not set TTL by statements, in ms. If not set (default),
-# the TTL will be unlimited.
-# Notice: if this property is changed, previous created storage group which are not set TTL will
-# also be affected. And negative values are accepted, which means you can only insert future data.
-# Datatype: long
-# default_ttl=36000000
-
-
-# Time partition interval in milliseconds, default is equal to one week
-# Datatype: long
-# time_partition_interval_for_routing=604800000
-
-
-# Default number of SchemaRegion replicas
-# Datatype: int
-# schema_replication_factor=1
-
-
-# Default number of DataRegion replicas
-# Datatype: int
-# data_replication_factor=1
-
-
-####################
-### thrift rpc configuration
-####################
-
-
-# this feature is under development, set this as false before it is done.
-# Datatype: boolean
-# rpc_thrift_compression_enable=false
-
-
-# if true, a snappy based compression method will be called before sending data by the network
-# Datatype: boolean
-# this feature is under development, set this as false before it is done.
-# rpc_advanced_compression_enable=false
-
-
-# Datatype: int
-# rpc_max_concurrent_client_num=65535
-
-
-# thrift max frame size, 512MB by default
-# Datatype: int
-# thrift_max_frame_size=536870912
-
-
-# thrift init buffer size
-# Datatype: int
-# thrift_init_buffer_size=1024
-
-
-# Thrift socket and connection timeout between raft nodes, in milliseconds.
-# Datatype: int
-# connection_timeout_ms=20000
-
-
-# selector thread (TAsyncClientManager) nums for async thread in a clientManager
-# Datatype: int
-# selector_thread_nums_of_client_manager=1
-
-
 ####################
 ### Directory configuration
 ####################
@@ -234,7 +100,6 @@ target_config_nodes=127.0.0.1:22277
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # udf_lib_dir=ext/udf
 
-
 # temporary lib dir
 # If this property is unset, system will save the data in the default relative path directory under
 # the UDF folder(i.e., %CONFIGNODE_HOME%/ext/temporary).
@@ -255,158 +120,44 @@ target_config_nodes=127.0.0.1:22277
 # udf_temporary_lib_dir=ext/udf_temporary
 
 
-####################
-### Procedure Configuration
-####################
-
-
-# Default number of worker thread count
-# Datatype: int
-#procedure_core_worker_thread_size=4
-
-
-# Default time interval of completed procedure cleaner work in, time unit is second
-# Datatype: int
-#procedure_completed_clean_interval=30
-
-
-# Default ttl of completed procedure, time unit is second
-# Datatype: int
-#procedure_completed_evict_ttl=800
-
-####################
-### Heartbeat configuration
-####################
-
-
-# The heartbeat interval in milliseconds, default is 1000ms
-# Datatype: long
-# heartbeat_interval=1000
-
 
 ####################
-### Routing policy
-####################
-
-
-# The routing policy of read/write requests
-# These routing policy are currently supported:
-# 1. leader(Default, routing to leader replica)
-# 2. greedy(Routing to replica with the lowest load, might cause read un-consistent)
-# Datatype: string
-# routing_policy=leader
-
-
-####################
-### Read configuration
+### thrift rpc configuration
 ####################
 
 
-# The read consistency level
-# These consistency levels are currently supported:
-# 1. strong(Default, read from the leader replica)
-# 2. weak(Read from a random replica)
-# Datatype: string
-# read_consistency_level=strong
-
-
-####################
-### Authorization Configuration
-####################
+# This feature is under development; keep it set to false until it is complete.
+# Datatype: boolean
+# rpc_thrift_compression_enable=false
 
 
-# which class to serve for authorization. By default, it is LocalFileAuthorizer.
-# Another choice is org.apache.iotdb.db.auth.authorizer.OpenIdAuthorizer
-# authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer
+# If true, a snappy-based compression method will be called before sending data over the network.
+# Datatype: boolean
+# This feature is under development; keep it set to false until it is complete.
+# rpc_advanced_compression_enable=false
 
-# If OpenIdAuthorizer is enabled, then openID_url must be set.
-# openID_url=
 
-# admin username, default is root
-# Datatype: string
-# admin_name=root
+# Datatype: int
+# rpc_max_concurrent_client_num=65535
 
-# encryption provider class
-# iotdb_server_encrypt_decrypt_provider=org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt
 
-# encryption provided class parameter
-# iotdb_server_encrypt_decrypt_provider_parameter=
+# thrift max frame size, 512MB by default
+# Datatype: int
+# thrift_max_frame_size=536870912
 
-# admin password, default is root
-# Datatype: string
-# admin_password=root
 
+# thrift init buffer size
+# Datatype: int
+# thrift_init_buffer_size=1024
 
-####################
-### RatisConsensus Configuration
-####################
 
-# max payload size for a single log-sync-RPC from leader to follower
-# partition_region_ratis_log_appender_buffer_size_max = 4194304
-# schema_region_ratis_log_appender_buffer_size_max = 4194304
-# data_region_ratis_log_appender_buffer_size_max = 4194304
-
-# trigger a snapshot when ratis_snapshot_trigger_threshold logs are written
-# partition_region_ratis_snapshot_trigger_threshold = 400000
-# schema_region_ratis_snapshot_trigger_threshold = 400000
-# data_region_ratis_snapshot_trigger_threshold = 400000
-
-# allow flushing Raft Log asynchronously
-# partition_region_ratis_log_unsafe_flush_enable = false
-# schema_region_ratis_log_unsafe_flush_enable = false
-# data_region_ratis_log_unsafe_flush_enable = false
-
-# max capacity of a single Raft Log segment (by default 24MB)
-# partition_region_ratis_log_segment_size_max = 25165824
-# schema_region_ratis_log_segment_size_max = 25165824
-# data_region_ratis_log_segment_size_max = 25165824
-
-# flow control window for ratis grpc log appender
-# partition_region_ratis_grpc_flow_control_window = 4194304
-# schema_region_ratis_grpc_flow_control_window = 4194304
-# data_region_ratis_grpc_flow_control_window = 4194304
-
-# min election timeout for leader election
-# partition_region_ratis_rpc_leader_election_timeout_min_ms = 2000
-# schema_region_ratis_rpc_leader_election_timeout_min_ms = 2000
-# data_region_ratis_rpc_leader_election_timeout_min_ms = 2000
-
-# max election timeout for leader election
-# partition_region_ratis_rpc_leader_election_timeout_max_ms = 4000
-# schema_region_ratis_rpc_leader_election_timeout_max_ms = 4000
-# data_region_ratis_rpc_leader_election_timeout_max_ms = 4000
-
-# ratis client retry threshold
-# partition_region_ratis_request_timeout_ms = 10000
-# schema_region_ratis_request_timeout_ms = 10000
-# data_region_ratis_request_timeout_ms = 10000
-
-# currently we use exponential back-off retry policy for ratis
-# partition_region_ratis_max_retry_attempts = 10
-# partition_region_ratis_initial_sleep_time_ms = 100
-# partition_region_ratis_max_sleep_time_ms = 10000
-# schema_region_ratis_max_retry_attempts = 10
-# schema_region_ratis_initial_sleep_time_ms = 100
-# schema_region_ratis_max_sleep_time_ms = 10000
-# data_region_ratis_max_retry_attempts = 10
-# data_region_ratis_initial_sleep_time_ms = 100
-# data_region_ratis_max_sleep_time_ms = 10000
-
-# preserve certain logs when take snapshot and purge
-# partition_region_ratis_preserve_logs_num_when_purge = 1000
-# schema_region_ratis_preserve_logs_num_when_purge = 1000
-# data_region_ratis_preserve_logs_num_when_purge = 1000
-
-# first election timeout
-# ratis_first_election_timeout_min_ms = 50
-# ratis_first_election_timeout_max_ms = 150
+# Thrift socket and connection timeout between raft nodes, in milliseconds.
+# Datatype: int
+# connection_timeout_ms=20000
 
-####################
-### Disk Monitor
-####################
 
+# Number of selector threads (TAsyncClientManager) used for asynchronous clients in a clientManager
+# Datatype: int
+# selector_thread_nums_of_client_manager=1
 
-# Disk remaining threshold at which DataNode is set to ReadOnly status
-# Datatype: double(percentage)
-# disk_space_warning_threshold=5.0
 
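
target_config_nodes above keeps the ip:port format, and the descriptor hands the string to NodeUrlUtils.parseTEndPointUrl. As a rough illustration of that kind of parsing (the helper below is a hypothetical stand-in, not the project's implementation):

    // Hypothetical stand-in for the endpoint parsing done via NodeUrlUtils.parseTEndPointUrl.
    final class EndPointExample {
      final String ip;
      final int port;

      EndPointExample(String ip, int port) {
        this.ip = ip;
        this.port = port;
      }

      // Parse an "ip:port" string such as "127.0.0.1:22277".
      static EndPointExample parse(String url) {
        int idx = url.lastIndexOf(':');
        if (idx <= 0 || idx == url.length() - 1) {
          throw new IllegalArgumentException("Expected ip:port, got: " + url);
        }
        return new EndPointExample(url.substring(0, idx), Integer.parseInt(url.substring(idx + 1)));
      }
    }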
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
index 039774db19..946ef82088 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
@@ -18,6 +18,7 @@
  */
 package org.apache.iotdb.confignode.conf;
 
+import org.apache.iotdb.commons.conf.CommonConfig;
 import org.apache.iotdb.commons.conf.CommonDescriptor;
 import org.apache.iotdb.commons.conf.IoTDBConstant;
 import org.apache.iotdb.commons.exception.BadNodeUrlException;
@@ -30,6 +31,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.MalformedURLException;
@@ -56,19 +58,14 @@ public class ConfigNodeDescriptor {
    *
   * @return url object if the location exists, otherwise null.
    */
-  public URL getPropsUrl() {
+  public URL getPropsUrl(String configFileName) {
     // Check if a config-directory was specified first.
-    String urlString = System.getProperty(ConfigNodeConstant.CONFIGNODE_CONF, null);
+    String urlString = System.getProperty(configFileName, null);
     // If it wasn't, check if a home directory was provided
     if (urlString == null) {
-      urlString = System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null);
+      urlString = System.getProperty(configFileName, null);
       if (urlString != null) {
-        urlString =
-            urlString
-                + File.separatorChar
-                + "conf"
-                + File.separatorChar
-                + ConfigNodeConstant.CONF_FILE_NAME;
+        urlString = urlString + File.separatorChar + "conf" + File.separatorChar + configFileName;
       } else {
         // When start ConfigNode with the script, the environment variables CONFIGNODE_CONF
         // and CONFIGNODE_HOME will be set. But we didn't set these two in developer mode.
@@ -79,7 +76,7 @@ public class ConfigNodeDescriptor {
     // If a config location was provided, but it doesn't end with a properties file,
     // append the default location.
     else if (!urlString.endsWith(".properties")) {
-      urlString += (File.separatorChar + ConfigNodeConstant.CONF_FILE_NAME);
+      urlString += (File.separatorChar + configFileName);
     }
 
     // If the url doesn't start with "file:" or "classpath:", it's provided as a no path.
@@ -95,7 +92,28 @@ public class ConfigNodeDescriptor {
   }
 
   private void loadProps() {
-    URL url = getPropsUrl();
+    URL url = getPropsUrl(CommonConfig.CONFIG_NAME);
+    if (url == null) {
+      LOGGER.warn("Couldn't load the configuration from any of the known sources.");
+      return;
+    }
+    try (InputStream inputStream = url.openStream()) {
+
+      LOGGER.info("Start to read config file {}", url);
+      Properties properties = new Properties();
+      properties.load(inputStream);
+
+      loadProperties(properties);
+
+    } catch (FileNotFoundException e) {
+      LOGGER.warn("Fail to find config file {}", url, e);
+    } catch (IOException e) {
+      LOGGER.warn("Cannot load config file, use default configuration", e);
+    } catch (Exception e) {
+      LOGGER.warn("Incorrect format in config file, use default configuration", e);
+    }
+
+    url = getPropsUrl(ConfigNodeConstant.CONF_FILE_NAME);
     if (url == null) {
       LOGGER.warn(
           "Couldn't load the ConfigNode configuration from any of the known sources. Use default configuration.");
@@ -108,179 +126,181 @@ public class ConfigNodeDescriptor {
 
       Properties properties = new Properties();
       properties.load(inputStream);
+      loadProperties(properties);
 
-      conf.setInternalAddress(
-          properties.getProperty(IoTDBConstant.INTERNAL_ADDRESS, conf.getInternalAddress()));
+    } catch (IOException | BadNodeUrlException e) {
+      LOGGER.warn("Couldn't load ConfigNode conf file, use default config", e);
+    } finally {
+      conf.updatePath();
+      commonDescriptor
+          .getConfig()
+          .updatePath(System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null));
+      MetricConfigDescriptor.getInstance()
+          .getMetricConfig()
+          .updateRpcInstance(conf.getInternalAddress(), conf.getInternalPort());
+    }
+  }
 
-      conf.setInternalPort(
-          Integer.parseInt(
-              properties.getProperty(
-                  IoTDBConstant.INTERNAL_PORT, String.valueOf(conf.getInternalPort()))));
+  private void loadProperties(Properties properties) throws BadNodeUrlException, IOException {
+    conf.setInternalAddress(
+        properties.getProperty(IoTDBConstant.INTERNAL_ADDRESS, conf.getInternalAddress()));
 
-      conf.setConsensusPort(
-          Integer.parseInt(
-              properties.getProperty(
-                  IoTDBConstant.CONSENSUS_PORT, String.valueOf(conf.getConsensusPort()))));
+    conf.setInternalPort(
+        Integer.parseInt(
+            properties.getProperty(
+                IoTDBConstant.INTERNAL_PORT, String.valueOf(conf.getInternalPort()))));
 
-      // TODO: Enable multiple target_config_nodes
-      String targetConfigNodes = properties.getProperty(IoTDBConstant.TARGET_CONFIG_NODES, null);
-      if (targetConfigNodes != null) {
-        conf.setTargetConfigNode(NodeUrlUtils.parseTEndPointUrl(targetConfigNodes));
-      }
+    conf.setConsensusPort(
+        Integer.parseInt(
+            properties.getProperty(
+                IoTDBConstant.CONSENSUS_PORT, String.valueOf(conf.getConsensusPort()))));
 
-      conf.setSeriesPartitionSlotNum(
-          Integer.parseInt(
-              properties.getProperty(
-                  "series_partition_slot_num", String.valueOf(conf.getSeriesPartitionSlotNum()))));
+    // TODO: Enable multiple target_config_nodes
+    String targetConfigNodes = properties.getProperty(IoTDBConstant.TARGET_CONFIG_NODES, null);
+    if (targetConfigNodes != null) {
+      conf.setTargetConfigNode(NodeUrlUtils.parseTEndPointUrl(targetConfigNodes));
+    }
 
-      conf.setSeriesPartitionExecutorClass(
-          properties.getProperty(
-              "series_partition_executor_class", conf.getSeriesPartitionExecutorClass()));
+    conf.setSeriesPartitionSlotNum(
+        Integer.parseInt(
+            properties.getProperty(
+                "series_partition_slot_num", String.valueOf(conf.getSeriesPartitionSlotNum()))));
 
-      conf.setConfigNodeConsensusProtocolClass(
-          properties.getProperty(
-              "config_node_consensus_protocol_class", conf.getConfigNodeConsensusProtocolClass()));
+    conf.setSeriesPartitionExecutorClass(
+        properties.getProperty(
+            "series_partition_executor_class", conf.getSeriesPartitionExecutorClass()));
 
-      conf.setSchemaRegionConsensusProtocolClass(
-          properties.getProperty(
-              "schema_region_consensus_protocol_class",
-              conf.getSchemaRegionConsensusProtocolClass()));
+    conf.setConfigNodeConsensusProtocolClass(
+        properties.getProperty(
+            "config_node_consensus_protocol_class", conf.getConfigNodeConsensusProtocolClass()));
 
-      conf.setSchemaRegionPerDataNode(
-          Double.parseDouble(
-              properties.getProperty(
-                  "schema_region_per_data_node",
-                  String.valueOf(conf.getSchemaRegionPerDataNode()))));
+    conf.setSchemaRegionConsensusProtocolClass(
+        properties.getProperty(
+            "schema_region_consensus_protocol_class",
+            conf.getSchemaRegionConsensusProtocolClass()));
 
-      conf.setDataRegionConsensusProtocolClass(
-          properties.getProperty(
-              "data_region_consensus_protocol_class", conf.getDataRegionConsensusProtocolClass()));
+    conf.setSchemaRegionPerDataNode(
+        Double.parseDouble(
+            properties.getProperty(
+                "schema_region_per_data_node", String.valueOf(conf.getSchemaRegionPerDataNode()))));
 
-      conf.setDataRegionPerProcessor(
-          Double.parseDouble(
-              properties.getProperty(
-                  "data_region_per_processor", String.valueOf(conf.getDataRegionPerProcessor()))));
-
-      try {
-        conf.setRegionAllocateStrategy(
-            RegionBalancer.RegionAllocateStrategy.valueOf(
-                properties.getProperty(
-                    "region_allocate_strategy", conf.getRegionAllocateStrategy().name())));
-      } catch (IllegalArgumentException e) {
-        LOGGER.warn(
-            "The configured region allocate strategy does not exist, use the default: GREEDY!");
-      }
+    conf.setDataRegionConsensusProtocolClass(
+        properties.getProperty(
+            "data_region_consensus_protocol_class", conf.getDataRegionConsensusProtocolClass()));
 
-      conf.setRpcAdvancedCompressionEnable(
-          Boolean.parseBoolean(
-              properties.getProperty(
-                  "rpc_advanced_compression_enable",
-                  String.valueOf(conf.isRpcAdvancedCompressionEnable()))));
+    conf.setDataRegionPerProcessor(
+        Double.parseDouble(
+            properties.getProperty(
+                "data_region_per_processor", String.valueOf(conf.getDataRegionPerProcessor()))));
 
-      conf.setRpcMaxConcurrentClientNum(
-          Integer.parseInt(
+    try {
+      conf.setRegionAllocateStrategy(
+          RegionBalancer.RegionAllocateStrategy.valueOf(
               properties.getProperty(
-                  "rpc_max_concurrent_client_num",
-                  String.valueOf(conf.getRpcMaxConcurrentClientNum()))));
+                  "region_allocate_strategy", conf.getRegionAllocateStrategy().name())));
+    } catch (IllegalArgumentException e) {
+      LOGGER.warn(
+          "The configured region allocate strategy does not exist, use the default: GREEDY!");
+    }
 
-      conf.setThriftDefaultBufferSize(
-          Integer.parseInt(
-              properties.getProperty(
-                  "thrift_init_buffer_size", String.valueOf(conf.getThriftDefaultBufferSize()))));
+    conf.setRpcAdvancedCompressionEnable(
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "rpc_advanced_compression_enable",
+                String.valueOf(conf.isRpcAdvancedCompressionEnable()))));
 
-      conf.setThriftMaxFrameSize(
-          Integer.parseInt(
-              properties.getProperty(
-                  "thrift_max_frame_size", String.valueOf(conf.getThriftMaxFrameSize()))));
+    conf.setRpcMaxConcurrentClientNum(
+        Integer.parseInt(
+            properties.getProperty(
+                "rpc_max_concurrent_client_num",
+                String.valueOf(conf.getRpcMaxConcurrentClientNum()))));
 
-      conf.setSystemDir(properties.getProperty("system_dir", conf.getSystemDir()));
+    conf.setThriftDefaultBufferSize(
+        Integer.parseInt(
+            properties.getProperty(
+                "thrift_init_buffer_size", String.valueOf(conf.getThriftDefaultBufferSize()))));
 
-      conf.setConsensusDir(properties.getProperty("consensus_dir", conf.getConsensusDir()));
+    conf.setThriftMaxFrameSize(
+        Integer.parseInt(
+            properties.getProperty(
+                "thrift_max_frame_size", String.valueOf(conf.getThriftMaxFrameSize()))));
 
-      conf.setUdfLibDir(properties.getProperty("udf_lib_dir", conf.getUdfLibDir()));
+    conf.setSystemDir(properties.getProperty("system_dir", conf.getSystemDir()));
 
-      conf.setTemporaryLibDir(
-          properties.getProperty("udf_temporary_lib_dir", conf.getTemporaryLibDir()));
+    conf.setConsensusDir(properties.getProperty("consensus_dir", conf.getConsensusDir()));
 
-      conf.setTriggerLibDir(properties.getProperty("trigger_lib_dir", conf.getTriggerLibDir()));
+    conf.setUdfLibDir(properties.getProperty("udf_lib_dir", conf.getUdfLibDir()));
 
-      conf.setTimePartitionInterval(
-          Long.parseLong(
-              properties.getProperty(
-                  "time_partition_interval_for_routing",
-                  String.valueOf(conf.getTimePartitionInterval()))));
+    conf.setTemporaryLibDir(
+        properties.getProperty("udf_temporary_lib_dir", conf.getTemporaryLibDir()));
 
-      conf.setSchemaReplicationFactor(
-          Integer.parseInt(
-              properties.getProperty(
-                  "schema_replication_factor", String.valueOf(conf.getSchemaReplicationFactor()))));
+    conf.setTriggerLibDir(properties.getProperty("trigger_lib_dir", conf.getTriggerLibDir()));
 
-      conf.setDataReplicationFactor(
-          Integer.parseInt(
-              properties.getProperty(
-                  "data_replication_factor", String.valueOf(conf.getDataReplicationFactor()))));
+    conf.setTimePartitionInterval(
+        Long.parseLong(
+            properties.getProperty(
+                "time_partition_interval_for_routing",
+                String.valueOf(conf.getTimePartitionInterval()))));
 
-      conf.setHeartbeatInterval(
-          Long.parseLong(
-              properties.getProperty(
-                  "heartbeat_interval", String.valueOf(conf.getHeartbeatInterval()))));
+    conf.setSchemaReplicationFactor(
+        Integer.parseInt(
+            properties.getProperty(
+                "schema_replication_factor", String.valueOf(conf.getSchemaReplicationFactor()))));
 
-      String routingPolicy = properties.getProperty("routing_policy", conf.getRoutingPolicy());
-      if (routingPolicy.equals(RouteBalancer.GREEDY_POLICY)
-          || routingPolicy.equals(RouteBalancer.LEADER_POLICY)) {
-        conf.setRoutingPolicy(routingPolicy);
-      } else {
-        throw new IOException(
-            String.format(
-                "Unknown routing_policy: %s, please set to \"leader\" or \"greedy\"",
-                routingPolicy));
-      }
+    conf.setDataReplicationFactor(
+        Integer.parseInt(
+            properties.getProperty(
+                "data_replication_factor", String.valueOf(conf.getDataReplicationFactor()))));
 
-      String readConsistencyLevel =
-          properties.getProperty("read_consistency_level", conf.getReadConsistencyLevel());
-      if (readConsistencyLevel.equals("strong") || readConsistencyLevel.equals("weak")) {
-        conf.setReadConsistencyLevel(readConsistencyLevel);
-      } else {
-        throw new IOException(
-            String.format(
-                "Unknown read_consistency_level: %s, please set to \"strong\" or \"weak\"",
-                readConsistencyLevel));
-      }
+    conf.setHeartbeatInterval(
+        Long.parseLong(
+            properties.getProperty(
+                "heartbeat_interval", String.valueOf(conf.getHeartbeatInterval()))));
+
+    String routingPolicy = properties.getProperty("routing_policy", conf.getRoutingPolicy());
+    if (routingPolicy.equals(RouteBalancer.GREEDY_POLICY)
+        || routingPolicy.equals(RouteBalancer.LEADER_POLICY)) {
+      conf.setRoutingPolicy(routingPolicy);
+    } else {
+      throw new IOException(
+          String.format(
+              "Unknown routing_policy: %s, please set to \"leader\" or \"greedy\"", routingPolicy));
+    }
 
-      // commons
-      commonDescriptor.loadCommonProps(properties);
-      commonDescriptor.initCommonConfigDir(conf.getSystemDir());
+    String readConsistencyLevel =
+        properties.getProperty("read_consistency_level", conf.getReadConsistencyLevel());
+    if (readConsistencyLevel.equals("strong") || readConsistencyLevel.equals("weak")) {
+      conf.setReadConsistencyLevel(readConsistencyLevel);
+    } else {
+      throw new IOException(
+          String.format(
+              "Unknown read_consistency_level: %s, please set to \"strong\" or \"weak\"",
+              readConsistencyLevel));
+    }
 
-      conf.setProcedureCompletedEvictTTL(
-          Integer.parseInt(
-              properties.getProperty(
-                  "procedure_completed_evict_ttl",
-                  String.valueOf(conf.getProcedureCompletedEvictTTL()))));
+    // commons
+    commonDescriptor.loadCommonProps(properties);
+    commonDescriptor.initCommonConfigDir(conf.getSystemDir());
 
-      conf.setProcedureCompletedCleanInterval(
-          Integer.parseInt(
-              properties.getProperty(
-                  "procedure_completed_clean_interval",
-                  String.valueOf(conf.getProcedureCompletedCleanInterval()))));
+    conf.setProcedureCompletedEvictTTL(
+        Integer.parseInt(
+            properties.getProperty(
+                "procedure_completed_evict_ttl",
+                String.valueOf(conf.getProcedureCompletedEvictTTL()))));
 
-      conf.setProcedureCoreWorkerThreadsSize(
-          Integer.parseInt(
-              properties.getProperty(
-                  "procedure_core_worker_thread_size",
-                  String.valueOf(conf.getProcedureCoreWorkerThreadsSize()))));
+    conf.setProcedureCompletedCleanInterval(
+        Integer.parseInt(
+            properties.getProperty(
+                "procedure_completed_clean_interval",
+                String.valueOf(conf.getProcedureCompletedCleanInterval()))));
 
-      loadRatisConsensusConfig(properties);
-    } catch (IOException | BadNodeUrlException e) {
-      LOGGER.warn("Couldn't load ConfigNode conf file, use default config", e);
-    } finally {
-      conf.updatePath();
-      commonDescriptor
-          .getConfig()
-          .updatePath(System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null));
-      MetricConfigDescriptor.getInstance()
-          .getMetricConfig()
-          .updateRpcInstance(conf.getInternalAddress(), conf.getInternalPort());
-    }
+    conf.setProcedureCoreWorkerThreadsSize(
+        Integer.parseInt(
+            properties.getProperty(
+                "procedure_core_worker_thread_size",
+                String.valueOf(conf.getProcedureCoreWorkerThreadsSize()))));
+
+    loadRatisConsensusConfig(properties);
   }
 
   private void loadRatisConsensusConfig(Properties properties) {
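
The refactored loadProps() above reads iotdb-common.properties first and then iotdb-confignode.properties, passing both through the same loadProperties(Properties) method, so node-specific values take effect after the shared ones. A minimal sketch of that layering with java.util.Properties (file names are the ones introduced by this commit; the merge helper itself is illustrative only):

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;
    import java.util.Properties;

    public final class LayeredPropertiesSketch {
      // Load the shared file first, then let the node-specific file override matching keys.
      static Properties loadLayered(URL commonUrl, URL nodeSpecificUrl) throws IOException {
        Properties merged = new Properties();
        loadInto(merged, commonUrl);       // iotdb-common.properties
        loadInto(merged, nodeSpecificUrl); // iotdb-confignode.properties wins on duplicate keys
        return merged;
      }

      private static void loadInto(Properties target, URL url) throws IOException {
        if (url == null) {
          return; // mirror the descriptor's fallback to default values
        }
        try (InputStream in = url.openStream()) {
          target.load(in);
        }
      }
    }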
diff --git a/distribution/src/assembly/all.xml b/distribution/src/assembly/all.xml
index 24ba86f0a7..63a9d2124f 100644
--- a/distribution/src/assembly/all.xml
+++ b/distribution/src/assembly/all.xml
@@ -54,6 +54,10 @@
             <outputDirectory>conf</outputDirectory>
             <directory>${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/conf</directory>
         </fileSet>
+        <fileSet>
+            <outputDirectory>conf</outputDirectory>
+            <directory>${maven.multiModuleProjectDirectory}/node-commons/src/assembly/resources/conf</directory>
+        </fileSet>
         <fileSet>
             <outputDirectory>conf</outputDirectory>
             <directory>${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf</directory>
diff --git a/distribution/src/assembly/confignode.xml b/distribution/src/assembly/confignode.xml
index d37257c2f6..bf4a4252a4 100644
--- a/distribution/src/assembly/confignode.xml
+++ b/distribution/src/assembly/confignode.xml
@@ -46,6 +46,10 @@
             <directory>${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/conf</directory>
             <outputDirectory>conf</outputDirectory>
         </fileSet>
+        <fileSet>
+            <outputDirectory>conf</outputDirectory>
+            <directory>${maven.multiModuleProjectDirectory}/node-commons/src/assembly/resources/conf</directory>
+        </fileSet>
         <!--    <fileSet>-->
         <!--      <directory>${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/tools</directory>-->
         <!--      <outputDirectory>tools</outputDirectory>-->
diff --git a/distribution/src/assembly/datanode.xml b/distribution/src/assembly/datanode.xml
index d67bc5f1aa..b4b5db96ef 100644
--- a/distribution/src/assembly/datanode.xml
+++ b/distribution/src/assembly/datanode.xml
@@ -42,6 +42,10 @@
         <!--            <outputDirectory>conf</outputDirectory>-->
         <!--            <directory>${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf</directory>-->
         <!--        </fileSet>-->
+        <fileSet>
+            <outputDirectory>conf</outputDirectory>
+            <directory>${maven.multiModuleProjectDirectory}/node-commons/src/assembly/resources/conf</directory>
+        </fileSet>
         <fileSet>
             <outputDirectory>grafana-metrics-example</outputDirectory>
             <directory>${maven.multiModuleProjectDirectory}/grafana-metrics-example</directory>
diff --git a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java
index 3332dba096..a56f10d157 100644
--- a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java
+++ b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java
@@ -56,7 +56,7 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter {
 
   @Override
   public String getName() {
-    // set the value of mqtt_payload_formatter in iotdb-datanode.properties as the following string:
+    // set the value of mqtt_payload_formatter in iotdb-common.properties to the following string:
     return "CustomizedJson";
   }
 }
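
The comment change reflects that mqtt_payload_formatter now lives in iotdb-common.properties. As a hedged sketch of how a formatter might be selected by the name returned from getName(), assuming formatters are discovered via Java's ServiceLoader (the actual lookup in IoTDB may differ):

    import java.util.ServiceLoader;

    // Assumes a PayloadFormatter interface with a getName() method, as implemented above.
    public final class PayloadFormatterLookup {
      static PayloadFormatter byName(String configuredName) {
        for (PayloadFormatter formatter : ServiceLoader.load(PayloadFormatter.class)) {
          if (formatter.getName().equals(configuredName)) {
            return formatter; // e.g. "CustomizedJson" from mqtt_payload_formatter
          }
        }
        throw new IllegalArgumentException("No payload formatter named " + configuredName);
      }
    }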
diff --git a/integration/src/main/java/org/apache/iotdb/integration/env/ClusterNode.java b/integration/src/main/java/org/apache/iotdb/integration/env/ClusterNode.java
index 59a12a6106..e5dae96923 100644
--- a/integration/src/main/java/org/apache/iotdb/integration/env/ClusterNode.java
+++ b/integration/src/main/java/org/apache/iotdb/integration/env/ClusterNode.java
@@ -117,7 +117,7 @@ public class ClusterNode {
       clusterConfig.putAll(clusterProperties);
       clusterConfig.store(new FileWriter(clusterConfigPath), null);
 
-      // iotdb-datanode.properties part
+      // iotdb-common.properties part
       String engineConfigPath =
           this.path
               + File.separator
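
For context, the integration harness above writes per-node property files before starting each node. A minimal sketch of that pattern with java.util.Properties (the path and keys below are placeholders, not the harness's real ones):

    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.Properties;

    public final class WriteNodeConfigSketch {
      // Write a small overrides file the way the harness stores per-node configuration.
      static void writeOverrides(String path) throws IOException {
        Properties props = new Properties();
        props.setProperty("rpc_port", "6667");      // placeholder key/value
        props.setProperty("internal_port", "9003"); // placeholder key/value
        try (FileWriter writer = new FileWriter(path)) {
          props.store(writer, "generated for an integration-test node");
        }
      }
    }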
diff --git a/server/src/assembly/resources/conf/iotdb-datanode.properties b/node-commons/src/assembly/resources/conf/iotdb-common.properties
similarity index 79%
copy from server/src/assembly/resources/conf/iotdb-datanode.properties
copy to node-commons/src/assembly/resources/conf/iotdb-common.properties
index 0b3761bc69..17f6479227 100644
--- a/server/src/assembly/resources/conf/iotdb-datanode.properties
+++ b/node-commons/src/assembly/resources/conf/iotdb-common.properties
@@ -18,59 +18,9 @@
 #
 
 ####################
-### RPC Configuration
+### Thrift Configuration
 ####################
 
-# could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address
-# if enable redirection in session, rpc_address should be the ip which session can connect.
-# Datatype: String
-rpc_address=0.0.0.0
-
-# Datatype: int
-rpc_port=6667
-
-####################
-### Shuffle Configuration
-####################
-# Datatype: int
-mpp_data_exchange_port=8777
-
-# Datatype: int
-# mpp_data_exchange_core_pool_size=10
-
-# Datatype: int
-# mpp_data_exchange_max_pool_size=10
-
-# Datatype: int
-# mpp_data_exchange_keep_alive_time_in_ms=1000
-
-# Datatype: String
-# used for communication between cluster nodes.
-# could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address.
-internal_address=0.0.0.0
-
-# Datatype: int
-# port for coordinator's communication between cluster nodes.
-internal_port=9003
-
-# Datatype: int
-# port for consensus's communication for data region between cluster nodes.
-data_region_consensus_port=40010
-
-# Datatype: int
-# port for consensus's communication for schema region between cluster nodes.
-schema_region_consensus_port=50010
-
-# At least one running ConfigNode should be set for joining the cluster
-# Format: ip:port
-# where the ip should be consistent with the target ConfigNode's confignode_internal_address,
-# and the port should be consistent with the target ConfigNode's confignode_internal_port.
-# When successfully connecting to the ConfigNodeGroup, DataNode will get all online
-# config nodes and store them in memory.
-# Datatype: String
-# Notice: The ip for any target_config_node should never be 0.0.0.0
-target_config_nodes=127.0.0.1:22277
-
 # Datatype: boolean
 # rpc_thrift_compression_enable=false
 
@@ -114,223 +64,66 @@ target_config_nodes=127.0.0.1:22277
 # selector_thread_nums_of_client_manager=1
 
 ####################
-### Write Ahead Log Configuration
+### Procedure Configuration
 ####################
 
-# Write mode of wal
-# The details of these three modes are as follows:
-# 1. DISABLE: the system will disable wal.
-# 2. SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully.
-# 3. ASYNC: the system will submit wal asynchronously, write request will return immediately no matter its wal is fsynced to the disk successfully.
-# The write performance order is DISABLE > ASYNC > SYNC, but only SYNC mode can ensure data durability.
-# wal_mode=ASYNC
-
-# wal dirs
-# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode).
-# If it is absolute, system will save the data in the exact location it points to.
-# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
-# If there are more than one directory, please separate them by commas ",".
-# Note: If wal_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path.
-# For windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
-# wal_dirs=data\\datanode\\wal
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# wal_dirs=data/datanode/wal
 
-# Max number of wal nodes, each node corresponds to one wal directory
-# The default value 0 means twice the number of wal dirs.
-# Notice: this value affects write performance significantly.
-# For non-SSD disks, values between one third and half of storage groups number are recommended.
+# Default number of procedure worker threads
 # Datatype: int
-# max_wal_nodes_num=0
+# procedure_core_worker_thread_size=4
 
-# Duration a wal flush operation will wait before calling fsync
-# A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists.
-# Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended.
-# Datatype: long
-# fsync_wal_delay_in_ms=3
-
-# Buffer size of each wal node
-# If it's a value smaller than 0, use the default value 16 * 1024 * 1024 bytes (16MB).
-# Datatype: int
-# wal_buffer_size_in_byte=16777216
 
-# Blocking queue capacity of each wal buffer, restricts maximum number of WALEdits cached in the blocking queue.
+# Time interval at which the completed-procedure cleaner runs, in seconds
 # Datatype: int
-# wal_buffer_queue_capacity=50
-
-# Size threshold of each wal file
-# When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created.
-# If it's a value smaller than 0, use the default value 10 * 1024 * 1024 (10MB).
-# Datatype: long
-# wal_file_size_threshold_in_byte=10485760
+# procedure_completed_clean_interval=30
 
-# Minimum ratio of effective information in wal files
-# This value should be between 0.0 and 1.0
-# If effective information ratio is below this value, MemTable snapshot or flush will be triggered.
-# Increase this value when wal occupies too much disk space. But, if this parameter is too large, the write performance may decline.
-# Datatype: double
-# wal_min_effective_info_ratio=0.1
 
-# MemTable size threshold for triggering MemTable snapshot in wal
-# When a memTable's size (in byte) exceeds this, wal can flush this memtable to disk, otherwise wal will snapshot this memtable in wal.
-# If it's a value smaller than 0, use the default value 8 * 1024 * 1024 bytes (8MB).
-# Datatype: long
-# wal_memtable_snapshot_threshold_in_byte=8388608
-
-# MemTable's max snapshot number in wal
-# If one memTable's snapshot number in wal exceeds this value, it will be flushed to disk.
+# Default TTL of completed procedures, in seconds
 # Datatype: int
-# max_wal_memtable_snapshot_num=1
-
-# The period when outdated wal files are periodically deleted
-# If this value is too large, outdated wal files may not able to be deleted in time.
-# If it's a value smaller than 0, use the default value 20 * 1000 ms (20 seconds).
-# Datatype: long
-# delete_wal_files_period_in_ms=20000
-
-# The minimum size of wal files when throttle down in MultiLeader consensus
-# If it's a value smaller than 0, use the default value 50 * 1024 * 1024 * 1024 bytes (50GB).
-# Datatype: long
-# multi_leader_throttle_threshold_in_byte=53687091200
-
-# Maximum wait time of write cache in MultiLeader consensus
-# If this value is less than or equal to 0, use the default value Long.MAX_VALUE.
-# Datatype: long
-# multi_leader_cache_window_time_in_ms=-1
+# procedure_completed_evict_ttl=800
 
 ####################
-### Directory Configuration
+### MPP Data Exchange Configuration
 ####################
 
-# system dir
-# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode/system).
-# If it is absolute, system will save the data in exact location it points to.
-# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
-# For windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
-# system_dir=data\\datanode\\system
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# system_dir=data/datanode/system
-
-
-# data dirs
-# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode/data).
-# If it is absolute, system will save the data in exact location it points to.
-# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
-# If there are more than one directory, please separate them by commas ",".
-# Note: If data_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path.
-# For windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
-# data_dirs=data\\datanode\\data
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# data_dirs=data/datanode/data
-
-
-# multi_dir_strategy
-# The strategy is used to choose a directory from data_dirs for the system to store a new tsfile.
-# System provides four strategies to choose from, or user can create his own strategy by extending org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategy.
-# The info of the four strategies are as follows:
-# 1. SequenceStrategy: the system will choose the directory in sequence.
-# 2. MaxDiskUsableSpaceFirstStrategy: the system will choose the directory whose disk has the maximum space.
-# 3. MinFolderOccupiedSpaceFirstStrategy: the system will choose the directory whose folder has the minimum occupied space.
-# 4. RandomOnDiskUsableSpaceStrategy: the system will randomly choose the directory based on usable space of disks. The more usable space, the greater the chance of being chosen;
-# Set SequenceStrategy,MaxDiskUsableSpaceFirstStrategy and MinFolderOccupiedSpaceFirstStrategy to apply the corresponding strategy.
-# If this property is unset, system will use MaxDiskUsableSpaceFirstStrategy as default strategy.
-# For this property, fully-qualified class name (include package name) and simple class name are both acceptable.
-# multi_dir_strategy=MaxDiskUsableSpaceFirstStrategy
-
-# consensus dir
-# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode).
-# If it is absolute, system will save the data in the exact location it points to.
-# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
-# Note: If consensus_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path.
-# For windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
-# consensus_dir=data\\datanode\\consensus
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# consensus_dir=data/datanode/consensus
-
-
-# TSFile storage file system. Currently, TsFiles are supported to be stored in LOCAL file system or HDFS.
-# Datatype: FSType
-# tsfile_storage_fs=LOCAL
-
-# If using HDFS, the absolute file path of Hadoop core-site.xml should be configured
-# Datatype: String
-# core_site_path=/etc/hadoop/conf/core-site.xml
-
-# If using HDFS, the absolute file path of Hadoop hdfs-site.xml should be configured
-# Datatype: String
-# hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml
-
-# If using HDFS, hadoop ip can be configured. If there are more than one hdfs_ip, Hadoop HA is used
-# Datatype: String
-# hdfs_ip=localhost
-
-# If using HDFS, hadoop port can be configured
-# Datatype: String
-# hdfs_port=9000
+# Datatype: int
+# mpp_data_exchange_core_pool_size=10
 
-# If there are more than one hdfs_ip, Hadoop HA is used. Below are configuration for HA
-# If using Hadoop HA, nameservices of hdfs can be configured
-# Datatype: String
-# dfs_nameservices=hdfsnamespace
+# Datatype: int
+# mpp_data_exchange_max_pool_size=10
 
-# If using Hadoop HA, namenodes under dfs nameservices can be configured
-# Datatype: String
-# dfs_ha_namenodes=nn1,nn2
+# Datatype: int
+# mpp_data_exchange_keep_alive_time_in_ms=1000
 
-# If using Hadoop HA, automatic failover can be enabled or disabled
-# Datatype: boolean
-# dfs_ha_automatic_failover_enabled=true
+####################
+### Continuous Query Configuration
+####################
 
-# If using Hadoop HA and enabling automatic failover, the proxy provider can be configured
-# Datatype: String
-# dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+# How many threads will be set up to perform continuous queries. When <= 0, use max(1, CPU core number / 2).
+# Datatype: int
+# continuous_query_execution_thread=2
 
-# If using kerberos to authenticate hdfs, this should be true
-# Datatype: boolean
-# hdfs_use_kerberos=false
+# Maximum number of continuous query tasks that can be pending for execution. When <= 0, the value is
+# 64 by default.
+# Datatype: int
+# max_pending_continuous_query_tasks=64
 
-# Full path of kerberos keytab file
-# Datatype: String
-# kerberos_keytab_file_path=/path
+# Minimum EVERY interval for continuous queries.
+# The EVERY interval of continuous query instances should not be lower than this limit.
+# Datatype: duration
+# continuous_query_min_every_interval=1s
 
-# Kerberos pricipal
-# Datatype: String
-# kerberos_principal=your principal
+# The size of log buffer for every CQ management operation plan. If the size of a CQ
+# management operation plan is larger than this parameter, the CQ management operation plan
+# will be rejected by CQManager.
+# Datatype: int
+# cqlog_buffer_size=1048576
 
 
 ####################
-### Storage Engine Configuration
+### Mlog Buffer Configuration
 ####################
 
-# Use this value to set timestamp precision as "ms", "us" or "ns".
-# Once the precision is been set, it can not be changed.
-# Datatype: String
-timestamp_precision=ms
-
-# Default TTL for storage groups that are not set TTL by statements, If not set (default),
-# the TTL will be unlimited.
-# Notice: if this property is changed, previous created storage group which are not set TTL will
-# also be affected. And negative values are accepted, which means you can only insert future
-# data.
-# Datatype: long
-# Unit: ms
-# default_ttl=36000000
-
-# What will the system do when unrecoverable error occurs.
-# Datatype: String
-# Optional strategies are as follows:
-# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only accepts query operations.
-# 2. SHUTDOWN: the system will be shutdown.
-# handle_system_error=CHANGE_TO_READ_ONLY
-
 # Size of log buffer in each metadata operation plan(in byte).
 # If the size of a metadata operation plan is larger than this parameter, then it will be rejected by SchemaRegion
 # If it sets a value smaller than 0, use the default value 1024*1024
@@ -342,6 +135,17 @@ timestamp_precision=ms
 # Set this parameter to 0 may slow down the operation on slow disk.
 # sync_mlog_period_in_ms=100
 
+####################
+### Storage Engine Configuration
+####################
+
+# What will the system do when an unrecoverable error occurs.
+# Datatype: String
+# Optional strategies are as follows:
+# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only accepts query operations.
+# 2. SHUTDOWN: the system will be shut down.
+# handle_system_error=CHANGE_TO_READ_ONLY
+
 # When a memTable's size (in byte) exceeds this, the memtable is flushed to disk. The default threshold is 1 GB.
 # Datatype: long
 # memtable_size_threshold=1073741824
@@ -411,15 +215,6 @@ timestamp_precision=ms
 # Datatype: int
 # batch_size=100000
 
-# max size for tag and attribute of one time series
-# the unit is byte
-# Datatype: int
-# tag_attribute_total_size=700
-
-# interval num for tag and attribute records when force flushing to disk
-# When a certain amount of tag and attribute records is reached, they will be force flushed to disk
-# It is possible to lose at most tag_attribute_flush_interval records
-# tag_attribute_flush_interval=1000
 
 # In one insert (one device, one timestamp, multiple measurements),
 # if enable partial insert, one measurement failure will not impact other measurements
@@ -430,10 +225,69 @@ timestamp_precision=ms
 # Datatype: int
 # recovery_log_interval_in_ms=5000
 
-# Add a switch to drop ouf-of-order data
-# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
+# When the insert plan column count reaches the specified threshold, the plan is considered relatively large and multithreading may be enabled.
+# If the tablet is small, the time of each insertion is short.
+# If multithreading is enabled, the switching cost between threads also has to be considered,
+# so the size of the tablet is used to decide whether to use it.
+# Datatype: int
+# insert_multi_tablet_enable_multithreading_column_threshold=10
+
+####################
+### Upgrade Configurations
+####################
+
+# When old version (0.9.x/v1) data exists, how many threads will be set up to perform upgrade tasks; 1 by default.
+# Set to 1 when less than or equal to 0.
+# Datatype: int
+# upgrade_thread_num=1
+
+####################
+### Schema Engine Configuration
+####################
+
+# Choose the mode of the schema engine. The value can be Memory, Schema_File or Rocksdb_based. If the provided value doesn't match any pre-defined value, Memory mode will be used by default.
+# Datatype: string
+# schema_engine_mode=Memory
+
+# Cache size for SchemaRegion.
+# This cache is used to improve insert speed: path checks and TSDataType are cached in SchemaRegion along with the corresponding Path.
+# Datatype: int
+# schema_region_device_node_cache_size=10000
+
+# thread pool size for read operation in DataNode's coordinator.
+# Datatype: int
+# coordinator_read_executor_size=20
+
+# thread pool size for write operation in DataNode's coordinator.
+# Datatype: int
+# coordinator_write_executor_size=50
+
+# cache size for partition.
+# This cache is used to improve partition fetch from config node.
+# Datatype: int
+# partition_cache_size=1000
+
+# Interval (in records) at which tag and attribute records are force-flushed to disk.
+# When this number of tag and attribute records has accumulated, they will be force-flushed to disk.
+# It is possible to lose at most tag_attribute_flush_interval records.
+# tag_attribute_flush_interval=1000
+
+####################
+### Cache Configuration
+####################
+
+# Whether to cache metadata (BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not.
 # Datatype: boolean
-# enable_discard_out_of_order_data=false
+# meta_data_cache_enable=true
+
+# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.
+# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50
+# chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50
+
+# Whether to enable LAST cache
+# Datatype: boolean
+# enable_last_cache=true
+
 
 ####################
 ### Memory Control Configuration
@@ -490,21 +344,11 @@ timestamp_precision=ms
 # Datatype: double
 # write_memory_variation_report_proportion=0.001
 
-# allowed max numbers of deduplicated path in one query
-# it's just an advised value, the real limitation will be the smaller one between this and the one we calculated
-# Datatype: int
-# max_deduplicated_path_num=1000
-
 # When an inserting is rejected, waiting period (in ms) to check system again, 50 by default.
 # If the insertion has been rejected and the read load is low, it can be set larger.
 # Datatype: int
 # check_period_when_insert_blocked=50
 
-# When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default.
-# If the insertion has been rejected and the read load is low, it can be set larger
-# Datatype: int
-# max_waiting_time_when_insert_blocked=10000
-
 # size of ioTaskQueue. The default value is 10
 # Datatype: int
 # io_task_queue_size_for_flushing=10
@@ -513,34 +357,15 @@ timestamp_precision=ms
 # Datatype: bool
 # enable_query_memory_estimation=true
 
-####################
-### Upgrade Configurations
-####################
-
-# When there exists old version(0.9.x/v1) data, how many thread will be set up to perform upgrade tasks, 1 by default.
-# Set to 1 when less than or equal to 0.
-# Datatype: int
-# upgrade_thread_num=1
 
 ####################
-### Query Configurations
+### Select-Into Configuration
 ####################
 
-# the default time period that used in fill query, -1 by default means infinite past time
-# Datatype: int, Unit: ms
-# default_fill_interval=-1
-
-# the max execution time of a DriverTask
-# Datatype: int, Unit: ms
-# driver_task_execution_time_slice_in_ms=100
-
-# the max capacity of a TsBlock
-# Datatype: int, Unit: byte
-# max_tsblock_size_in_bytes=1048576
-
-# the max number of lines in a single TsBlock
+# The maximum number of rows that can be processed in one insert-tablet-plan when executing select-into statements.
+# When <= 0, use 10000.
 # Datatype: int
-# max_tsblock_line_numbers=1000
+# select_into_insert_tablet_plan_row_limit=10000
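+
+# For example, a select-into statement producing 25000 result rows would, with the default of 10000,
+# be written back in three insert-tablet-plans of 10000, 10000 and 5000 rows (illustrative; the exact
+# batching is handled internally).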
 
 ####################
 ### Compaction Configurations
@@ -645,12 +470,6 @@ timestamp_precision=ms
 # Datatype: int
 # compaction_write_throughput_mb_per_sec=16
 
-# The maximum session idle time. unit: ms
-# Idle sessions are the ones that performs neither query or non-query operations for a period of time
-# Set to 0 to disable session timeout
-# Datatype: int
-# session_timeout_threshold=0
-
 # The max executing time of query. unit: ms
 # Datatype: int
 # query_timeout_threshold=60000
@@ -665,25 +484,164 @@ timestamp_precision=ms
 # Datatype: int
 # sub_compaction_thread_num=4
 
+
 ####################
-### Metadata Cache Configuration
+### Write Ahead Log Configuration
 ####################
 
-# whether to cache meta data(BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not.
-# Datatype: boolean
-# meta_data_cache_enable=true
+# Write mode of wal
+# The details of these three modes are as follows:
+# 1. DISABLE: the system will disable wal.
+# 2. SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully.
+# 3. ASYNC: the system will submit wal asynchronously, write request will return immediately no matter whether its wal has been fsynced to the disk successfully.
+# The write performance order is DISABLE > ASYNC > SYNC, but only SYNC mode can ensure data durability.
+# wal_mode=ASYNC
+
+# Max number of wal nodes, each node corresponds to one wal directory
+# The default value 0 means twice the number of wal dirs.
+# Notice: this value affects write performance significantly.
+# For non-SSD disks, values between one third and one half of the number of storage groups are recommended.
+# Datatype: int
+# max_wal_nodes_num=0
+
+# Duration a wal flush operation will wait before calling fsync
+# A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists.
+# Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended.
+# Datatype: long
+# fsync_wal_delay_in_ms=3
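+
+# For example, with fsync_wal_delay_in_ms=3, wal flushes arriving within the same 3 ms window can
+# share one fsync call, trading up to ~3 ms of extra write latency for fewer disk syncs; a value of 0
+# roughly means every flush is fsynced individually (illustrative interpretation of this parameter).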
+
+# Buffer size of each wal node
+# If it's a value smaller than 0, use the default value 16 * 1024 * 1024 bytes (16MB).
+# Datatype: int
+# wal_buffer_size_in_byte=16777216
+
+# Blocking queue capacity of each wal buffer, which restricts the maximum number of WALEdits cached in the blocking queue.
+# Datatype: int
+# wal_buffer_queue_capacity=50
+
+# Size threshold of each wal file
+# When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created.
+# If it's a value smaller than 0, use the default value 10 * 1024 * 1024 (10MB).
+# Datatype: long
+# wal_file_size_threshold_in_byte=10485760
+
+# Minimum ratio of effective information in wal files
+# This value should be between 0.0 and 1.0
+# If the effective information ratio is below this value, a MemTable snapshot or flush will be triggered.
+# Increase this value when wal occupies too much disk space. However, if this parameter is too large, write performance may decline.
+# Datatype: double
+# wal_min_effective_info_ratio=0.1
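+
+# Worked example, assuming the ratio is (wal data still needed by un-flushed memtables) / (total wal size):
+# if the wal directory holds 1GB of wal files but only 50MB of that still belongs to un-flushed
+# memtables, the effective ratio is about 0.05, which is below the default 0.1, so a memtable
+# snapshot or flush would be triggered so that outdated wal files can be removed.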
+
+# MemTable size threshold for triggering MemTable snapshot in wal
+# When a memTable's size (in bytes) exceeds this, wal can flush this memtable to disk, otherwise wal will keep a snapshot of this memtable in the wal.
+# If it's a value smaller than 0, use the default value 8 * 1024 * 1024 bytes (8MB).
+# Datatype: long
+# wal_memtable_snapshot_threshold_in_byte=8388608
+
+# MemTable's max snapshot number in wal
+# If one memTable's snapshot number in wal exceeds this value, it will be flushed to disk.
+# Datatype: int
+# max_wal_memtable_snapshot_num=1
+
+# The period when outdated wal files are periodically deleted
+# If this value is too large, outdated wal files may not be deleted in time.
+# If it's a value smaller than 0, use the default value 20 * 1000 ms (20 seconds).
+# Datatype: long
+# delete_wal_files_period_in_ms=20000
+
+# The minimum total size of wal files that triggers write throttling in MultiLeader consensus
+# If it's a value smaller than 0, use the default value 50 * 1024 * 1024 * 1024 bytes (50GB).
+# Datatype: long
+# multi_leader_throttle_threshold_in_byte=53687091200
+
+# Maximum wait time of write cache in MultiLeader consensus
+# If this value is less than or equal to 0, use the default value Long.MAX_VALUE.
+# Datatype: long
+# multi_leader_cache_window_time_in_ms=-1
 
-# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.
-# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50
-# chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50
 
 ####################
-### LAST Cache Configuration
+### Timestamp Precision
 ####################
 
-# Whether to enable LAST cache
+# Use this value to set timestamp precision as "ms", "us" or "ns".
+# Once the precision has been set, it cannot be changed.
+# Datatype: String
+timestamp_precision=ms
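+
+# For example, a raw timestamp value of 1000 means 1 second after the epoch under "ms",
+# 1 millisecond under "us" and 1 microsecond under "ns"; clients must therefore send timestamps
+# in the configured unit (illustrative arithmetic only).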
+
+# Default TTL for storage groups whose TTL is not set by statements. If not set (default),
+# the TTL will be unlimited.
+# Notice: if this property is changed, previously created storage groups whose TTL is not set will
+# also be affected. Negative values are accepted, which means you can only insert future
+# data.
+# Datatype: long
+# Unit: ms
+# default_ttl=36000000
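+
+# For example, the commented value 36000000 ms corresponds to a 10-hour TTL (36000000 ms = 10 h):
+# once such a TTL takes effect, data whose timestamp is more than 10 hours older than the current
+# time falls outside the TTL window (illustrative arithmetic only).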
+
+####################
+### Tlog Size Configuration
+####################
+# max size for tag and attribute of one time series
+# the unit is byte
+# Datatype: int
+# tag_attribute_total_size=700
+
+####################
+### Out of Order Data Configuration
+####################
+
+# Add a switch to drop out-of-order data
+# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
 # Datatype: boolean
-# enable_last_cache=true
+# enable_discard_out_of_order_data=false
+
+####################
+### Client Configuration
+####################
+
+# The maximum session idle time. unit: ms
+# Idle sessions are the ones that perform neither query nor non-query operations for a period of time
+# Set to 0 to disable session timeout
+# Datatype: int
+# session_timeout_threshold=0
+
+####################
+### Insert Control
+####################
+
+# When the waiting time (in ms) of an insertion exceeds this, throw an exception. 10000 by default.
+# If the insertion has been rejected and the read load is low, it can be set larger.
+# Datatype: int
+# max_waiting_time_when_insert_blocked=10000
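+
+# Worked example, combining this with check_period_when_insert_blocked=50 from the Memory Control
+# section: a blocked insertion re-checks the system roughly every 50 ms and gives up with an
+# exception after about 10000 / 50 = 200 checks (illustrative arithmetic only).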
+
+####################
+### Query Configurations
+####################
+
+# allowed max number of deduplicated paths in one query
+# it's just an advised value; the real limit will be the smaller one between this and the one we calculate
+# Datatype: int
+# max_deduplicated_path_num=1000
+
+# the default time period that is used in fill queries; -1 by default means infinite past time
+# Datatype: int, Unit: ms
+# default_fill_interval=-1
+
+# the max execution time of a DriverTask
+# Datatype: int, Unit: ms
+# driver_task_execution_time_slice_in_ms=100
+
+# the max capacity of a TsBlock
+# Datatype: int, Unit: byte
+# max_tsblock_size_in_bytes=1048576
+
+# the max number of lines in a single TsBlock
+# Datatype: int
+# max_tsblock_line_numbers=1000
+
+# time cost(ms) threshold for slow query
+# Datatype: long
+# slow_query_threshold=5000
 
 ####################
 ### External sort Configuration
@@ -700,8 +658,9 @@ timestamp_precision=ms
 # external_sort_threshold=1000
 
 ####################
-### PIPE Server Configuration
+### PIPE Configuration
 ####################
+
 # White IP list of Sync client.
 # Please use the form of network segment to present the range of IP, for example: 192.168.0.0/16
 # If there are more than one IP segment, please separate them by commas
@@ -709,23 +668,9 @@ timestamp_precision=ms
 # Datatype: String
 # ip_white_list=0.0.0.0/0
 
-####################
-### PIPE Sender Configuration
-####################
-# The maximum number of retry when syncing a file to receiver fails.
-# max_number_of_sync_file_retry=5
-
-
-####################
-### performance statistic configuration
-####################
-
-# Uncomment following fields to configure the tracing root directory.
-# For Window platform, the index is as follows:
-# tracing_dir=datanode\\tracing
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# tracing_dir=datanode/tracing
+
+# The maximum number of retries when syncing a file to the receiver fails.
+# max_number_of_sync_file_retry=5
 
 ####################
 ### Configurations for watermark module
@@ -739,7 +684,6 @@ timestamp_precision=ms
 # Datatype: String
 # watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
 
-
 ####################
 ### Configurations for creating schema automatically
 ####################
@@ -779,7 +723,6 @@ timestamp_precision=ms
 # Datatype: TSDataType
 # nan_string_infer_type=DOUBLE
 
-
 # BOOLEAN encoding when creating schema automatically is enabled
 # Datatype: TSEncoding
 # default_boolean_encoding=RLE
@@ -804,65 +747,6 @@ timestamp_precision=ms
 # Datatype: TSEncoding
 # default_text_encoding=PLAIN
 
-####################
-### Configurations for tsfile-format
-####################
-
-# Datatype: int
-# group_size_in_byte=134217728
-
-# The memory size for each series writer to pack page, default value is 64KB
-# Datatype: int
-# page_size_in_byte=65536
-
-# The maximum number of data points in a page, default 1024*1024
-# Datatype: int
-# max_number_of_points_in_page=1048576
-
-# The threshold for pattern matching in regex
-# Datatype: int
-# pattern_matching_threshold=1000000
-
-# Max size limitation of input string
-# Datatype: int
-# max_string_length=128
-
-# Floating-point precision
-# Datatype: int
-# float_precision=2
-
-# Encoder configuration
-# Encoder of time series, supports TS_2DIFF, PLAIN and RLE(run-length encoding), REGULAR and default value is TS_2DIFF
-# time_encoder=TS_2DIFF
-
-# Encoder of value series. default value is PLAIN.
-# For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG.
-# value_encoder=PLAIN
-
-# Compression configuration
-# Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY
-# compressor=SNAPPY
-
-# Maximum degree of a metadataIndex node, default value is 256
-# Datatype: int
-# max_degree_of_index_node=256
-
-# time interval in minute for calculating query frequency
-# Datatype: int
-# frequency_interval_in_minute=1
-
-# time cost(ms) threshold for slow query
-# Datatype: long
-# slow_query_threshold=5000
-
-# Signal-noise-ratio (SNR) of FREQ encoding
-# Datatype: double
-# freq_snr=40.0
-
-# Block size of FREQ encoding
-# Datatype: integer
-# freq_block_size=1024
-
 ####################
 ### MQTT Broker Configuration
 ####################
@@ -924,6 +808,7 @@ timestamp_precision=ms
 # Datatype: int
 # author_cache_expire_time=30
 
+
 ####################
 ### UDF Configuration
 ####################
@@ -952,6 +837,8 @@ timestamp_precision=ms
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # udf_root_dir=ext/udf
 
+
+
 ####################
 ### Trigger Configuration
 ####################
@@ -1004,78 +891,11 @@ timestamp_precision=ms
 # Datatype: int
 # stateful_trigger_retry_num_when_not_found=3
 
-####################
-### Continuous Query Configuration
-####################
-
-# How many thread will be set up to perform continuous queries. When <= 0, use max(1, CPU core number / 2).
-# Datatype: int
-# continuous_query_execution_thread=2
-
-# Maximum number of continuous query tasks that can be pending for execution. When <= 0, the value is
-# 64 by default.
-# Datatype: int
-# max_pending_continuous_query_tasks=64
-
-# Minimum every interval to perform continuous query.
-# The every interval of continuous query instances should not be lower than this limit.
-# Datatype: duration
-# continuous_query_min_every_interval=1s
-
-# The size of log buffer for every CQ management operation plan. If the size of a CQ
-# management operation plan is larger than this parameter, the CQ management operation plan
-# will be rejected by CQManager.
-# Datatype: int
-# cqlog_buffer_size=1048576
-
-####################
-### Select-Into Configuration
-####################
-
-# The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements.
-# When <= 0, use 10000.
-# Datatype: int
-# select_into_insert_tablet_plan_row_limit=10000
-
-
-####################
-### Insert-Tablets Configuration
-####################
-
-# When the insert plan column count reaches the specified threshold, which means that the plan is relatively large. At this time, may be enabled multithreading.
-# If the tablet is small, the time of each insertion is short.
-# If we enable multithreading, we also need to consider the switching loss between threads,
-# so we need to judge the size of the tablet.
-# Datatype: int
-# insert_multi_tablet_enable_multithreading_column_threshold=10
 
 ####################
-### Index Configuration
+### Time Partition Configuration
 ####################
 
-# Uncomment following fields to configure the index root directory.
-# For Window platform, the index is as follows:
-# index_root_dir=data\\index
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# index_root_dir=datanode/index
-
-# Is index enable
-# Datatype: boolean
-# enable_index=false
-
-# How many threads can concurrently build index. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_index_build_thread=0
-
-# the default size of sliding window used for the subsequence matching in index framework
-# Datatype: int
-# default_index_window_range=10
-
-# buffer parameter for index processor.
-# Datatype: long
-# index_buffer_size=134217728
-
 # whether enable data partition. If disabled, all data belongs to partition 0
 # Datatype: boolean
 # enable_partition=false
@@ -1099,30 +919,7 @@ timestamp_precision=ms
 # Datatype: float
 # group_by_fill_cache_size_in_mb=1.0
 
-####################
-### Schema Engine Configuration
-####################
-# Choose the mode of schema engine. The value could be Memory,Schema_File and Rocksdb_based. If the provided value doesn't match any pre-defined value, Memory mode will be used as default.
-# Datatype: string
-# schema_engine_mode=Memory
-
-# cache size for SchemaRegion.
-# This cache is used to improve insert speed where all path check and TSDataType will be cached in SchemaRegion with corresponding Path.
-# Datatype: int
-# schema_region_device_node_cache_size=10000
-
-# thread pool size for read operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_read_executor_size=20
-
-# thread pool size for write operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_write_executor_size=50
 
-# cache size for partition.
-# This cache is used to improve partition fetch from config node.
-# Datatype: int
-# partition_cache_size=1000
 
 ####################
 ### Schema File Configuration
@@ -1155,16 +952,255 @@ trigger_forward_http_pool_max_per_route=20
 # Trigger MQTT forward pool size
 trigger_forward_mqtt_pool_size=4
 
+####################
+### Cluster configuration
+####################
+
+####################
+### Region configuration
+####################
+
+# SchemaRegion consensus protocol type.
+# This parameter is unmodifiable after ConfigNode starts for the first time.
+# These consensus protocols are currently supported:
+# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica)
+# 2. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
+# Datatype: String
+# schema_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus
+
+# The maximum number of SchemaRegions expected to be managed by each DataNode.
+# Notice: Since each StorageGroup requires at least one SchemaRegion to manage its schema,
+# this parameter doesn't limit the number of SchemaRegions when there are too many StorageGroups.
+# Datatype: Double
+# schema_region_per_data_node=1.0
+
+# DataRegion consensus protocol type.
+# This parameter is unmodifiable after ConfigNode starts for the first time.
+# These consensus protocols are currently supported:
+# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica)
+# 2. org.apache.iotdb.consensus.multileader.MultiLeaderConsensus(weak consistency, high performance)
+# 3. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol)
+# Datatype: String
+# data_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus
+
+# The maximum number of DataRegions expected to be managed by each processor.
+# Notice: Since each StorageGroup requires at least two DataRegions to manage its data,
+# this parameter doesn't limit the number of DataRegions when there are too many StorageGroups.
+# Datatype: Double
+# data_region_per_processor=0.5
+
+# Region allocation strategy
+# These allocation strategies are currently supported:
+# 1. GREEDY(Default, when a region is allocated, always choose the DataNode that has been allocated the fewest regions)
+# 2. COPY_SET(Random replication according to a weight calculated from the number of regions on all online DataNodes, suitable for large clusters)
+# Datatype: String
+# region_allocate_strategy=GREEDY
+
+# All parameters in the PartitionSlot configuration are unmodifiable after the ConfigNode starts for the first time.
+# And these parameters should be consistent within the ConfigNodeGroup.
+# Number of SeriesPartitionSlots per StorageGroup
+# Datatype: int
+# series_partition_slot_num=10000
+
+
+# SeriesPartitionSlot executor class
+# These hashing algorithms are currently supported:
+# 1. BKDRHashExecutor(Default)
+# 2. APHashExecutor
+# 3. JSHashExecutor
+# 4. SDBMHashExecutor
+# Also, if you want to implement your own SeriesPartition executor, you can inherit the SeriesPartitionExecutor class and
+# set this parameter to the fully-qualified name of your Java class
+# Datatype: String
+# series_partition_executor_class=org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor
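+
+# Illustrative view of how the two settings above work together (the exact hashing is internal to the
+# chosen executor): the device path of each series is hashed by the configured executor, e.g.
+# BKDRHashExecutor, into one of the series_partition_slot_num (10000 by default) SeriesPartitionSlots,
+# so all series under the same device fall into the same slot, which is then mapped to a region.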
+
+# The routing policy of read/write requests
+# These routing policy are currently supported:
+# 1. leader(Default, routing to leader replica)
+# 2. greedy(Routing to replica with the lowest load, might cause read un-consistent)
+# Datatype: string
+# routing_policy=leader
+
+####################
+### StorageGroup initial configuration
+####################
+
+# All parameters in the StorageGroup configuration are unmodifiable after the ConfigNode starts for the first time.
+# And these parameters should be consistent within the ConfigNodeGroup.
+
+
+# Default TTL for storage groups whose TTL is not set by statements, in ms. If not set (default),
+# the TTL will be unlimited.
+# Notice: if this property is changed, previously created storage groups whose TTL is not set will
+# also be affected. Negative values are accepted, which means you can only insert future data.
+# Datatype: long
+# default_ttl=36000000
+
+
+# Time partition interval in milliseconds, default is equal to one week
+# Datatype: long
+# time_partition_interval_for_routing=604800000
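+
+# For example, keeping the default 604800000 ms yields one time partition per week, while
+# 86400000 ms (24 * 3600 * 1000) would route data into daily partitions (illustrative; like the other
+# StorageGroup parameters above, this value should not be changed after the first start).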
+
+
+# Default number of SchemaRegion replicas
+# Datatype: int
+# schema_replication_factor=1
+
+
+# Default number of DataRegion replicas
+# Datatype: int
+# data_replication_factor=1
+
+####################
+### Read configuration
+####################
+
+
+# The read consistency level
+# These consistency levels are currently supported:
+# 1. strong(Default, read from the leader replica)
+# 2. weak(Read from a random replica)
+# Datatype: string
+# read_consistency_level=strong
+
+####################
+### Heartbeat configuration
+####################
+
+
+# The heartbeat interval in milliseconds, default is 1000ms
+# Datatype: long
+# heartbeat_interval=1000
+
+
+
+
+####################
+### RatisConsensus Configuration
+####################
+
+# max payload size for a single log-sync-RPC from leader to follower
+# partition_region_ratis_log_appender_buffer_size_max = 4194304
+# schema_region_ratis_log_appender_buffer_size_max = 4194304
+# data_region_ratis_log_appender_buffer_size_max = 4194304
+
+# trigger a snapshot when ratis_snapshot_trigger_threshold logs are written
+# partition_region_ratis_snapshot_trigger_threshold = 400000
+# schema_region_ratis_snapshot_trigger_threshold = 400000
+# data_region_ratis_snapshot_trigger_threshold = 400000
+
+# allow flushing Raft Log asynchronously
+# partition_region_ratis_log_unsafe_flush_enable = false
+# schema_region_ratis_log_unsafe_flush_enable = false
+# data_region_ratis_log_unsafe_flush_enable = false
+
+# max capacity of a single Raft Log segment (by default 24MB)
+# partition_region_ratis_log_segment_size_max = 25165824
+# schema_region_ratis_log_segment_size_max = 25165824
+# data_region_ratis_log_segment_size_max = 25165824
+
+# flow control window for ratis grpc log appender
+# partition_region_ratis_grpc_flow_control_window = 4194304
+# schema_region_ratis_grpc_flow_control_window = 4194304
+# data_region_ratis_grpc_flow_control_window = 4194304
+
+# min election timeout for leader election
+# partition_region_ratis_rpc_leader_election_timeout_min_ms = 2000
+# schema_region_ratis_rpc_leader_election_timeout_min_ms = 2000
+# data_region_ratis_rpc_leader_election_timeout_min_ms = 2000
+
+# max election timeout for leader election
+# partition_region_ratis_rpc_leader_election_timeout_max_ms = 4000
+# schema_region_ratis_rpc_leader_election_timeout_max_ms = 4000
+# data_region_ratis_rpc_leader_election_timeout_max_ms = 4000
+
+# ratis client retry threshold
+# partition_region_ratis_request_timeout_ms = 10000
+# schema_region_ratis_request_timeout_ms = 10000
+# data_region_ratis_request_timeout_ms = 10000
+
+# currently we use exponential back-off retry policy for ratis
+# partition_region_ratis_max_retry_attempts = 10
+# partition_region_ratis_initial_sleep_time_ms = 100
+# partition_region_ratis_max_sleep_time_ms = 10000
+# schema_region_ratis_max_retry_attempts = 10
+# schema_region_ratis_initial_sleep_time_ms = 100
+# schema_region_ratis_max_sleep_time_ms = 10000
+# data_region_ratis_max_retry_attempts = 10
+# data_region_ratis_initial_sleep_time_ms = 100
+# data_region_ratis_max_sleep_time_ms = 10000
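+
+# Rough illustration of the back-off above, assuming a doubling policy (the exact multiplier is
+# determined by Ratis): with initial_sleep_time_ms=100, max_sleep_time_ms=10000 and
+# max_retry_attempts=10, the retry sleeps would look like 100, 200, 400, 800, ... ms, capped at
+# 10000 ms, before the request finally fails.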
+
+# preserve a certain number of logs when taking a snapshot and purging
+# partition_region_ratis_preserve_logs_num_when_purge = 1000
+# schema_region_ratis_preserve_logs_num_when_purge = 1000
+# data_region_ratis_preserve_logs_num_when_purge = 1000
+
+# first election timeout
+# ratis_first_election_timeout_min_ms = 50
+# ratis_first_election_timeout_max_ms = 150
+
+####################
+### Disk Monitor
+####################
+
+
+# Remaining disk space threshold (percentage) at which the DataNode is set to ReadOnly status
+# Datatype: double(percentage)
+# disk_space_warning_threshold=5.0
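+
+# For example, with disk_space_warning_threshold=5.0 and a 1TB data disk, the DataNode would switch
+# to ReadOnly once less than roughly 50GB (5% of capacity) remains free (illustrative, assuming the
+# value is interpreted as a percentage of total capacity, as the Datatype note indicates).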
+
+####################
+### Configurations for tsfile-format
+####################
+
+# Datatype: int
+# group_size_in_byte=134217728
+
+# The memory size for each series writer to pack page, default value is 64KB
+# Datatype: int
+# page_size_in_byte=65536
+
+# The maximum number of data points in a page, default 1024*1024
+# Datatype: int
+# max_number_of_points_in_page=1048576
+
+# The threshold for pattern matching in regex
+# Datatype: int
+# pattern_matching_threshold=1000000
 
+# Max size limitation of input string
+# Datatype: int
+# max_string_length=128
+
+# Floating-point precision
+# Datatype: int
+# float_precision=2
+
+# Encoder configuration
+# Encoder of time series, supports TS_2DIFF, PLAIN, RLE(run-length encoding) and REGULAR; default value is TS_2DIFF
+# time_encoder=TS_2DIFF
+
+# Encoder of value series. Default value is PLAIN.
+# For int and long data types, TS_2DIFF, RLE(run-length encoding), GORILLA and ZIGZAG are also supported.
+# value_encoder=PLAIN
+
+# Compression configuration
+# Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY
+# compressor=SNAPPY
 
-#######################
-### LocalConfigNode ###
-#######################
+# Maximum degree of a metadataIndex node, default value is 256
+# Datatype: int
+# max_degree_of_index_node=256
 
-# number of data regions per user-defined storage group
-# a data region is the unit of parallelism in memory as all ingestions in one data region are serialized
-# recommended value is [data region number] = [CPU core number] / [user-defined storage group number]
+# time interval in minutes for calculating query frequency
 # Datatype: int
-# data_region_num=1
+# frequency_interval_in_minute=1
+
+# Signal-noise-ratio (SNR) of FREQ encoding
+# Datatype: double
+# freq_snr=40.0
+
+# Block size of FREQ encoding
+# Datatype: integer
+# freq_block_size=1024
 
 
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java b/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
index 429d97bb40..0c25ba4a46 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
@@ -29,6 +29,8 @@ import java.io.File;
 import java.util.concurrent.TimeUnit;
 
 public class CommonConfig {
+
+  public static final String CONFIG_NAME = "iotdb-common.properties";
   private static final Logger logger = LoggerFactory.getLogger(CommonConfig.class);
 
   // Open ID Secret
diff --git a/schema-engine-rocksdb/src/assembly/resources/conf/schema-rocksdb.properties b/schema-engine-rocksdb/src/assembly/resources/conf/schema-rocksdb.properties
index 6703cd48f0..4a5c212f5e 100644
--- a/schema-engine-rocksdb/src/assembly/resources/conf/schema-rocksdb.properties
+++ b/schema-engine-rocksdb/src/assembly/resources/conf/schema-rocksdb.properties
@@ -22,20 +22,20 @@
 ####################
 
 # This configuration takes effect only when the schema engine mode is Rocksdb_based.
-# The mode is configured in the 'iotdb-datanode.properties'(schema_engine_mode=Rocksdb_based).
+# The mode is configured in the 'iotdb-common.properties'(schema_engine_mode=Rocksdb_based).
 
 ####################
 ### Cache Configuration
 ####################
 # A proper cache size can speed up metadata query.You can configure the cache size as required.
-# By default, the block cache is calculated based on parameter 'write_read_schema_free_memory_proportion' in 'iotdb-datanode.properties'.
+# By default, the block cache is calculated based on parameter 'write_read_schema_free_memory_proportion' in 'iotdb-common.properties'.
 # Assuming 30GB of memory allocated to the schema, that will allocate 30GB to the following configuration items in proportion.
 
 # Datatype: long
 # LRU block cache size. Block cache is where RocksDB caches data in memory for reads.
 # The block cache stores uncompressed blocks.
 # The default value is 2/3 of the schema memory configured for parameter
-# 'write_read_schema_free_memory_proportion' in the 'iotdb-datanode.properties'.
+# 'write_read_schema_free_memory_proportion' in the 'iotdb-common.properties'.
 # For example, if the total configured memory size is 30GB and the schema ratio is 1/10,
 # the default value is 30GB * 1/10 * 2/3
 # block_cache_size=2147483648
@@ -44,7 +44,7 @@
 # LRU block cache size. Block cache is where RocksDB caches data in memory for reads.
 # The block cache stores compressed blocks.
 # The default value is 1/3 of the schema memory configured for parameter
-# 'write_read_schema_free_memory_proportion' in the 'iotdb-datanode.properties'.
+# 'write_read_schema_free_memory_proportion' in the 'iotdb-common.properties'.
 # For example, if the total configured memory size is 30GB and the schema ratio is 1/10,
 # the default value is 30GB * 1/10 * 1/3
 # block_cache_compressed_size=1073741824
diff --git a/schema-engine-tag/src/assembly/resources/conf/schema-tag.properties b/schema-engine-tag/src/assembly/resources/conf/schema-tag.properties
index bfb7df48b8..448f8c8472 100644
--- a/schema-engine-tag/src/assembly/resources/conf/schema-tag.properties
+++ b/schema-engine-tag/src/assembly/resources/conf/schema-tag.properties
@@ -22,7 +22,7 @@
 ####################
 
 # This configuration takes effect only when the schema engine mode is Tag.
-# The mode is configured in the 'iotdb-datanode.properties'(schema_engine_mode=Tag).
+# The mode is configured in the 'iotdb-common.properties'(schema_engine_mode=Tag).
 
 # Datatype: int
 # The size of wal buffer used to store a wal record.(unit: byte)
diff --git a/server/src/assembly/resources/conf/iotdb-datanode.properties b/server/src/assembly/resources/conf/iotdb-datanode.properties
index 0b3761bc69..c32893142b 100644
--- a/server/src/assembly/resources/conf/iotdb-datanode.properties
+++ b/server/src/assembly/resources/conf/iotdb-datanode.properties
@@ -18,7 +18,7 @@
 #
 
 ####################
-### RPC Configuration
+### Data Node RPC Configuration
 ####################
 
 # could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address
@@ -29,21 +29,9 @@ rpc_address=0.0.0.0
 # Datatype: int
 rpc_port=6667
 
-####################
-### Shuffle Configuration
-####################
 # Datatype: int
 mpp_data_exchange_port=8777
 
-# Datatype: int
-# mpp_data_exchange_core_pool_size=10
-
-# Datatype: int
-# mpp_data_exchange_max_pool_size=10
-
-# Datatype: int
-# mpp_data_exchange_keep_alive_time_in_ms=1000
-
 # Datatype: String
 # used for communication between cluster nodes.
 # could set 0.0.0.0, 127.0.0.1(for local test) or ipv4 address.
@@ -61,6 +49,10 @@ data_region_consensus_port=40010
 # port for consensus's communication for schema region between cluster nodes.
 schema_region_consensus_port=50010
 
+####################
+### Target Config Nodes
+####################
+
 # At least one running ConfigNode should be set for joining the cluster
 # Format: ip:port
 # where the ip should be consistent with the target ConfigNode's confignode_internal_address,
@@ -71,135 +63,6 @@ schema_region_consensus_port=50010
 # Notice: The ip for any target_config_node should never be 0.0.0.0
 target_config_nodes=127.0.0.1:22277
 
-# Datatype: boolean
-# rpc_thrift_compression_enable=false
-
-# if true, a snappy based compression method will be called before sending data by the network
-# Datatype: boolean
-# this feature is under development, set this as false before it is done.
-# rpc_advanced_compression_enable=false
-
-# Datatype: int
-# rpc_selector_thread_num=1
-
-# Datatype: int
-# rpc_min_concurrent_client_num=1
-
-# Datatype: int
-# rpc_max_concurrent_client_num=65535
-
-# thrift max frame size, 512MB by default
-# Datatype: int
-# thrift_max_frame_size=536870912
-
-# thrift init buffer size
-# Datatype: int
-# thrift_init_buffer_size=1024
-
-# Thrift socket and connection timeout between raft nodes, in milliseconds.
-# Datatype: int
-# connection_timeout_ms=20000
-
-# The maximum number of clients that can be idle for a node's InternalService.
-# When the number of idle clients on a node exceeds this number, newly returned clients will be released
-# Datatype: int
-# core_connection_for_internal_service=100
-
-# The maximum number of clients that can be applied for a node's InternalService
-# Datatype: int
-# max_connection_for_internal_service=100
-
-# selector thread (TAsyncClientManager) nums for async thread in a clientManager
-# Datatype: int
-# selector_thread_nums_of_client_manager=1
-
-####################
-### Write Ahead Log Configuration
-####################
-
-# Write mode of wal
-# The details of these three modes are as follows:
-# 1. DISABLE: the system will disable wal.
-# 2. SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully.
-# 3. ASYNC: the system will submit wal asynchronously, write request will return immediately no matter its wal is fsynced to the disk successfully.
-# The write performance order is DISABLE > ASYNC > SYNC, but only SYNC mode can ensure data durability.
-# wal_mode=ASYNC
-
-# wal dirs
-# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode).
-# If it is absolute, system will save the data in the exact location it points to.
-# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
-# If there are more than one directory, please separate them by commas ",".
-# Note: If wal_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path.
-# For windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
-# wal_dirs=data\\datanode\\wal
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# wal_dirs=data/datanode/wal
-
-# Max number of wal nodes, each node corresponds to one wal directory
-# The default value 0 means twice the number of wal dirs.
-# Notice: this value affects write performance significantly.
-# For non-SSD disks, values between one third and half of storage groups number are recommended.
-# Datatype: int
-# max_wal_nodes_num=0
-
-# Duration a wal flush operation will wait before calling fsync
-# A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists.
-# Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended.
-# Datatype: long
-# fsync_wal_delay_in_ms=3
-
-# Buffer size of each wal node
-# If it's a value smaller than 0, use the default value 16 * 1024 * 1024 bytes (16MB).
-# Datatype: int
-# wal_buffer_size_in_byte=16777216
-
-# Blocking queue capacity of each wal buffer, restricts maximum number of WALEdits cached in the blocking queue.
-# Datatype: int
-# wal_buffer_queue_capacity=50
-
-# Size threshold of each wal file
-# When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created.
-# If it's a value smaller than 0, use the default value 10 * 1024 * 1024 (10MB).
-# Datatype: long
-# wal_file_size_threshold_in_byte=10485760
-
-# Minimum ratio of effective information in wal files
-# This value should be between 0.0 and 1.0
-# If effective information ratio is below this value, MemTable snapshot or flush will be triggered.
-# Increase this value when wal occupies too much disk space. But, if this parameter is too large, the write performance may decline.
-# Datatype: double
-# wal_min_effective_info_ratio=0.1
-
-# MemTable size threshold for triggering MemTable snapshot in wal
-# When a memTable's size (in byte) exceeds this, wal can flush this memtable to disk, otherwise wal will snapshot this memtable in wal.
-# If it's a value smaller than 0, use the default value 8 * 1024 * 1024 bytes (8MB).
-# Datatype: long
-# wal_memtable_snapshot_threshold_in_byte=8388608
-
-# MemTable's max snapshot number in wal
-# If one memTable's snapshot number in wal exceeds this value, it will be flushed to disk.
-# Datatype: int
-# max_wal_memtable_snapshot_num=1
-
-# The period when outdated wal files are periodically deleted
-# If this value is too large, outdated wal files may not able to be deleted in time.
-# If it's a value smaller than 0, use the default value 20 * 1000 ms (20 seconds).
-# Datatype: long
-# delete_wal_files_period_in_ms=20000
-
-# The minimum size of wal files when throttle down in MultiLeader consensus
-# If it's a value smaller than 0, use the default value 50 * 1024 * 1024 * 1024 bytes (50GB).
-# Datatype: long
-# multi_leader_throttle_threshold_in_byte=53687091200
-
-# Maximum wait time of write cache in MultiLeader consensus
-# If this value is less than or equal to 0, use the default value Long.MAX_VALUE.
-# Datatype: long
-# multi_leader_cache_window_time_in_ms=-1
-
 ####################
 ### Directory Configuration
 ####################
@@ -255,916 +118,23 @@ target_config_nodes=127.0.0.1:22277
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # consensus_dir=data/datanode/consensus
 
+# wal dirs
+# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode).
+# If it is absolute, system will save the data in the exact location it points to.
+# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
+# If there are more than one directory, please separate them by commas ",".
+# Note: If wal_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path.
+# For windows platform
+# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
+# wal_dirs=data\\datanode\\wal
+# For Linux platform
+# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
+# wal_dirs=data/datanode/wal
 
-# TSFile storage file system. Currently, TsFiles are supported to be stored in LOCAL file system or HDFS.
-# Datatype: FSType
-# tsfile_storage_fs=LOCAL
-
-# If using HDFS, the absolute file path of Hadoop core-site.xml should be configured
-# Datatype: String
-# core_site_path=/etc/hadoop/conf/core-site.xml
-
-# If using HDFS, the absolute file path of Hadoop hdfs-site.xml should be configured
-# Datatype: String
-# hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml
-
-# If using HDFS, hadoop ip can be configured. If there are more than one hdfs_ip, Hadoop HA is used
-# Datatype: String
-# hdfs_ip=localhost
-
-# If using HDFS, hadoop port can be configured
-# Datatype: String
-# hdfs_port=9000
-
-# If there are more than one hdfs_ip, Hadoop HA is used. Below are configuration for HA
-# If using Hadoop HA, nameservices of hdfs can be configured
-# Datatype: String
-# dfs_nameservices=hdfsnamespace
-
-# If using Hadoop HA, namenodes under dfs nameservices can be configured
-# Datatype: String
-# dfs_ha_namenodes=nn1,nn2
-
-# If using Hadoop HA, automatic failover can be enabled or disabled
-# Datatype: boolean
-# dfs_ha_automatic_failover_enabled=true
-
-# If using Hadoop HA and enabling automatic failover, the proxy provider can be configured
-# Datatype: String
-# dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
-
-# If using kerberos to authenticate hdfs, this should be true
-# Datatype: boolean
-# hdfs_use_kerberos=false
-
-# Full path of kerberos keytab file
-# Datatype: String
-# kerberos_keytab_file_path=/path
-
-# Kerberos pricipal
-# Datatype: String
-# kerberos_principal=your principal
-
-
-####################
-### Storage Engine Configuration
-####################
-
-# Use this value to set timestamp precision as "ms", "us" or "ns".
-# Once the precision is been set, it can not be changed.
-# Datatype: String
-timestamp_precision=ms
-
-# Default TTL for storage groups that are not set TTL by statements, If not set (default),
-# the TTL will be unlimited.
-# Notice: if this property is changed, previous created storage group which are not set TTL will
-# also be affected. And negative values are accepted, which means you can only insert future
-# data.
-# Datatype: long
-# Unit: ms
-# default_ttl=36000000
-
-# What will the system do when unrecoverable error occurs.
-# Datatype: String
-# Optional strategies are as follows:
-# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only accepts query operations.
-# 2. SHUTDOWN: the system will be shutdown.
-# handle_system_error=CHANGE_TO_READ_ONLY
-
-# Size of log buffer in each metadata operation plan(in byte).
-# If the size of a metadata operation plan is larger than this parameter, then it will be rejected by SchemaRegion
-# If it sets a value smaller than 0, use the default value 1024*1024
-# Datatype: int
-# mlog_buffer_size=1048576
-
-# The cycle when metadata log is periodically forced to be written to disk(in milliseconds)
-# If sync_mlog_period_in_ms = 0 it means force metadata log to be written to disk after each refreshment
-# Set this parameter to 0 may slow down the operation on slow disk.
-# sync_mlog_period_in_ms=100
-
-# When a memTable's size (in byte) exceeds this, the memtable is flushed to disk. The default threshold is 1 GB.
-# Datatype: long
-# memtable_size_threshold=1073741824
-
-# Whether to timed flush sequence tsfiles' memtables.
-# Datatype: boolean
-# enable_timed_flush_seq_memtable=false
-
-# If a memTable's created time is older than current time minus this, the memtable will be flushed to disk.
-# Only check sequence tsfiles' memtables.
-# The default flush interval is 60 * 60 * 1000. (unit: ms)
-# Datatype: long
-# seq_memtable_flush_interval_in_ms=3600000
-
-# The interval to check whether sequence memtables need flushing.
-# The default flush check interval is 10 * 60 * 1000. (unit: ms)
-# Datatype: long
-# seq_memtable_flush_check_interval_in_ms=600000
-
-# Whether to timed flush unsequence tsfiles' memtables.
-# Datatype: boolean
-# enable_timed_flush_unseq_memtable=true
-
-# If a memTable's created time is older than current time minus this, the memtable will be flushed to disk.
-# Only check unsequence tsfiles' memtables.
-# The default flush interval is 60 * 60 * 1000. (unit: ms)
-# Datatype: long
-# unseq_memtable_flush_interval_in_ms=3600000
-
-# The interval to check whether unsequence memtables need flushing.
-# The default flush check interval is 10 * 60 * 1000. (unit: ms)
-# Datatype: long
-# unseq_memtable_flush_check_interval_in_ms=600000
-
-# The sort algorithms used in the memtable's TVList
-# TIM: default tim sort,
-# QUICK: quick sort,
-# BACKWARD: backward sort
-# tvlist_sort_algorithm=TIM
-
-# When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. The default threshold is 100000.
-# Datatype: int
-# avg_series_point_number_threshold=100000
-
-# How many threads can concurrently flush. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_flush_thread=0
-
-# How many threads can concurrently execute query statement. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_query_thread=0
-
-# How many threads can concurrently read data for raw data query. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_sub_rawQuery_thread=8
-
-# Blocking queue size for read task in raw data query. Must >= 1.
-# Datatype: int
-# raw_query_blocking_queue_capacity=5
-
-# whether take over the memory management by IoTDB rather than JVM when serializing memtable as bytes in memory
-# (i.e., whether use ChunkBufferPool), value true, false
-# Datatype: boolean
-# chunk_buffer_pool_enable=false
-
-# The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.)
-# Datatype: int
-# batch_size=100000
-
-# max size for tag and attribute of one time series
-# the unit is byte
-# Datatype: int
-# tag_attribute_total_size=700
-
-# interval num for tag and attribute records when force flushing to disk
-# When a certain amount of tag and attribute records is reached, they will be force flushed to disk
-# It is possible to lose at most tag_attribute_flush_interval records
-# tag_attribute_flush_interval=1000
-
-# In one insert (one device, one timestamp, multiple measurements),
-# if enable partial insert, one measurement failure will not impact other measurements
-# Datatype: boolean
-# enable_partial_insert=true
-
-# the interval to log recover progress of each vsg when starting iotdb
-# Datatype: int
-# recovery_log_interval_in_ms=5000
-
-# Add a switch to drop ouf-of-order data
-# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
-# Datatype: boolean
-# enable_discard_out_of_order_data=false
-
-####################
-### Memory Control Configuration
-####################
-
-# Whether to enable memory control
-# Datatype: boolean
-# enable_mem_control=true
-
-# Memory Allocation Ratio: Write, Read, Schema, Consensus and Free Memory.
-# The parameter form is a:b:c:d:e, where a, b, c, d and e are integers. for example: 1:1:1:1:1 , 6:2:1:1:1
-# If you have high level of writing pressure and low level of reading pressure, please adjust it to for example 6:1:1:1:2
-# write_read_schema_free_memory_proportion=3:3:1:1:2
-
-# Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, PartitionCache and LastCache.
-# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 1:1:1:1 , 6:2:1:1
-# In cluster mode, we recommend 5:3:1:1. In standalone mode, we recommend 8:1:0:1
-# schema_memory_allocate_proportion=5:3:1:1
-
-# Memory allocation ratio in StorageEngine: MemTable, Compaction
-# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 8:2 , 7:3
-# storage_engine_memory_proportion=8:2
-
-# Max number of concurrent writing time partitions in one storage group
-# This parameter is used to control total memTable number when memory control is disabled
-# The max number of memTable is 4 * concurrent_writing_time_partition * storage group number
-# Datatype: long
-# concurrent_writing_time_partition=1
-
-# primitive array size (length of each array) in array pool
-# Datatype: int
-# primitive_array_size=32
-
-# size proportion for chunk metadata maintains in memory when writing tsfile
-# Datatype: double
-# chunk_metadata_size_proportion=0.1
-
-# Ratio of write memory for invoking flush disk, 0.4 by default
-# If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2
-# Datatype: double
-# flush_proportion=0.4
-
-# Ratio of write memory allocated for buffered arrays, 0.6 by default
-# Datatype: double
-# buffered_arrays_memory_proportion=0.6
-
-# Ratio of write memory for rejecting insertion, 0.8 by default
-# If you have extremely high write load (like batch=1000) and the physical memory size is large enough,
-# it can be set higher than the default value like 0.9
-# Datatype: double
-# reject_proportion=0.8
-
-# If memory cost of data region increased more than proportion of allocated memory for write, report to system. The default value is 0.001
-# Datatype: double
-# write_memory_variation_report_proportion=0.001
-
-# allowed max numbers of deduplicated path in one query
-# it's just an advised value, the real limitation will be the smaller one between this and the one we calculated
-# Datatype: int
-# max_deduplicated_path_num=1000
-
-# When an inserting is rejected, waiting period (in ms) to check system again, 50 by default.
-# If the insertion has been rejected and the read load is low, it can be set larger.
-# Datatype: int
-# check_period_when_insert_blocked=50
-
-# When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default.
-# If the insertion has been rejected and the read load is low, it can be set larger
-# Datatype: int
-# max_waiting_time_when_insert_blocked=10000
-
-# size of ioTaskQueue. The default value is 10
-# Datatype: int
-# io_task_queue_size_for_flushing=10
-
-# If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory
-# Datatype: bool
-# enable_query_memory_estimation=true
-
-####################
-### Upgrade Configurations
-####################
-
-# When there exists old version(0.9.x/v1) data, how many thread will be set up to perform upgrade tasks, 1 by default.
-# Set to 1 when less than or equal to 0.
-# Datatype: int
-# upgrade_thread_num=1
-
-####################
-### Query Configurations
-####################
-
-# the default time period that used in fill query, -1 by default means infinite past time
-# Datatype: int, Unit: ms
-# default_fill_interval=-1
-
-# the max execution time of a DriverTask
-# Datatype: int, Unit: ms
-# driver_task_execution_time_slice_in_ms=100
-
-# the max capacity of a TsBlock
-# Datatype: int, Unit: byte
-# max_tsblock_size_in_bytes=1048576
-
-# the max number of lines in a single TsBlock
-# Datatype: int
-# max_tsblock_line_numbers=1000
-
-####################
-### Compaction Configurations
-####################
-# sequence space compaction: only compact the sequence files
-# Datatype: boolean
-# enable_seq_space_compaction=true
-
-# unsequence space compaction: only compact the unsequence files
-# Datatype: boolean
-# enable_unseq_space_compaction=true
-
-# cross space compaction: compact the unsequence files into the overlapped sequence files
-# Datatype: boolean
-# enable_cross_space_compaction=true
-
-# the selector of cross space compaction task
-# Options: rewrite
-# cross_selector=rewrite
-
-# the compaction performer of cross space compaction task
-# Options: read_point
-# cross_performer=read_point
-
-# the selector of inner sequence space compaction task
-# Options: size_tiered
-# inner_seq_selector=size_tiered
-
-# the performer of inner sequence space compaction task
-# Options: read_chunk
-# inner_seq_performer=read_chunk
-
-# the selector of inner unsequence space compaction task
-# Options: size_tiered
-# inner_unseq_selector=size_tiered
-
-# the performer of inner unsequence space compaction task
-# Options: read_point
-# inner_unseq_performer=read_point
-
-# The priority of compaction execution
-# INNER_CROSS: prioritize inner space compaction, reduce the number of files first
-# CROSS_INNER: prioritize cross space compaction, eliminate the unsequence files first
-# BALANCE: alternate two compaction types
-# compaction_priority=BALANCE
-
-# The target tsfile size in compaction
-# Datatype: long, Unit: byte
-# target_compaction_file_size=1073741824
-
-# The target chunk size in compaction and when memtable reaches this threshold, flush the memtable to disk.
-# default is 1MB
-# Datatype: long, Unit: byte
-# target_chunk_size=1048576
-
-# The target point nums in one chunk in compaction
-# Datatype: long
-# target_chunk_point_num=100000
-
-# If the chunk size is lower than this threshold, it will be deserialize into points, default is 128 byte
-# Datatype: long, Unit:byte
-# chunk_size_lower_bound_in_compaction=128
-
-# If the chunk size is lower than this threshold, it will be deserialize into points
-# Datatype: long
-# chunk_point_num_lower_bound_in_compaction=100
-
-# The max file when selecting inner space compaction candidate files
-# Datatype: int
-# max_inner_compaction_candidate_file_num=30
-
-# The max file when selecting cross space compaction candidate files
-# At least one unseq file with it's overlapped seq files will be selected even exceeded this number
-# Datatype: int
-# max_cross_compaction_candidate_file_num=1000
-
-# The max total size when selecting cross space compaction candidate files
-# At least one unseq file with it's overlapped seq files will be selected even exceeded this number
-# Datatype: long, Unit: byte
-# max_cross_compaction_candidate_file_size=5368709120
-
-# If one merge file selection runs for more than this time, it will be ended and its current
-# selection will be used as final selection.
-# When < 0, it means time is unbounded.
-# Datatype: long, Unit: ms
-# cross_compaction_file_selection_time_budget=30000
-
-# How many threads will be set up to perform compaction, 10 by default.
-# Set to 1 when less than or equal to 0.
-# Datatype: int
-# concurrent_compaction_thread=10
-
-# The interval of compaction task schedule
-# Datatype: long, Unit: ms
-# compaction_schedule_interval_in_ms=60000
-
-# The interval of compaction task submission
-# Datatype: long, Unit: ms
-# compaction_submission_interval_in_ms=60000
-
-# The limit of write throughput merge can reach per second
-# Datatype: int
-# compaction_write_throughput_mb_per_sec=16
-
-# The maximum session idle time. unit: ms
-# Idle sessions are the ones that performs neither query or non-query operations for a period of time
-# Set to 0 to disable session timeout
-# Datatype: int
-# session_timeout_threshold=0
-
-# The max executing time of query. unit: ms
-# Datatype: int
-# query_timeout_threshold=60000
-
-# The maximum allowed concurrently executing queries
-# Datatype: int
-# max_allowed_concurrent_queries=1000
-
-# The number of sub compaction threads to be set up to perform compaction.
-# Currently only works for nonAligned data in cross space compaction and unseq inner space compaction.
-# Set to 1 when less than or equal to 0.
-# Datatype: int
-# sub_compaction_thread_num=4
-
-####################
-### Metadata Cache Configuration
-####################
-
-# whether to cache meta data(BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not.
-# Datatype: boolean
-# meta_data_cache_enable=true
-
-# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.
-# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50
-# chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50
-
-####################
-### LAST Cache Configuration
-####################
-
-# Whether to enable LAST cache
-# Datatype: boolean
-# enable_last_cache=true
-
-####################
-### External sort Configuration
-####################
-# Is external sort enable
-# Datatype: boolean
-# enable_external_sort=true
-
-# The maximum number of simultaneous chunk reading for a single time series.
-# If the num of simultaneous chunk reading is greater than external_sort_threshold, external sorting is used.
-# When external_sort_threshold increases, the number of chunks sorted at the same time in memory may increase and this will occupy more memory.
-# When external_sort_threshold decreases, triggering external sorting will increase the time-consuming.
-# Datatype: int
-# external_sort_threshold=1000
-
-####################
-### PIPE Server Configuration
-####################
-# White IP list of Sync client.
-# Please use the form of network segment to present the range of IP, for example: 192.168.0.0/16
-# If there are more than one IP segment, please separate them by commas
-# The default is to allow all IP to sync
-# Datatype: String
-# ip_white_list=0.0.0.0/0
-
-####################
-### PIPE Sender Configuration
-####################
-# The maximum number of retry when syncing a file to receiver fails.
-# max_number_of_sync_file_retry=5
-
-
-####################
-### performance statistic configuration
-####################
-
+# tracing dir
 # Uncomment following fields to configure the tracing root directory.
 # For Window platform, the index is as follows:
 # tracing_dir=datanode\\tracing
 # For Linux platform
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # tracing_dir=datanode/tracing
-
-####################
-### Configurations for watermark module
-####################
-# Datatype: boolean
-# watermark_module_opened=false
-# Datatype: String
-# watermark_secret_key=IoTDB*2019@Beijing
-# Datatype: String
-# watermark_bit_string=100101110100
-# Datatype: String
-# watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
-
-
-####################
-### Configurations for creating schema automatically
-####################
-
-# Whether creating schema automatically is enabled
-# If true, storage groups and timeseries are created automatically when they do not exist during insertion.
-# Otherwise, users need to create storage groups and timeseries before insertion.
-# Datatype: boolean
-# enable_auto_create_schema=true
-
-# Storage group level when creating schema automatically is enabled
-# e.g. root.sg0.d1.s2
-#      we will set root.sg0 as the storage group if storage group level is 1
-# Datatype: int
-# default_storage_group_level=1
-
-# ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT
-
-# The data type to register a time series as when receiving the boolean string "true" or "false"
-# Datatype: TSDataType
-# boolean_string_infer_type=BOOLEAN
-
-# The data type to register a time series as when receiving an integer string such as "67"
-# Datatype: TSDataType
-# integer_string_infer_type=FLOAT
-
-# The data type to register a time series as when receiving an integer string whose value would lose precision as FLOAT
-# (num > 2 ^ 24)
-# Datatype: TSDataType
-# long_string_infer_type=DOUBLE
-
-# The data type to register a time series as when receiving a floating-point number string such as "6.7"
-# Datatype: TSDataType
-# floating_string_infer_type=FLOAT
-
-# The data type to register a time series as when receiving the literal NaN. Values can be DOUBLE, FLOAT or TEXT
-# Datatype: TSDataType
-# nan_string_infer_type=DOUBLE
-
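The sketch below restates the inference rules described above with the default settings hard-coded (BOOLEAN for boolean strings, FLOAT for small integers, DOUBLE above 2^24, FLOAT for floating-point strings, DOUBLE for NaN). It is illustrative only and does not reuse the DataNode's classes.

    public class TypeInferenceSketch {
      enum TSDataType { BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT }

      static TSDataType infer(String value) {
        if ("true".equalsIgnoreCase(value) || "false".equalsIgnoreCase(value)) {
          return TSDataType.BOOLEAN;                      // boolean_string_infer_type
        }
        if ("NaN".equals(value)) {
          return TSDataType.DOUBLE;                       // nan_string_infer_type
        }
        try {
          long integer = Long.parseLong(value);
          // Integers above 2^24 would lose precision as FLOAT, so they follow long_string_infer_type.
          return Math.abs(integer) > (1L << 24) ? TSDataType.DOUBLE : TSDataType.FLOAT;
        } catch (NumberFormatException notAnInteger) {
          // fall through and try to parse it as a floating-point number
        }
        try {
          Double.parseDouble(value);
          return TSDataType.FLOAT;                        // floating_string_infer_type
        } catch (NumberFormatException notANumber) {
          return TSDataType.TEXT;                         // anything else is registered as TEXT
        }
      }

      public static void main(String[] args) {
        System.out.println(infer("true"));       // BOOLEAN
        System.out.println(infer("67"));         // FLOAT
        System.out.println(infer("99999999"));   // DOUBLE (above 2^24)
        System.out.println(infer("6.7"));        // FLOAT
        System.out.println(infer("hello"));      // TEXT
      }
    }
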
-
-# BOOLEAN encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_boolean_encoding=RLE
-
-# INT32 encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_int32_encoding=RLE
-
-# INT64 encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_int64_encoding=RLE
-
-# FLOAT encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_float_encoding=GORILLA
-
-# DOUBLE encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_double_encoding=GORILLA
-
-# TEXT encoding when creating schema automatically is enabled
-# Datatype: TSEncoding
-# default_text_encoding=PLAIN
-
-####################
-### Configurations for tsfile-format
-####################
-
-# Datatype: int
-# group_size_in_byte=134217728
-
-# The memory size for each series writer to pack a page; default value is 64KB
-# Datatype: int
-# page_size_in_byte=65536
-
-# The maximum number of data points in a page, default 1024*1024
-# Datatype: int
-# max_number_of_points_in_page=1048576
-
-# The threshold for pattern matching in regex
-# Datatype: int
-# pattern_matching_threshold=1000000
-
-# Maximum length limit of an input string
-# Datatype: int
-# max_string_length=128
-
-# Floating-point precision
-# Datatype: int
-# float_precision=2
-
-# Encoder configuration
-# Encoder of time series; supports TS_2DIFF, PLAIN, RLE (run-length encoding) and REGULAR. Default value is TS_2DIFF
-# time_encoder=TS_2DIFF
-
-# Encoder of value series; default value is PLAIN.
-# For int and long data types, TS_2DIFF, RLE (run-length encoding), GORILLA and ZIGZAG are also supported.
-# value_encoder=PLAIN
-
-# Compression configuration
-# Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY
-# compressor=SNAPPY
-
-# Maximum degree of a metadataIndex node, default value is 256
-# Datatype: int
-# max_degree_of_index_node=256
-
-# time interval in minutes for calculating query frequency
-# Datatype: int
-# frequency_interval_in_minute=1
-
-# time cost(ms) threshold for slow query
-# Datatype: long
-# slow_query_threshold=5000
-
-# Signal-noise-ratio (SNR) of FREQ encoding
-# Datatype: double
-# freq_snr=40.0
-
-# Block size of FREQ encoding
-# Datatype: integer
-# freq_block_size=1024
-
-####################
-### MQTT Broker Configuration
-####################
-
-# whether to enable the mqtt service.
-# Datatype: boolean
-# enable_mqtt_service=false
-
-# the mqtt service binding host.
-# Datatype: String
-# mqtt_host=0.0.0.0
-
-# the mqtt service binding port.
-# Datatype: int
-# mqtt_port=1883
-
-# the handler pool size for handling the mqtt messages.
-# Datatype: int
-# mqtt_handler_pool_size=1
-
-# the mqtt message payload formatter.
-# Datatype: String
-# mqtt_payload_formatter=json
-
-# max length of mqtt message in byte
-# Datatype: int
-# mqtt_max_message_size=1048576
-
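If the MQTT service above is enabled with the default json payload formatter, a point can be written with any MQTT 3.1.1 client. The example below uses the Eclipse Paho client; the address, credentials and JSON layout are assumptions to verify against your deployment and IoTDB version, and the topic string is only a label for this example.

    import java.nio.charset.StandardCharsets;
    import org.eclipse.paho.client.mqttv3.MqttClient;
    import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
    import org.eclipse.paho.client.mqttv3.MqttException;
    import org.eclipse.paho.client.mqttv3.MqttMessage;

    public class MqttWriteSketch {
      public static void main(String[] args) throws MqttException {
        MqttClient client = new MqttClient("tcp://127.0.0.1:1883", "mqtt-write-sketch");
        MqttConnectOptions options = new MqttConnectOptions();
        options.setUserName("root");
        options.setPassword("root".toCharArray());
        client.connect(options);

        // One measurement point encoded as JSON for the json payload formatter.
        String payload =
            "{\"device\":\"root.sg.d1\",\"timestamp\":1586076045524,"
                + "\"measurements\":[\"s1\"],\"values\":[0.53]}";
        client.publish("root.sg.d1", new MqttMessage(payload.getBytes(StandardCharsets.UTF_8)));

        client.disconnect();
        client.close();
      }
    }
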
-####################
-### Authorization Configuration
-####################
-
-# which class to use for authorization. By default, it is LocalFileAuthorizer.
-# Another choice is org.apache.iotdb.db.auth.authorizer.OpenIdAuthorizer
-# authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer
-
-# If OpenIdAuthorizer is enabled, then openID_url must be set.
-# openID_url=
-
-# admin username, default is root
-# Datatype: string
-# admin_name=root
-
-# encryption provider class
-# iotdb_server_encrypt_decrypt_provider=org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt
-
-# encryption provider class parameter
-# iotdb_server_encrypt_decrypt_provider_parameter=
-
-# admin password, default is root
-# Datatype: string
-# admin_password=root
-
-# Cache size of user and role
-# Datatype: int
-# author_cache_size=1000
-
-# Cache expire time of user and role
-# Datatype: int
-# author_cache_expire_time=30
-
-####################
-### UDF Configuration
-####################
-
-# Used to estimate the memory usage of text fields in a UDF query.
-# It is recommended to set this value to be slightly larger than the average length of all text
-# records.
-# Datatype: int
-# udf_initial_byte_array_length_for_memory_control=48
-
-# How much memory may be used in ONE UDF query (in MB).
-# The upper limit is 20% of allocated memory for read.
-# Datatype: float
-# udf_memory_budget_in_mb=30.0
-
-# UDF memory allocation ratio.
-# The parameter form is a:b:c, where a, b, and c are integers.
-# udf_reader_transformer_collector_memory_proportion=1:1:1
-
-# Uncomment the following field to configure the udf root directory.
-# For the Windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is
-# absolute. Otherwise, it is relative.
-# udf_root_dir=ext\\udf
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# udf_root_dir=ext/udf
-
-####################
-### Trigger Configuration
-####################
-
-# The size of log buffer for every trigger management operation plan. If the size of a trigger
-# management operation plan is larger than this parameter, the trigger management operation plan
-# will be rejected by TriggerManager.
-# Datatype: int
-# tlog_buffer_size=1048576
-
-# Uncomment the following field to configure the trigger root directory.
-# For the Windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is
-# absolute. Otherwise, it is relative.
-# trigger_root_dir=ext\\trigger
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# trigger_root_dir=ext/trigger
-
-# temporary lib dir
-# If this property is unset, the system will save the data in the default relative path directory under
-# the Trigger folder (i.e., ext/temporary).
-#
-# If it is absolute, the system will save the data in the exact location it points to.
-# If it is relative, the system will save the data in the relative path directory it indicates under the
-# Trigger folder.
-# Note: If this property is assigned an empty string (i.e., zero-length), it will be handled as a relative
-# path.
-#
-# For the Windows platform
-# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is
-# absolute. Otherwise, it is relative.
-# trigger_temporary_lib_dir=ext\\trigger_temporary
-#
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# trigger_temporary_lib_dir=ext/trigger_temporary
-
-# How many threads can be used for evaluating sliding windows. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_window_evaluation_thread=0
-
-# Max number of window evaluation tasks that can be pending for execution. When <= 0, the value is
-# 64 by default.
-# Datatype: int
-# max_pending_window_evaluation_tasks=64
-
-# How many times we will retry to find an instance of a stateful trigger on DataNodes.
-# 3 by default.
-# Datatype: int
-# stateful_trigger_retry_num_when_not_found=3
-
-####################
-### Continuous Query Configuration
-####################
-
-# How many threads will be set up to perform continuous queries. When <= 0, use max(1, CPU core number / 2).
-# Datatype: int
-# continuous_query_execution_thread=2
-
-# Maximum number of continuous query tasks that can be pending for execution. When <= 0, the value is
-# 64 by default.
-# Datatype: int
-# max_pending_continuous_query_tasks=64
-
-# The minimum EVERY interval for performing a continuous query.
-# The EVERY interval of continuous query instances should not be lower than this limit.
-# Datatype: duration
-# continuous_query_min_every_interval=1s
-
-# The size of log buffer for every CQ management operation plan. If the size of a CQ
-# management operation plan is larger than this parameter, the CQ management operation plan
-# will be rejected by CQManager.
-# Datatype: int
-# cqlog_buffer_size=1048576
-
-####################
-### Select-Into Configuration
-####################
-
-# The maximum number of rows that can be processed in an insert-tablet-plan when executing select-into statements.
-# When <= 0, use 10000.
-# Datatype: int
-# select_into_insert_tablet_plan_row_limit=10000
-
-
-####################
-### Insert-Tablets Configuration
-####################
-
-# When the column count of an insert plan reaches this threshold, the plan is considered relatively large and multithreading may be enabled.
-# If the tablet is small, the time of each insertion is short.
-# If we enable multithreading, we also need to consider the switching cost between threads,
-# so the size of the tablet needs to be taken into account.
-# Datatype: int
-# insert_multi_tablet_enable_multithreading_column_threshold=10
-
-####################
-### Index Configuration
-####################
-
-# Uncomment the following fields to configure the index root directory.
-# For the Windows platform, the index dir is as follows:
-# index_root_dir=data\\index
-# For Linux platform
-# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
-# index_root_dir=datanode/index
-
-# Whether index is enabled
-# Datatype: boolean
-# enable_index=false
-
-# How many threads can concurrently build index. When <= 0, use CPU core number.
-# Datatype: int
-# concurrent_index_build_thread=0
-
-# the default size of the sliding window used for subsequence matching in the index framework
-# Datatype: int
-# default_index_window_range=10
-
-# buffer parameter for index processor.
-# Datatype: long
-# index_buffer_size=134217728
-
-# whether to enable data partitioning. If disabled, all data belongs to partition 0
-# Datatype: boolean
-# enable_partition=false
-
-# time range for partitioning data inside each data region, in milliseconds; the default is equal to one week
-# Datatype: long
-# time_partition_interval_for_storage=604800000
-
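A quick worked example of the partition interval above: with the default of 604800000 ms (one week), a timestamp maps to a partition by integer division, so all points from the same week land in the same partition. The snippet is purely illustrative arithmetic.

    public class TimePartitionSketch {
      public static void main(String[] args) {
        long timePartitionInterval = 604_800_000L;  // one week in milliseconds
        long timestamp = 1_666_915_200_000L;        // 2022-10-28T00:00:00Z in epoch milliseconds
        long partitionId = timestamp / timePartitionInterval;
        System.out.println("partition id = " + partitionId);  // prints 2756
      }
    }
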
-####################
-### Influx DB RPC Service Configuration
-####################
-# Datatype: boolean
-# enable_influxdb_rpc_service=false
-
-# Datatype: int
-# influxdb_rpc_port=8086
-
-####################
-### Group By Fill Configuration
-####################
-# Datatype: float
-# group_by_fill_cache_size_in_mb=1.0
-
-####################
-### Schema Engine Configuration
-####################
-# Choose the mode of the schema engine. The value can be Memory, Schema_File or Rocksdb_based. If the provided value doesn't match any pre-defined value, Memory mode will be used by default.
-# Datatype: string
-# schema_engine_mode=Memory
-
-# cache size for SchemaRegion.
-# This cache is used to improve insert speed: path checks and TSDataType are cached in SchemaRegion with the corresponding Path.
-# Datatype: int
-# schema_region_device_node_cache_size=10000
-
-# thread pool size for read operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_read_executor_size=20
-
-# thread pool size for write operation in DataNode's coordinator.
-# Datatype: int
-# coordinator_write_executor_size=50
-
-# cache size for partition.
-# This cache is used to improve partition fetch from config node.
-# Datatype: int
-# partition_cache_size=1000
-
-####################
-### Schema File Configuration
-####################
-# The minimum size (in bytes) allocated for a node in schema file
-# A large value makes it faster while occupying more space; a small value does the opposite
-# The default 0 means that if a flushed internal (entity) node has fewer than 20 children, it will get a segment whose size is calculated from the total size of its children
-# If it has no children, it will get a segment of 25 bytes, which is the size of the segment header
-# Datatype: short
-# minimum_schema_file_segment_in_bytes=0
-
-# The cache size for schema page in one schema file
-# A bigger cache makes it faster but costs more space and is more volatile when evicting items from the cache
-# Datatype: int
-# page_cache_in_schema_file=1024
-
-####################
-### Trigger Forward
-####################
-# Number of queues per forwarding trigger
-trigger_forward_max_queue_number=8
-# The maximum length of each queue per forwarding trigger
-trigger_forward_max_size_per_queue=2000
-# Trigger forwarding data size per batch
-trigger_forward_batch_size=50
-# Trigger HTTP forward pool size
-trigger_forward_http_pool_size=200
-# Trigger HTTP forward pool max connections per route
-trigger_forward_http_pool_max_per_route=20
-# Trigger MQTT forward pool size
-trigger_forward_mqtt_pool_size=4
-
-
-
-#######################
-### LocalConfigNode ###
-#######################
-
-# number of data regions per user-defined storage group
-# a data region is the unit of parallelism in memory as all ingestions in one data region are serialized
-# recommended value is [data region number] = [CPU core number] / [user-defined storage group number]
-# Datatype: int
-# data_region_num=1
-
-
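To make the recommendation above concrete, here is a sketch of the arithmetic; the storage group count is an assumed input, not something read from IoTDB.

    public class DataRegionNumSketch {
      public static void main(String[] args) {
        int cpuCores = Runtime.getRuntime().availableProcessors();
        int storageGroupNum = 4;  // assumed number of user-defined storage groups
        int recommended = Math.max(1, cpuCores / storageGroupNum);
        // e.g. 16 cores and 4 storage groups -> data_region_num=4 per storage group
        System.out.println("recommended data_region_num = " + recommended);
      }
    }
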
diff --git a/server/src/assembly/server.xml b/server/src/assembly/server.xml
index 874c14f285..d730398f9f 100644
--- a/server/src/assembly/server.xml
+++ b/server/src/assembly/server.xml
@@ -42,5 +42,9 @@
             <source>${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf/iotdb-datanode-metric.yml</source>
             <destName>conf/iotdb-datanode-metric.yml</destName>
         </file>
+        <file>
+            <source>${maven.multiModuleProjectDirectory}/node-commons/src/assembly/resources/conf/iotdb-common.properties</source>
+            <destName>conf/iotdb-common.properties</destName>
+        </file>
     </files>
 </assembly>
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 06522791d1..19946e5d87 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -817,13 +817,13 @@ public class IoTDBConfig {
 
   /**
   * whether to enable the rpc service. This parameter has no corresponding field in the
-   * iotdb-datanode.properties
+   * iotdb-common.properties
    */
   private boolean enableRpcService = true;
 
   /**
   * whether to enable the influxdb rpc service. This parameter has no corresponding field in the
-   * iotdb-datanode.properties
+   * iotdb-common.properties
    */
   private boolean enableInfluxDBRpcService = false;
 
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 5d4035f300..77487333ca 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.db.conf;
 
 import org.apache.iotdb.common.rpc.thrift.TEndPoint;
+import org.apache.iotdb.commons.conf.CommonConfig;
 import org.apache.iotdb.commons.conf.CommonDescriptor;
 import org.apache.iotdb.commons.conf.IoTDBConstant;
 import org.apache.iotdb.commons.exception.BadNodeUrlException;
@@ -104,25 +105,24 @@ public class IoTDBDescriptor {
    *
   * @return url object if location exists, otherwise null.
    */
-  public URL getPropsUrl() {
+  public URL getPropsUrl(String configFileName) {
     // Check if a config-directory was specified first.
     String urlString = System.getProperty(IoTDBConstant.IOTDB_CONF, null);
     // If it wasn't, check if a home directory was provided (This usually contains a config)
     if (urlString == null) {
       urlString = System.getProperty(IoTDBConstant.IOTDB_HOME, null);
       if (urlString != null) {
-        urlString =
-            urlString + File.separatorChar + "conf" + File.separatorChar + IoTDBConfig.CONFIG_NAME;
+        urlString = urlString + File.separatorChar + "conf" + File.separatorChar + configFileName;
       } else {
         // If this too wasn't provided, try to find a default config in the root of the classpath.
-        URL uri = IoTDBConfig.class.getResource("/" + IoTDBConfig.CONFIG_NAME);
+        URL uri = IoTDBConfig.class.getResource("/" + configFileName);
         if (uri != null) {
           return uri;
         }
         logger.warn(
             "Cannot find IOTDB_HOME or IOTDB_CONF environment variable when loading "
                 + "config file {}, use default configuration",
-            IoTDBConfig.CONFIG_NAME);
+            configFileName);
         // update all data seriesPath
         conf.updatePath();
         return null;
@@ -131,7 +131,7 @@ public class IoTDBDescriptor {
     // If a config location was provided, but it doesn't end with a properties file,
     // append the default location.
     else if (!urlString.endsWith(".properties")) {
-      urlString += (File.separatorChar + IoTDBConfig.CONFIG_NAME);
+      urlString += (File.separatorChar + configFileName);
     }
 
     // If the url doesn't start with "file:" or "classpath:", it's provided as a plain path.
@@ -149,12 +149,28 @@ public class IoTDBDescriptor {
   /** load a property file and set TsfileDBConfig variables. */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
   private void loadProps() {
-    URL url = getPropsUrl();
+    URL url = getPropsUrl(CommonConfig.CONFIG_NAME);
     if (url == null) {
       logger.warn("Couldn't load the configuration from any of the known sources.");
       return;
     }
+    try (InputStream inputStream = url.openStream()) {
+
+      logger.info("Start to read config file {}", url);
+      Properties properties = new Properties();
+      properties.load(inputStream);
+
+      loadProperties(properties);
+
+    } catch (FileNotFoundException e) {
+      logger.warn("Fail to find config file {}", url, e);
+    } catch (IOException e) {
+      logger.warn("Cannot load config file, use default configuration", e);
+    } catch (Exception e) {
+      logger.warn("Incorrect format in config file, use default configuration", e);
+    }
 
+    url = getPropsUrl(IoTDBConfig.CONFIG_NAME);
     try (InputStream inputStream = url.openStream()) {
 
       logger.info("Start to read config file {}", url);
@@ -1500,12 +1516,28 @@ public class IoTDBDescriptor {
   }
 
   public void loadHotModifiedProps() throws QueryProcessException {
-    URL url = getPropsUrl();
+    URL url = getPropsUrl(CommonConfig.CONFIG_NAME);
     if (url == null) {
       logger.warn("Couldn't load the configuration from any of the known sources.");
       return;
     }
 
+    try (InputStream inputStream = url.openStream()) {
+      logger.info("Start to reload config file {}", url);
+      Properties properties = new Properties();
+      properties.load(inputStream);
+      loadHotModifiedProps(properties);
+    } catch (Exception e) {
+      logger.warn("Fail to reload config file {}", url, e);
+      throw new QueryProcessException(
+          String.format("Fail to reload config file %s because %s", url, e.getMessage()));
+    }
+
+    url = getPropsUrl(IoTDBConfig.CONFIG_NAME);
+    if (url == null) {
+      logger.warn("Couldn't load the configuration from any of the known sources.");
+      return;
+    }
     try (InputStream inputStream = url.openStream()) {
       logger.info("Start to reload config file {}", url);
       Properties properties = new Properties();
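The descriptor changes above load iotdb-common.properties first and then iotdb-datanode.properties. The standalone sketch below illustrates that layering with plain java.util.Properties; the helper class is invented for illustration, and only the file names come from the diff.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Properties;

    public class LayeredConfigSketch {
      // Load the shared file first, then the node-specific file, so keys defined in both
      // end up with the node-specific value.
      public static Properties load(Path confDir) throws IOException {
        Properties merged = new Properties();
        for (String name : new String[] {"iotdb-common.properties", "iotdb-datanode.properties"}) {
          Path file = confDir.resolve(name);
          if (!Files.exists(file)) {
            continue;  // a missing file simply leaves the defaults in place
          }
          try (InputStream in = Files.newInputStream(file)) {
            merged.load(in);
          }
        }
        return merged;
      }

      public static void main(String[] args) throws IOException {
        Path confDir = Paths.get(args.length > 0 ? args[0] : "conf");
        Properties props = load(confDir);
        System.out.println(props.getProperty("mqtt_port", "1883"));
      }
    }
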
diff --git a/server/src/main/java/org/apache/iotdb/db/exception/query/PathNumOverLimitException.java b/server/src/main/java/org/apache/iotdb/db/exception/query/PathNumOverLimitException.java
index 63c3efd982..27f208f99f 100644
--- a/server/src/main/java/org/apache/iotdb/db/exception/query/PathNumOverLimitException.java
+++ b/server/src/main/java/org/apache/iotdb/db/exception/query/PathNumOverLimitException.java
@@ -27,7 +27,7 @@ public class PathNumOverLimitException extends QueryProcessException {
     super(
         String.format(
             "Too many paths in one query! Currently allowed max deduplicated path number is %d. "
-                + "Please use slimit or adjust max_deduplicated_path_num in iotdb-datanode.properties.",
+                + "Please use slimit or adjust max_deduplicated_path_num in iotdb-common.properties.",
             IoTDBDescriptor.getInstance().getConfig().getMaxQueryDeduplicatedPathNum()));
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/exception/sql/PathNumOverLimitException.java b/server/src/main/java/org/apache/iotdb/db/exception/sql/PathNumOverLimitException.java
index 4e5b85b659..4160f06e95 100644
--- a/server/src/main/java/org/apache/iotdb/db/exception/sql/PathNumOverLimitException.java
+++ b/server/src/main/java/org/apache/iotdb/db/exception/sql/PathNumOverLimitException.java
@@ -27,7 +27,7 @@ public class PathNumOverLimitException extends SemanticException {
     super(
         String.format(
             "Too many paths in one query! Currently allowed max deduplicated path number is %d. "
-                + "Please use slimit or adjust max_deduplicated_path_num in iotdb-datanode.properties.",
+                + "Please use slimit or adjust max_deduplicated_path_num in iotdb-common.properties.",
             IoTDBDescriptor.getInstance().getConfig().getMaxQueryDeduplicatedPathNum()));
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/tag/TagLogFile.java b/server/src/main/java/org/apache/iotdb/db/metadata/tag/TagLogFile.java
index b3cf924efd..07d47d8400 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/tag/TagLogFile.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/tag/TagLogFile.java
@@ -45,7 +45,7 @@ public class TagLogFile implements AutoCloseable {
   private FileChannel fileChannel;
   private static final String LENGTH_EXCEED_MSG =
       "Tag/Attribute exceeds the max length limit. "
-          + "Please enlarge tag_attribute_total_size in iotdb-datanode.properties";
+          + "Please enlarge tag_attribute_total_size in iotdb-common.properties";
 
   private static final int MAX_LENGTH =
       IoTDBDescriptor.getInstance().getConfig().getTagAttributeTotalSize();
diff --git a/server/src/test/java/org/apache/iotdb/db/conf/IoTDBDescriptorTest.java b/server/src/test/java/org/apache/iotdb/db/conf/IoTDBDescriptorTest.java
index 76d68a5bf6..23f0a459ea 100644
--- a/server/src/test/java/org/apache/iotdb/db/conf/IoTDBDescriptorTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/conf/IoTDBDescriptorTest.java
@@ -50,7 +50,7 @@ public class IoTDBDescriptorTest {
     String pathString = "file:/usr/local/bin";
 
     System.setProperty(IoTDBConstant.IOTDB_CONF, pathString);
-    URL confURL = desc.getPropsUrl();
+    URL confURL = desc.getPropsUrl(IoTDBConfig.CONFIG_NAME);
     Assert.assertTrue(confURL.toString().startsWith(pathString));
   }
 
@@ -60,7 +60,7 @@ public class IoTDBDescriptorTest {
 
     String pathString = "classpath:/root/path";
     System.setProperty(IoTDBConstant.IOTDB_CONF, pathString);
-    URL confURL = desc.getPropsUrl();
+    URL confURL = desc.getPropsUrl(IoTDBConfig.CONFIG_NAME);
     Assert.assertTrue(confURL.toString().startsWith(pathString));
   }
 
@@ -71,7 +71,7 @@ public class IoTDBDescriptorTest {
     // filePath is a plain path string
     String filePath = path.getFile();
     System.setProperty(IoTDBConstant.IOTDB_CONF, filePath);
-    URL confURL = desc.getPropsUrl();
+    URL confURL = desc.getPropsUrl(IoTDBConfig.CONFIG_NAME);
     Assert.assertEquals(confURL.toString(), path.toString());
   }
 }