Posted to commits@iotdb.apache.org by ji...@apache.org on 2021/02/15 02:08:30 UTC

[iotdb] 02/02: Apply Google Code Style

This is an automated email from the ASF dual-hosted git repository.

jincheng pushed a commit to branch spotless
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit b72baf47c7d1ecb05a0908c836bffa83567502f4
Author: sunjincheng121 <su...@gmail.com>
AuthorDate: Mon Feb 15 09:59:03 2021 +0800

    Apply Google Code Style
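
    The branch name suggests the reformatting is driven by the Spotless build
    plugin (whose spotless:apply goal rewrites sources in place), presumably
    backed by google-java-format. As a minimal sketch of the conventions this
    enforces -- the class below is hypothetical, not part of this commit --
    Google Java Style uses two-space indentation, K&R braces, one statement
    per line, and a 100-column limit:

        // Hypothetical example, not from the IoTDB codebase: the shape
        // google-java-format leaves Java sources in after reformatting.
        package org.apache.iotdb.example;

        public class GoogleStyleExample {

          // Two-space indentation, K&R ("Egyptian") braces, and lines
          // wrapped at 100 columns.
          public static int add(int a, int b) {
            return a + b;
          }

          public static void main(String[] args) {
            System.out.println(add(1, 2)); // prints 3
          }
        }

    The diffstat below is therefore whitespace- and layout-only churn: large
    line counts reflect reindentation and rewrapping, not behavior changes.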
---
 .../java/org/apache/iotdb/cli/AbstractCli.java     |  203 ++-
 cli/src/main/java/org/apache/iotdb/cli/Cli.java    |   24 +-
 cli/src/main/java/org/apache/iotdb/cli/WinCli.java |   27 +-
 .../org/apache/iotdb/cli/utils/IoTPrinter.java     |   12 +-
 .../apache/iotdb/exception/ArgsErrorException.java |    1 -
 .../org/apache/iotdb/tool/AbstractCsvTool.java     |   94 +-
 .../main/java/org/apache/iotdb/tool/ExportCsv.java |   87 +-
 .../main/java/org/apache/iotdb/tool/ImportCsv.java |  107 +-
 .../java/org/apache/iotdb/cli/AbstractCliIT.java   |  167 ++-
 .../java/org/apache/iotdb/cli/AbstractScript.java  |   14 +-
 .../org/apache/iotdb/cli/StartClientScriptIT.java  |   65 +-
 .../org/apache/iotdb/tool/CsvLineSplitTest.java    |    7 +-
 .../org/apache/iotdb/tool/ExportCsvTestIT.java     |   69 +-
 .../org/apache/iotdb/tool/ImportCsvTestIT.java     |   66 +-
 .../java/org/apache/iotdb/cluster/ClientMain.java  |  168 ++-
 .../iotdb/cluster/ClusterFileFlushPolicy.java      |   37 +-
 .../java/org/apache/iotdb/cluster/ClusterMain.java |  165 ++-
 .../apache/iotdb/cluster/RemoteTsFileResource.java |   12 +-
 .../iotdb/cluster/client/DataClientProvider.java   |    5 +-
 .../cluster/client/async/AsyncClientFactory.java   |    5 +-
 .../cluster/client/async/AsyncClientPool.java      |   39 +-
 .../cluster/client/async/AsyncDataClient.java      |   30 +-
 .../client/async/AsyncDataHeartbeatClient.java     |   30 +-
 .../cluster/client/async/AsyncMetaClient.java      |   30 +-
 .../client/async/AsyncMetaHeartbeatClient.java     |   31 +-
 .../cluster/client/sync/SyncClientAdaptor.java     |  130 +-
 .../cluster/client/sync/SyncClientFactory.java     |    1 +
 .../iotdb/cluster/client/sync/SyncClientPool.java  |   22 +-
 .../iotdb/cluster/client/sync/SyncDataClient.java  |   14 +-
 .../client/sync/SyncDataHeartbeatClient.java       |   23 +-
 .../iotdb/cluster/client/sync/SyncMetaClient.java  |    7 +-
 .../client/sync/SyncMetaHeartbeatClient.java       |   23 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |   56 +-
 .../iotdb/cluster/config/ClusterConstant.java      |    1 +
 .../iotdb/cluster/config/ClusterDescriptor.java    |  261 ++--
 .../iotdb/cluster/config/ConsistencyLevel.java     |   12 +-
 .../iotdb/cluster/coordinator/Coordinator.java     |  238 +--
 .../iotdb/cluster/exception/AddSelfException.java  |    4 +-
 .../exception/BadSeedUrlFormatException.java       |    7 +-
 .../exception/CheckConsistencyException.java       |    3 +-
 .../exception/ConfigInconsistentException.java     |    5 +-
 .../cluster/exception/EntryCompactedException.java |   11 +-
 .../exception/EntryUnavailableException.java       |    8 +-
 .../cluster/exception/LeaderUnknownException.java  |    4 +-
 .../cluster/exception/MemberReadOnlyException.java |    9 +-
 .../cluster/exception/NoHeaderNodeException.java   |    4 +-
 .../cluster/exception/NotInSameGroupException.java |    7 +-
 .../cluster/exception/NotManagedSlotException.java |    4 +-
 .../PartitionTableUnavailableException.java        |    4 +-
 .../iotdb/cluster/exception/PullFileException.java |    5 +-
 .../cluster/exception/RequestTimeOutException.java |    4 +-
 .../exception/SnapshotInstallationException.java   |    5 +-
 .../exception/TruncateCommittedEntryException.java |    7 +-
 .../cluster/exception/UnknownLogTypeException.java |    4 +-
 .../apache/iotdb/cluster/log/CommitLogTask.java    |   10 +-
 .../org/apache/iotdb/cluster/log/HardState.java    |   21 +-
 .../java/org/apache/iotdb/cluster/log/Log.java     |   13 +-
 .../org/apache/iotdb/cluster/log/LogApplier.java   |    9 +-
 .../apache/iotdb/cluster/log/LogDispatcher.java    |  155 +-
 .../org/apache/iotdb/cluster/log/LogParser.java    |    4 +-
 .../org/apache/iotdb/cluster/log/Snapshot.java     |   12 +-
 .../iotdb/cluster/log/StableEntryManager.java      |    2 +-
 .../cluster/log/applier/AsyncDataLogApplier.java   |   31 +-
 .../iotdb/cluster/log/applier/BaseApplier.java     |   14 +-
 .../iotdb/cluster/log/applier/DataLogApplier.java  |   14 +-
 .../iotdb/cluster/log/applier/MetaLogApplier.java  |    4 +-
 .../iotdb/cluster/log/catchup/CatchUpTask.java     |   91 +-
 .../iotdb/cluster/log/catchup/LogCatchUpTask.java  |   35 +-
 .../cluster/log/catchup/SnapshotCatchUpTask.java   |   23 +-
 .../iotdb/cluster/log/logtypes/AddNodeLog.java     |    4 +-
 .../iotdb/cluster/log/logtypes/CloseFileLog.java   |   22 +-
 .../cluster/log/logtypes/EmptyContentLog.java      |    3 +-
 .../iotdb/cluster/log/logtypes/LargeTestLog.java   |    9 +-
 .../cluster/log/logtypes/PhysicalPlanLog.java      |   15 +-
 .../iotdb/cluster/log/logtypes/RemoveNodeLog.java  |   93 +-
 .../cluster/log/manage/CommittedEntryManager.java  |   40 +-
 .../manage/FilePartitionedSnapshotLogManager.java  |   67 +-
 .../log/manage/MetaSingleSnapshotLogManager.java   |    9 +-
 .../log/manage/PartitionedSnapshotLogManager.java  |   20 +-
 .../iotdb/cluster/log/manage/RaftLogManager.java   |  285 ++--
 .../log/manage/UnCommittedEntryManager.java        |   51 +-
 .../log/manage/serializable/LogManagerMeta.java    |   15 +-
 .../serializable/SyncLogDequeSerializer.java       |  450 +++---
 .../iotdb/cluster/log/snapshot/FileSnapshot.java   |  142 +-
 .../cluster/log/snapshot/MetaSimpleSnapshot.java   |   36 +-
 .../cluster/log/snapshot/PartitionedSnapshot.java  |   32 +-
 .../cluster/log/snapshot/PullSnapshotTask.java     |   59 +-
 .../log/snapshot/PullSnapshotTaskDescriptor.java   |   17 +-
 .../cluster/log/snapshot/SnapshotInstaller.java    |    1 -
 .../apache/iotdb/cluster/metadata/CMManager.java   |  570 ++++---
 .../apache/iotdb/cluster/metadata/MetaPuller.java  |  110 +-
 .../cluster/partition/NodeAdditionResult.java      |    4 +-
 .../iotdb/cluster/partition/NodeRemovalResult.java |    4 +-
 .../iotdb/cluster/partition/PartitionGroup.java    |    8 +-
 .../iotdb/cluster/partition/PartitionTable.java    |   20 +-
 .../iotdb/cluster/partition/slot/SlotManager.java  |   39 +-
 .../partition/slot/SlotNodeAdditionResult.java     |    7 +-
 .../partition/slot/SlotNodeRemovalResult.java      |    4 +-
 .../cluster/partition/slot/SlotPartitionTable.java |   36 +-
 .../iotdb/cluster/partition/slot/SlotStrategy.java |    9 +-
 .../cluster/query/ClusterDataQueryExecutor.java    |   26 +-
 .../cluster/query/ClusterPhysicalGenerator.java    |    4 +-
 .../iotdb/cluster/query/ClusterPlanExecutor.java   |  209 +--
 .../iotdb/cluster/query/ClusterPlanRouter.java     |  110 +-
 .../apache/iotdb/cluster/query/ClusterPlanner.java |   14 +-
 .../iotdb/cluster/query/ClusterQueryRouter.java    |   15 +-
 .../iotdb/cluster/query/LocalQueryExecutor.java    |  295 ++--
 .../iotdb/cluster/query/RemoteQueryContext.java    |   12 +-
 .../query/aggregate/ClusterAggregateExecutor.java  |   29 +-
 .../cluster/query/aggregate/ClusterAggregator.java |  139 +-
 .../query/dataset/ClusterAlignByDeviceDataSet.java |   12 +-
 .../cluster/query/fill/ClusterFillExecutor.java    |   12 +-
 .../cluster/query/fill/ClusterLinearFill.java      |   27 +-
 .../cluster/query/fill/ClusterPreviousFill.java    |  126 +-
 .../cluster/query/fill/PreviousFillArguments.java  |   12 +-
 .../iotdb/cluster/query/filter/SlotSgFilter.java   |    5 +-
 .../cluster/query/filter/SlotTsFileFilter.java     |   17 +-
 .../groupby/ClusterGroupByNoVFilterDataSet.java    |   25 +-
 .../groupby/ClusterGroupByVFilterDataSet.java      |   29 +-
 .../query/groupby/MergeGroupByExecutor.java        |   32 +-
 .../query/groupby/RemoteGroupByExecutor.java       |   62 +-
 .../query/last/ClusterLastQueryExecutor.java       |   95 +-
 .../cluster/query/manage/ClusterQueryManager.java  |   18 +-
 .../cluster/query/manage/QueryCoordinator.java     |    1 -
 .../cluster/query/reader/ClusterReaderFactory.java |  412 +++--
 .../cluster/query/reader/ClusterTimeGenerator.java |   30 +-
 .../iotdb/cluster/query/reader/DataSourceInfo.java |   46 +-
 .../iotdb/cluster/query/reader/EmptyReader.java    |   14 +-
 .../cluster/query/reader/MergedReaderByTime.java   |    3 +-
 .../reader/RemoteSeriesReaderByTimestamp.java      |   21 +-
 .../query/reader/RemoteSimpleSeriesReader.java     |   32 +-
 .../apache/iotdb/cluster/server/ClientServer.java  |   71 +-
 .../iotdb/cluster/server/DataClusterServer.java    |  231 +--
 .../iotdb/cluster/server/HardLinkCleaner.java      |    5 +-
 .../iotdb/cluster/server/MetaClusterServer.java    |   40 +-
 .../cluster/server/PullSnapshotHintService.java    |    4 +-
 .../apache/iotdb/cluster/server/RaftServer.java    |   45 +-
 .../org/apache/iotdb/cluster/server/Response.java  |    1 -
 .../iotdb/cluster/server/StoppedMemberManager.java |   23 +-
 .../handlers/caller/AppendGroupEntryHandler.java   |   18 +-
 .../handlers/caller/AppendNodeEntryHandler.java    |   46 +-
 .../server/handlers/caller/ElectionHandler.java    |   33 +-
 .../server/handlers/caller/GenericHandler.java     |    7 +-
 .../caller/GetChildNodeNextLevelPathHandler.java   |    5 +-
 .../handlers/caller/GetNodesListHandler.java       |    3 +-
 .../server/handlers/caller/HeartbeatHandler.java   |   48 +-
 .../handlers/caller/LogCatchUpInBatchHandler.java  |   25 +-
 .../server/handlers/caller/NodeStatusHandler.java  |    3 +-
 .../handlers/caller/PreviousFillHandler.java       |    3 +-
 .../caller/PullMeasurementSchemaHandler.java       |    4 +-
 .../handlers/caller/PullSnapshotHandler.java       |   15 +-
 .../caller/PullTimeseriesSchemaHandler.java        |    4 +-
 .../handlers/caller/SnapshotCatchUpHandler.java    |    7 +-
 .../server/handlers/caller/package-info.java       |    5 +-
 .../server/heartbeat/DataHeartbeatServer.java      |   21 +-
 .../server/heartbeat/DataHeartbeatThread.java      |   14 +-
 .../cluster/server/heartbeat/HeartbeatServer.java  |   60 +-
 .../cluster/server/heartbeat/HeartbeatThread.java  |  125 +-
 .../server/heartbeat/MetaHeartbeatServer.java      |   22 +-
 .../cluster/server/member/DataGroupMember.java     |  208 ++-
 .../cluster/server/member/MetaGroupMember.java     |  377 ++---
 .../iotdb/cluster/server/member/RaftMember.java    |  467 +++---
 .../iotdb/cluster/server/member/package-info.java  |    6 +-
 .../iotdb/cluster/server/monitor/NodeReport.java   |  202 ++-
 .../iotdb/cluster/server/monitor/NodeStatus.java   |   15 +-
 .../cluster/server/monitor/NodeStatusManager.java  |   11 +-
 .../apache/iotdb/cluster/server/monitor/Timer.java |  212 ++-
 .../cluster/server/service/BaseAsyncService.java   |   25 +-
 .../cluster/server/service/BaseSyncService.java    |    7 +-
 .../cluster/server/service/DataAsyncService.java   |  140 +-
 .../cluster/server/service/DataSyncService.java    |   29 +-
 .../cluster/server/service/MetaAsyncService.java   |   22 +-
 .../cluster/server/service/MetaSyncService.java    |    4 +-
 .../iotdb/cluster/server/service/package-info.java |    2 +-
 .../apache/iotdb/cluster/utils/ClientUtils.java    |    2 +-
 .../iotdb/cluster/utils/ClusterConsistent.java     |    8 +-
 .../apache/iotdb/cluster/utils/ClusterNode.java    |   26 +-
 .../iotdb/cluster/utils/ClusterQueryUtils.java     |   13 +-
 .../apache/iotdb/cluster/utils/ClusterUtils.java   |  133 +-
 .../apache/iotdb/cluster/utils/PartitionUtils.java |   58 +-
 .../apache/iotdb/cluster/utils/StatusUtils.java    |   14 +-
 .../cluster/utils/nodetool/ClusterMonitor.java     |   45 +-
 .../utils/nodetool/ClusterMonitorMBean.java        |   36 +-
 .../iotdb/cluster/utils/nodetool/NodeTool.java     |   12 +-
 .../iotdb/cluster/utils/nodetool/Printer.java      |   32 +-
 .../cluster/utils/nodetool/function/Host.java      |   33 +-
 .../cluster/utils/nodetool/function/LogView.java   |   16 +-
 .../utils/nodetool/function/NodeToolCmd.java       |   42 +-
 .../cluster/utils/nodetool/function/Partition.java |   68 +-
 .../cluster/utils/nodetool/function/Ring.java      |   29 +-
 .../cluster/utils/nodetool/function/Status.java    |   36 +-
 .../cluster/client/DataClientProviderTest.java     |   22 +-
 .../cluster/client/async/AsyncClientPoolTest.java  |   66 +-
 .../cluster/client/async/AsyncDataClientTest.java  |   35 +-
 .../client/async/AsyncDataHeartbeatClientTest.java |    3 +-
 .../cluster/client/async/AsyncMetaClientTest.java  |   35 +-
 .../client/async/AsyncMetaHeartbeatClientTest.java |    6 +-
 .../cluster/client/sync/SyncClientAdaptorTest.java |  534 ++++---
 .../cluster/client/sync/SyncClientPoolTest.java    |   43 +-
 .../cluster/client/sync/SyncDataClientTest.java    |   42 +-
 .../client/sync/SyncDataHeartbeatClientTest.java   |   23 +-
 .../cluster/client/sync/SyncMetaClientTest.java    |   24 +-
 .../client/sync/SyncMetaHeartbeatClientTest.java   |   23 +-
 .../org/apache/iotdb/cluster/common/IoTDBTest.java |   35 +-
 .../iotdb/cluster/common/TestAsyncClient.java      |    8 +-
 .../cluster/common/TestAsyncClientFactory.java     |    7 +-
 .../iotdb/cluster/common/TestAsyncDataClient.java  |  196 ++-
 .../iotdb/cluster/common/TestAsyncMetaClient.java  |    7 +-
 .../org/apache/iotdb/cluster/common/TestLog.java   |    3 +-
 .../iotdb/cluster/common/TestLogApplier.java       |    7 +-
 .../iotdb/cluster/common/TestLogManager.java       |    6 +-
 .../cluster/common/TestManagedSeriesReader.java    |    1 -
 .../iotdb/cluster/common/TestMetaGroupMember.java  |    4 +-
 .../cluster/common/TestPartitionedLogManager.java  |   23 +-
 .../apache/iotdb/cluster/common/TestSnapshot.java  |    5 +-
 .../iotdb/cluster/common/TestSyncClient.java       |    1 -
 .../cluster/common/TestSyncClientFactory.java      |   58 +-
 .../org/apache/iotdb/cluster/common/TestUtils.java |   87 +-
 .../cluster/integration/BaseSingleNodeTest.java    |    7 +-
 .../iotdb/cluster/integration/SingleNodeTest.java  |   20 +-
 .../iotdb/cluster/log/CommitLogCallbackTest.java   |   12 +-
 .../iotdb/cluster/log/CommitLogTaskTest.java       |   25 +-
 .../apache/iotdb/cluster/log/HardStateTest.java    |    2 +-
 .../iotdb/cluster/log/LogDispatcherTest.java       |  102 +-
 .../apache/iotdb/cluster/log/LogParserTest.java    |    2 +-
 .../log/applier/AsyncDataLogApplierTest.java       |  124 +-
 .../cluster/log/applier/DataLogApplierTest.java    |  203 +--
 .../cluster/log/applier/MetaLogApplierTest.java    |   48 +-
 .../iotdb/cluster/log/catchup/CatchUpTaskTest.java |  127 +-
 .../cluster/log/catchup/LogCatchUpTaskTest.java    |  142 +-
 .../log/catchup/SnapshotCatchUpTaskTest.java       |  134 +-
 .../cluster/log/logtypes/SerializeLogTest.java     |   37 +-
 .../log/manage/CommittedEntryManagerTest.java      |  928 +++++++-----
 .../FilePartitionedSnapshotLogManagerTest.java     |   19 +-
 .../manage/MetaSingleSnapshotLogManagerTest.java   |    9 +-
 .../cluster/log/manage/RaftLogManagerTest.java     | 1357 +++++++++++------
 .../log/manage/UnCommittedEntryManagerTest.java    |  683 ++++++---
 .../serializable/SyncLogDequeSerializerTest.java   |  137 +-
 .../cluster/log/snapshot/DataSnapshotTest.java     |  163 +-
 .../cluster/log/snapshot/FileSnapshotTest.java     |   82 +-
 .../log/snapshot/MetaSimpleSnapshotTest.java       |   26 +-
 .../log/snapshot/PartitionedSnapshotTest.java      |   27 +-
 .../snapshot/PullSnapshotTaskDescriptorTest.java   |    2 +-
 .../cluster/log/snapshot/PullSnapshotTaskTest.java |  225 +--
 .../iotdb/cluster/log/snapshot/SimpleSnapshot.java |    4 +-
 .../iotdb/cluster/partition/MManagerWhiteBox.java  |   13 +-
 .../iotdb/cluster/partition/SlotManagerTest.java   |   52 +-
 .../cluster/partition/SlotPartitionTableTest.java  |  230 +--
 .../apache/iotdb/cluster/query/BaseQueryTest.java  |   23 +-
 .../query/ClusterAggregateExecutorTest.java        |   77 +-
 .../query/ClusterDataQueryExecutorTest.java        |   16 +-
 .../cluster/query/ClusterFillExecutorTest.java     |   67 +-
 .../query/ClusterPhysicalGeneratorTest.java        |   10 +-
 .../cluster/query/ClusterPlanExecutorTest.java     |    9 +-
 .../iotdb/cluster/query/ClusterPlannerTest.java    |    2 +-
 .../cluster/query/ClusterQueryRouterTest.java      |  129 +-
 .../iotdb/cluster/query/LoadConfigurationTest.java |  246 +--
 .../ClusterGroupByNoVFilterDataSetTest.java        |   17 +-
 .../groupby/ClusterGroupByVFilterDataSetTest.java  |   27 +-
 .../query/groupby/MergeGroupByExecutorTest.java    |   32 +-
 .../query/groupby/RemoteGroupByExecutorTest.java   |   41 +-
 .../query/manage/ClusterQueryManagerTest.java      |  217 ++-
 .../cluster/query/manage/QueryCoordinatorTest.java |   55 +-
 .../query/reader/ClusterTimeGeneratorTest.java     |   15 +-
 .../cluster/query/reader/DatasourceInfoTest.java   |   26 +-
 .../reader/RemoteSeriesReaderByTimestampTest.java  |  124 +-
 .../query/reader/RemoteSimpleSeriesReaderTest.java |   84 +-
 .../caller/AppendGroupEntryHandlerTest.java        |   51 +-
 .../caller/AppendNodeEntryHandlerTest.java         |    6 +-
 .../handlers/caller/ElectionHandlerTest.java       |   70 +-
 .../server/handlers/caller/GenericHandlerTest.java |    3 +-
 .../handlers/caller/HeartbeatHandlerTest.java      |   24 +-
 .../handlers/caller/JoinClusterHandlerTest.java    |    3 +-
 .../handlers/caller/LogCatchUpHandlerTest.java     |    4 +-
 .../caller/PullMeasurementSchemaHandlerTest.java   |   43 +-
 .../handlers/caller/PullSnapshotHandlerTest.java   |   23 +-
 .../caller/SnapshotCatchUpHandlerTest.java         |    3 +-
 .../handlers/forwarder/ForwardPlanHandlerTest.java |    2 +-
 .../server/heartbeat/DataHeartbeatThreadTest.java  |   78 +-
 .../server/heartbeat/HeartbeatThreadTest.java      |   87 +-
 .../server/heartbeat/MetaHeartbeatThreadTest.java  |  186 +--
 .../cluster/server/member/DataGroupMemberTest.java |  449 +++---
 .../iotdb/cluster/server/member/MemberTest.java    |  277 ++--
 .../cluster/server/member/MetaGroupMemberTest.java |  696 +++++----
 .../iotdb/cluster/utils/SerializeUtilTest.java     |   12 +-
 .../tests/tools/importCsv/AbstractScript.java      |   29 +-
 .../tests/tools/importCsv/ExportCsvTestIT.java     |   90 +-
 .../tests/tools/importCsv/ImportCsvTestIT.java     |  129 +-
 .../org/apache/iotdb/flink/FlinkIoTDBSink.java     |  102 +-
 .../apache/iotdb/flink/FlinkTsFileBatchSink.java   |  149 +-
 .../apache/iotdb/flink/FlinkTsFileBatchSource.java |   86 +-
 .../apache/iotdb/flink/FlinkTsFileStreamSink.java  |  151 +-
 .../iotdb/flink/FlinkTsFileStreamSource.java       |   92 +-
 .../java/org/apache/iotdb/flink/TsFileUtils.java   |   42 +-
 .../org/apache/iotdb/hadoop/tsfile/Constant.java   |    5 +-
 .../iotdb/hadoop/tsfile/TSFMRReadExample.java      |   35 +-
 .../iotdb/hadoop/tsfile/TSMRWriteExample.java      |   47 +-
 .../apache/iotdb/hadoop/tsfile/TsFileHelper.java   |   20 +-
 .../iotdb/hadoop/tsfile/TsFileWriteToHDFS.java     |    9 +-
 .../main/java/org/apache/iotdb/JDBCExample.java    |   31 +-
 .../org/apache/iotdb/PrepareStatementDemo.java     |   17 +-
 .../main/java/org/apache/iotdb/kafka/Constant.java |   57 +-
 .../java/org/apache/iotdb/kafka/KafkaConsumer.java |   27 +-
 .../apache/iotdb/kafka/KafkaConsumerThread.java    |   28 +-
 .../java/org/apache/iotdb/kafka/KafkaProducer.java |    6 +-
 .../java/org/apache/iotdb/mqtt/MQTTClient.java     |   44 +-
 .../java/org/apache/iotdb/pulsar/Constant.java     |    4 +-
 .../org/apache/iotdb/pulsar/PulsarConsumer.java    |   42 +-
 .../apache/iotdb/pulsar/PulsarConsumerThread.java  |   17 +-
 .../org/apache/iotdb/pulsar/PulsarProducer.java    |   72 +-
 .../java/org/apache/iotdb/rocketmq/Constant.java   |   55 +-
 .../apache/iotdb/rocketmq/RocketMQConsumer.java    |   69 +-
 .../apache/iotdb/rocketmq/RocketMQProducer.java    |   41 +-
 .../main/java/org/apache/iotdb/rocketmq/Utils.java |    3 +-
 .../org/apache/iotdb/DataMigrationExample.java     |   32 +-
 .../main/java/org/apache/iotdb/SessionExample.java |   64 +-
 .../java/org/apache/iotdb/SessionPoolExample.java  |   77 +-
 .../java/org/apache/iotdb/tsfile/Constant.java     |    5 +-
 .../iotdb/tsfile/TsFileForceAppendWrite.java       |   19 +-
 .../java/org/apache/iotdb/tsfile/TsFileRead.java   |   34 +-
 .../apache/iotdb/tsfile/TsFileSequenceRead.java    |   47 +-
 .../iotdb/tsfile/TsFileWriteWithTSRecord.java      |   15 +-
 .../apache/iotdb/tsfile/TsFileWriteWithTablet.java |   16 +-
 .../main/java/org/apache/iotdb/flink/Event.java    |   10 +-
 .../java/org/apache/iotdb/flink/IoTDBOptions.java  |  215 +--
 .../java/org/apache/iotdb/flink/IoTDBSink.java     |   52 +-
 .../apache/iotdb/flink/IoTSerializationSchema.java |    7 +-
 .../flink/DefaultIoTSerializationSchemaTest.java   |   37 +-
 .../iotdb/flink/IoTDBSinkBatchInsertTest.java      |   10 +-
 .../iotdb/flink/IoTDBSinkBatchTimerTest.java       |    5 +-
 .../apache/iotdb/flink/IoTDBSinkInsertTest.java    |    5 +-
 .../apache/iotdb/flink/tsfile/RowRecordParser.java |   29 +-
 .../iotdb/flink/tsfile/RowRowRecordParser.java     |  164 +-
 .../iotdb/flink/tsfile/RowTSRecordConverter.java   |  236 +--
 .../iotdb/flink/tsfile/TSRecordConverter.java      |   53 +-
 .../iotdb/flink/tsfile/TSRecordOutputFormat.java   |  126 +-
 .../iotdb/flink/tsfile/TsFileInputFormat.java      |  243 +--
 .../iotdb/flink/tsfile/TsFileOutputFormat.java     |  166 +--
 .../iotdb/flink/tsfile/util/TSFileConfigUtil.java  |   76 +-
 .../tsfile/RowTSRecordOutputFormatITCase.java      |   53 +-
 .../flink/tsfile/RowTSRecordOutputFormatTest.java  |   85 +-
 .../flink/tsfile/RowTsFileConnectorTestBase.java   |   88 +-
 .../flink/tsfile/RowTsFileInputFormatITCase.java   |  144 +-
 .../flink/tsfile/RowTsFileInputFormatTest.java     |  104 +-
 .../flink/tsfile/RowTsFileInputFormatTestBase.java |   39 +-
 .../tsfile/RowTsFileOutputFormatTestBase.java      |  143 +-
 .../util/TSFileConfigUtilCompletenessTest.java     |   82 +-
 .../apache/iotdb/flink/util/TsFileWriteUtil.java   |   22 +-
 .../web/grafana/TsfileWebDemoApplication.java      |    2 +-
 .../apache/iotdb/web/grafana/bean/TimeValues.java  |    4 +-
 .../iotdb/web/grafana/conf/MyConfiguration.java    |   10 +-
 .../controller/DatabaseConnectController.java      |   24 +-
 .../org/apache/iotdb/web/grafana/dao/BasicDao.java |    5 +-
 .../iotdb/web/grafana/dao/impl/BasicDaoImpl.java   |   77 +-
 .../grafana/service/DatabaseConnectService.java    |    1 -
 .../service/impl/DatabaseConnectServiceImpl.java   |    4 +-
 .../web/grafana/dao/impl/BasicDaoImplTest.java     |   41 +-
 .../iotdb/hadoop/fileSystem/HDFSConfUtil.java      |   29 +-
 .../apache/iotdb/hadoop/fileSystem/HDFSFile.java   |    1 -
 .../apache/iotdb/hadoop/fileSystem/HDFSOutput.java |    4 +-
 .../org/apache/iotdb/hadoop/tsfile/IReaderSet.java |    7 +-
 .../iotdb/hadoop/tsfile/TSFHadoopException.java    |    6 +-
 .../apache/iotdb/hadoop/tsfile/TSFInputFormat.java |   81 +-
 .../apache/iotdb/hadoop/tsfile/TSFInputSplit.java  |    6 +-
 .../iotdb/hadoop/tsfile/TSFOutputFormat.java       |    9 +-
 .../iotdb/hadoop/tsfile/TSFRecordReader.java       |   86 +-
 .../iotdb/hadoop/tsfile/TSFRecordWriter.java       |    7 +-
 .../iotdb/hadoop/tsfile/record/HDFSTSRecord.java   |   31 +-
 .../apache/iotdb/hadoop/tsfile/TSFHadoopTest.java  |   25 +-
 .../iotdb/hadoop/tsfile/TSFInputSplitTest.java     |   13 +-
 .../iotdb/hadoop/tsfile/TsFileTestHelper.java      |   15 +-
 .../iotdb/hadoop/tsfile/constant/TestConstant.java |    5 +-
 .../org/apache/iotdb/hive/TSFHiveInputFormat.java  |   12 +-
 .../org/apache/iotdb/hive/TSFHiveOutputFormat.java |   34 +-
 .../org/apache/iotdb/hive/TSFHiveRecordReader.java |   57 +-
 .../org/apache/iotdb/hive/TSFHiveRecordWriter.java |   18 +-
 .../org/apache/iotdb/hive/TsFileDeserializer.java  |   91 +-
 .../java/org/apache/iotdb/hive/TsFileSerDe.java    |   30 +-
 .../apache/iotdb/hive/TSFHiveInputFormatTest.java  |   16 +-
 .../apache/iotdb/hive/TSFHiveRecordReaderTest.java |   94 +-
 .../apache/iotdb/hive/TsFileDeserializerTest.java  |   33 +-
 .../org/apache/iotdb/hive/TsFileSerDeTest.java     |   30 +-
 .../org/apache/iotdb/hive/TsFileTestHelper.java    |   12 +-
 .../apache/iotdb/hive/constant/TestConstant.java   |    5 +-
 .../iotdb/jdbc/AbstractIoTDBJDBCResultSet.java     |   42 +-
 .../main/java/org/apache/iotdb/jdbc/Activator.java |    7 +-
 .../main/java/org/apache/iotdb/jdbc/Config.java    |   23 +-
 .../main/java/org/apache/iotdb/jdbc/Constant.java  |    2 +-
 .../org/apache/iotdb/jdbc/IoTDBConnection.java     |   62 +-
 .../org/apache/iotdb/jdbc/IoTDBDataSource.java     |   13 +-
 .../apache/iotdb/jdbc/IoTDBDataSourceFactory.java  |   16 +-
 .../apache/iotdb/jdbc/IoTDBDatabaseMetadata.java   |   66 +-
 .../java/org/apache/iotdb/jdbc/IoTDBDriver.java    |   12 +-
 .../org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java  |   28 +-
 .../iotdb/jdbc/IoTDBNonAlignJDBCResultSet.java     |   62 +-
 .../apache/iotdb/jdbc/IoTDBPreparedStatement.java  |   63 +-
 .../org/apache/iotdb/jdbc/IoTDBResultMetadata.java |   22 +-
 .../org/apache/iotdb/jdbc/IoTDBSQLException.java   |    1 -
 .../java/org/apache/iotdb/jdbc/IoTDBStatement.java |  187 ++-
 .../src/main/java/org/apache/iotdb/jdbc/Utils.java |   15 +-
 .../test/java/org/apache/iotdb/jdbc/BatchTest.java |   71 +-
 .../org/apache/iotdb/jdbc/IoTDBConnectionTest.java |   27 +-
 .../apache/iotdb/jdbc/IoTDBJDBCResultSetTest.java  |  160 +-
 .../iotdb/jdbc/IoTDBPreparedStatementTest.java     |  164 +-
 .../apache/iotdb/jdbc/IoTDBResultMetadataTest.java |   24 +-
 .../org/apache/iotdb/jdbc/IoTDBStatementTest.java  |   13 +-
 .../test/java/org/apache/iotdb/jdbc/UtilsTest.java |   32 +-
 .../org/apache/iotdb/db/auth/AuthException.java    |    5 +-
 .../org/apache/iotdb/db/auth/AuthorityChecker.java |   18 +-
 .../iotdb/db/auth/authorizer/BasicAuthorizer.java  |   72 +-
 .../iotdb/db/auth/authorizer/IAuthorizer.java      |   68 +-
 .../db/auth/authorizer/LocalFileAuthorizer.java    |    5 +-
 .../iotdb/db/auth/authorizer/OpenIdAuthorizer.java |  372 ++---
 .../apache/iotdb/db/auth/entity/PathPrivilege.java |   12 +-
 .../apache/iotdb/db/auth/entity/PrivilegeType.java |   30 +-
 .../java/org/apache/iotdb/db/auth/entity/Role.java |   13 +-
 .../java/org/apache/iotdb/db/auth/entity/User.java |   32 +-
 .../apache/iotdb/db/auth/role/IRoleAccessor.java   |   18 +-
 .../apache/iotdb/db/auth/role/IRoleManager.java    |   37 +-
 .../iotdb/db/auth/role/LocalFileRoleAccessor.java  |   74 +-
 .../iotdb/db/auth/user/BasicUserManager.java       |    6 +-
 .../apache/iotdb/db/auth/user/IUserAccessor.java   |   18 +-
 .../apache/iotdb/db/auth/user/IUserManager.java    |   42 +-
 .../iotdb/db/auth/user/LocalFileUserAccessor.java  |   84 +-
 .../org/apache/iotdb/db/concurrent/HashLock.java   |    7 +-
 .../IoTDBDefaultThreadExceptionHandler.java        |    5 +-
 .../db/concurrent/IoTDBThreadPoolFactory.java      |   58 +-
 .../iotdb/db/concurrent/IoTThreadFactory.java      |    4 +-
 .../iotdb/db/concurrent/WrappedRunnable.java       |    3 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  607 +++-----
 .../org/apache/iotdb/db/conf/IoTDBConfigCheck.java |  159 +-
 .../org/apache/iotdb/db/conf/IoTDBConstant.java    |   15 +-
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  | 1005 ++++++++-----
 .../iotdb/db/conf/ServerConfigConsistent.java      |    7 +-
 .../iotdb/db/conf/adapter/CompressionRatio.java    |   58 +-
 .../db/conf/directories/DirectoryManager.java      |   29 +-
 .../directories/strategy/DirectoryStrategy.java    |    9 +-
 .../strategy/MaxDiskUsableSpaceFirstStrategy.java  |    4 +-
 .../strategy/RandomOnDiskUsableSpaceStrategy.java  |    7 +-
 .../db/cost/statistic/ConcurrentCircularArray.java |    6 +-
 .../iotdb/db/cost/statistic/Measurement.java       |  101 +-
 .../iotdb/db/cost/statistic/MeasurementMBean.java  |   24 +-
 .../apache/iotdb/db/cost/statistic/Operation.java  |    1 -
 .../org/apache/iotdb/db/engine/StorageEngine.java  |  247 ++-
 .../db/engine/cache/CacheHitRatioMonitor.java      |    4 +-
 .../engine/cache/CacheHitRatioMonitorMXBean.java   |    3 -
 .../apache/iotdb/db/engine/cache/ChunkCache.java   |   70 +-
 .../iotdb/db/engine/cache/ChunkMetadataCache.java  |  102 +-
 .../iotdb/db/engine/cache/LRULinkedHashMap.java    |   24 +-
 .../db/engine/cache/TimeSeriesMetadataCache.java   |  129 +-
 .../compaction/CompactionMergeTaskPoolManager.java |   42 +-
 .../db/engine/compaction/TsFileManagement.java     |  135 +-
 .../level/LevelCompactionTsFileManagement.java     |  319 ++--
 .../no/NoCompactionTsFileManagement.java           |   25 +-
 .../compaction/utils/CompactionLogAnalyzer.java    |    2 +-
 .../engine/compaction/utils/CompactionLogger.java  |    9 +-
 .../engine/compaction/utils/CompactionUtils.java   |  216 ++-
 .../db/engine/fileSystem/SystemFileFactory.java    |    5 +-
 .../apache/iotdb/db/engine/flush/FlushManager.java |   28 +-
 .../iotdb/db/engine/flush/MemTableFlushTask.java   |  318 ++--
 .../iotdb/db/engine/flush/NotifyFlushMemTable.java |    4 +-
 .../iotdb/db/engine/flush/TsFileFlushPolicy.java   |    4 +-
 .../db/engine/flush/pool/AbstractPoolManager.java  |   10 +-
 .../engine/flush/pool/FlushSubTaskPoolManager.java |   14 +-
 .../db/engine/flush/pool/FlushTaskPoolManager.java |    6 +-
 .../iotdb/db/engine/memtable/AbstractMemTable.java |   94 +-
 .../apache/iotdb/db/engine/memtable/IMemTable.java |   70 +-
 .../db/engine/memtable/IWritableMemChunk.java      |   31 +-
 .../db/engine/memtable/PrimitiveMemTable.java      |    6 +-
 .../iotdb/db/engine/memtable/WritableMemChunk.java |    6 +-
 .../iotdb/db/engine/merge/manage/MergeContext.java |   13 +-
 .../iotdb/db/engine/merge/manage/MergeFuture.java  |    5 +-
 .../iotdb/db/engine/merge/manage/MergeManager.java |   81 +-
 .../db/engine/merge/manage/MergeResource.java      |   33 +-
 .../db/engine/merge/manage/MergeThreadPool.java    |    7 +-
 .../apache/iotdb/db/engine/merge/package-info.java |    8 +-
 .../iotdb/db/engine/merge/recover/LogAnalyzer.java |   68 +-
 .../iotdb/db/engine/merge/recover/MergeLogger.java |    4 +-
 .../merge/selector/IFileQueryMemMeasurement.java   |    2 +-
 .../engine/merge/selector/IMergeFileSelector.java  |    4 +-
 .../engine/merge/selector/IMergePathSelector.java  |    8 +-
 .../merge/selector/MaxFileMergeFileSelector.java   |   93 +-
 .../merge/selector/MaxSeriesMergeFileSelector.java |   21 +-
 .../engine/merge/selector/NaivePathSelector.java   |    6 +-
 .../iotdb/db/engine/merge/task/MergeCallback.java  |   11 +-
 .../iotdb/db/engine/merge/task/MergeFileTask.java  |   93 +-
 .../db/engine/merge/task/MergeMultiChunkTask.java  |  201 ++-
 .../iotdb/db/engine/merge/task/MergeTask.java      |   63 +-
 .../db/engine/merge/task/RecoverMergeTask.java     |  115 +-
 .../iotdb/db/engine/modification/Deletion.java     |   11 +-
 .../iotdb/db/engine/modification/Modification.java |   11 +-
 .../db/engine/modification/ModificationFile.java   |   14 +-
 .../io/LocalTextModificationAccessor.java          |   44 +-
 .../engine/modification/io/ModificationReader.java |    8 +-
 .../engine/modification/io/ModificationWriter.java |   13 +-
 .../iotdb/db/engine/modification/package-info.java |    6 +-
 .../db/engine/querycontext/QueryDataSource.java    |   16 +-
 .../db/engine/querycontext/ReadOnlyMemChunk.java   |   26 +-
 .../db/engine/storagegroup/StorageGroupInfo.java   |   25 +-
 .../engine/storagegroup/StorageGroupProcessor.java | 1421 ++++++++++--------
 .../iotdb/db/engine/storagegroup/TsFileLock.java   |   39 +-
 .../db/engine/storagegroup/TsFileProcessor.java    |  541 ++++---
 .../engine/storagegroup/TsFileProcessorInfo.java   |   25 +-
 .../db/engine/storagegroup/TsFileResource.java     |  135 +-
 .../storagegroup/timeindex/DeviceTimeIndex.java    |   17 +-
 .../storagegroup/timeindex/FileTimeIndex.java      |   25 +-
 .../engine/storagegroup/timeindex/ITimeIndex.java  |   16 +-
 .../storagegroup/timeindex/TimeIndexLevel.java     |    3 +-
 .../virtualSg/HashVirtualPartitioner.java          |   10 +-
 .../storagegroup/virtualSg/VirtualPartitioner.java |    1 -
 .../virtualSg/VirtualStorageGroupManager.java      |  186 +--
 .../db/engine/upgrade/UpgradeCheckStatus.java      |    5 +-
 .../apache/iotdb/db/engine/upgrade/UpgradeLog.java |    9 +-
 .../iotdb/db/engine/upgrade/UpgradeTask.java       |   30 +-
 .../version/SimpleFileVersionController.java       |   22 +-
 .../engine/version/SysTimeVersionController.java   |    8 +-
 .../iotdb/db/engine/version/VersionController.java |   12 +-
 .../exception/DiskSpaceInsufficientException.java  |    3 +-
 .../apache/iotdb/db/exception/IoTDBException.java  |    4 +-
 .../db/exception/LoadConfigurationException.java   |    1 -
 .../iotdb/db/exception/LoadEmptyFileException.java |    2 -
 .../db/exception/QueryIdNotExsitException.java     |    1 -
 .../exception/QueryInBatchStatementException.java  |    3 +-
 .../iotdb/db/exception/ShutdownException.java      |    1 -
 .../iotdb/db/exception/StartupException.java       |    3 +-
 .../SyncDeviceOwnerConflictException.java          |   12 +-
 .../iotdb/db/exception/SystemCheckException.java   |    3 +-
 .../db/exception/UDFRegistrationException.java     |   68 +-
 .../iotdb/db/exception/WriteProcessException.java  |    1 -
 .../db/exception/WriteProcessRejectException.java  |    1 -
 .../exception/index/DistanceMetricException.java   |    1 -
 .../index/IllegalIndexParamException.java          |    1 -
 .../db/exception/index/IndexManagerException.java  |    1 -
 .../db/exception/index/QueryIndexException.java    |    1 -
 .../index/UnsupportedIndexTypeException.java       |    5 +-
 .../metadata/AliasAlreadyExistException.java       |    5 +-
 .../metadata/DataTypeMismatchException.java        |   14 +-
 .../exception/metadata/DeleteFailedException.java  |    5 +-
 .../metadata/PathAlreadyExistException.java        |    5 +-
 .../exception/metadata/PathNotExistException.java  |   20 +-
 .../metadata/StorageGroupAlreadySetException.java  |    3 +-
 .../exception/query/LogicalOperatorException.java  |    6 +-
 .../exception/query/LogicalOptimizeException.java  |   11 +-
 .../db/exception/query/OutOfTTLException.java      |   11 +-
 .../exception/query/PathNumOverLimitException.java |   14 +-
 .../query/QueryTimeoutRuntimeException.java        |    9 +-
 .../query/UnSupportedFillTypeException.java        |    3 +-
 .../db/exception/runtime/RPCServiceException.java  |    3 +-
 .../db/exception/runtime/SQLParserException.java   |    2 +
 .../iotdb/db/index/common/IndexConstant.java       |    5 +-
 .../apache/iotdb/db/index/common/IndexType.java    |  171 ++-
 .../apache/iotdb/db/index/common/IndexUtils.java   |    3 +-
 .../apache/iotdb/db/metadata/MLogTxtWriter.java    |   26 +-
 .../org/apache/iotdb/db/metadata/MManager.java     |  566 +++----
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  305 ++--
 .../apache/iotdb/db/metadata/MeasurementMeta.java  |   14 +-
 .../org/apache/iotdb/db/metadata/MetaUtils.java    |   15 +-
 .../org/apache/iotdb/db/metadata/Metadata.java     |   21 +-
 .../apache/iotdb/db/metadata/MetadataConstant.java |    7 +-
 .../iotdb/db/metadata/MetadataOperationType.java   |    4 +-
 .../org/apache/iotdb/db/metadata/PartialPath.java  |   26 +-
 .../org/apache/iotdb/db/metadata/TagLogFile.java   |   39 +-
 .../iotdb/db/metadata/logfile/MLogReader.java      |    5 +-
 .../iotdb/db/metadata/logfile/MLogTxtReader.java   |   11 +-
 .../iotdb/db/metadata/logfile/MLogWriter.java      |  106 +-
 .../org/apache/iotdb/db/metadata/mnode/MNode.java  |   59 +-
 .../iotdb/db/metadata/mnode/MeasurementMNode.java  |   60 +-
 .../iotdb/db/metadata/mnode/StorageGroupMNode.java |    3 +-
 .../apache/iotdb/db/metrics/server/JettyUtil.java  |  209 +--
 .../iotdb/db/metrics/server/MetricsSystem.java     |  161 +-
 .../iotdb/db/metrics/server/QueryServlet.java      |    6 +-
 .../iotdb/db/metrics/server/ServerArgument.java    |  644 ++++----
 .../iotdb/db/metrics/server/SqlArgument.java       |  166 ++-
 .../apache/iotdb/db/metrics/sink/ConsoleSink.java  |    7 +-
 .../iotdb/db/metrics/sink/MetricsServletSink.java  |  101 +-
 .../org/apache/iotdb/db/metrics/sink/Sink.java     |    1 -
 .../apache/iotdb/db/metrics/source/JvmSource.java  |   99 +-
 .../iotdb/db/metrics/source/MetricsSource.java     |  247 +--
 .../org/apache/iotdb/db/metrics/source/Source.java |    1 -
 .../apache/iotdb/db/metrics/ui/MetricsPage.java    |  326 ++--
 .../apache/iotdb/db/metrics/ui/MetricsWebUI.java   |  132 +-
 .../org/apache/iotdb/db/monitor/IStatistic.java    |   10 +-
 .../apache/iotdb/db/monitor/MonitorConstants.java  |    8 +-
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |   70 +-
 .../apache/iotdb/db/monitor/StatMonitorMBean.java  |    1 -
 .../apache/iotdb/db/mqtt/BrokerAuthenticator.java  |   30 +-
 .../apache/iotdb/db/mqtt/JSONPayloadFormatter.java |   91 +-
 .../java/org/apache/iotdb/db/mqtt/Message.java     |   83 +-
 .../apache/iotdb/db/mqtt/PayloadFormatManager.java |   30 +-
 .../org/apache/iotdb/db/mqtt/PayloadFormatter.java |   28 +-
 .../org/apache/iotdb/db/mqtt/PublishHandler.java   |   18 +-
 .../main/java/org/apache/iotdb/db/qp/Planner.java  |   55 +-
 .../apache/iotdb/db/qp/constant/SQLConstant.java   |   11 +-
 .../apache/iotdb/db/qp/executor/IPlanExecutor.java |   13 +-
 .../apache/iotdb/db/qp/executor/PlanExecutor.java  |  223 +--
 .../org/apache/iotdb/db/qp/logical/Operator.java   |  108 +-
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   25 +-
 .../db/qp/logical/crud/BasicOperatorType.java      |    7 +-
 .../db/qp/logical/crud/DeleteDataOperator.java     |    4 +-
 .../iotdb/db/qp/logical/crud/FilterOperator.java   |   43 +-
 .../iotdb/db/qp/logical/crud/FromOperator.java     |    5 +-
 .../iotdb/db/qp/logical/crud/FunctionOperator.java |   11 +-
 .../iotdb/db/qp/logical/crud/InOperator.java       |   26 +-
 .../iotdb/db/qp/logical/crud/InsertOperator.java   |    5 +-
 .../iotdb/db/qp/logical/crud/QueryOperator.java    |    4 +-
 .../iotdb/db/qp/logical/crud/SFWOperator.java      |    4 +-
 .../iotdb/db/qp/logical/crud/SelectOperator.java   |    8 +-
 .../iotdb/db/qp/logical/sys/AuthorOperator.java    |   20 +-
 .../iotdb/db/qp/logical/sys/CountOperator.java     |   10 +-
 .../db/qp/logical/sys/CreateFunctionOperator.java  |  116 +-
 .../db/qp/logical/sys/CreateIndexOperator.java     |    4 +-
 .../qp/logical/sys/CreateTimeSeriesOperator.java   |    9 +-
 .../iotdb/db/qp/logical/sys/DataAuthOperator.java  |    1 -
 .../qp/logical/sys/DeleteStorageGroupOperator.java |    6 +-
 .../qp/logical/sys/DeleteTimeSeriesOperator.java   |    9 +-
 .../db/qp/logical/sys/DropFunctionOperator.java    |   80 +-
 .../iotdb/db/qp/logical/sys/DropIndexOperator.java |    5 +-
 .../iotdb/db/qp/logical/sys/FlushOperator.java     |    3 +-
 .../qp/logical/sys/LoadConfigurationOperator.java  |    5 +-
 .../iotdb/db/qp/logical/sys/LoadDataOperator.java  |    4 +-
 .../iotdb/db/qp/logical/sys/LoadFilesOperator.java |    1 -
 .../db/qp/logical/sys/SetStorageGroupOperator.java |    7 +-
 .../iotdb/db/qp/logical/sys/SetTTLOperator.java    |    2 +-
 .../db/qp/logical/sys/ShowFunctionsOperator.java   |   74 +-
 .../qp/logical/sys/ShowStorageGroupOperator.java   |   72 +-
 .../iotdb/db/qp/logical/sys/ShowTTLOperator.java   |    2 +-
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |   66 +-
 .../db/qp/physical/crud/AlignByDevicePlan.java     |   29 +-
 .../iotdb/db/qp/physical/crud/DeletePlan.java      |    5 +-
 .../iotdb/db/qp/physical/crud/GroupByTimePlan.java |    3 +-
 .../db/qp/physical/crud/InsertMultiTabletPlan.java |   87 +-
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |   12 +-
 .../iotdb/db/qp/physical/crud/InsertRowPlan.java   |   94 +-
 .../physical/crud/InsertRowsOfOneDevicePlan.java   |   40 +-
 .../db/qp/physical/crud/InsertTabletPlan.java      |   74 +-
 .../db/qp/physical/crud/RawDataQueryPlan.java      |   15 +-
 .../apache/iotdb/db/qp/physical/crud/UDFPlan.java  |   98 +-
 .../apache/iotdb/db/qp/physical/crud/UDTFPlan.java |  264 ++--
 .../db/qp/physical/sys/AlterTimeSeriesPlan.java    |   30 +-
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |   64 +-
 .../iotdb/db/qp/physical/sys/ChangeAliasPlan.java  |    9 +-
 .../db/qp/physical/sys/ChangeTagOffsetPlan.java    |    9 +-
 .../apache/iotdb/db/qp/physical/sys/CountPlan.java |    8 +-
 .../db/qp/physical/sys/CreateFunctionPlan.java     |  114 +-
 .../iotdb/db/qp/physical/sys/CreateIndexPlan.java  |   13 +-
 .../qp/physical/sys/CreateMultiTimeSeriesPlan.java |   14 +-
 .../db/qp/physical/sys/CreateSnapshotPlan.java     |    1 -
 .../db/qp/physical/sys/CreateTimeSeriesPlan.java   |   26 +-
 .../db/qp/physical/sys/DeleteStorageGroupPlan.java |   13 +-
 .../iotdb/db/qp/physical/sys/DropFunctionPlan.java |   90 +-
 .../iotdb/db/qp/physical/sys/DropIndexPlan.java    |    8 +-
 .../apache/iotdb/db/qp/physical/sys/FlushPlan.java |   38 +-
 .../db/qp/physical/sys/LoadConfigurationPlan.java  |    7 +-
 .../iotdb/db/qp/physical/sys/LoadDataPlan.java     |   12 +-
 .../apache/iotdb/db/qp/physical/sys/MNodePlan.java |   10 +-
 .../db/qp/physical/sys/MeasurementMNodePlan.java   |   31 +-
 .../apache/iotdb/db/qp/physical/sys/MergePlan.java |    1 -
 .../iotdb/db/qp/physical/sys/OperateFilePlan.java  |   22 +-
 .../db/qp/physical/sys/SetStorageGroupPlan.java    |    2 +-
 .../iotdb/db/qp/physical/sys/SetTTLPlan.java       |    2 +-
 .../iotdb/db/qp/physical/sys/ShowDevicesPlan.java  |    4 +-
 .../db/qp/physical/sys/ShowFunctionsPlan.java      |   68 +-
 .../apache/iotdb/db/qp/physical/sys/ShowPlan.java  |   21 +-
 .../qp/physical/sys/ShowQueryProcesslistPlan.java  |    1 -
 .../db/qp/physical/sys/ShowStorageGroupPlan.java   |   73 +-
 .../iotdb/db/qp/physical/sys/ShowTTLPlan.java      |    2 +-
 .../db/qp/physical/sys/ShowTimeSeriesPlan.java     |   12 +-
 .../db/qp/physical/sys/StorageGroupMNodePlan.java  |    9 +-
 .../apache/iotdb/db/qp/sql/IoTDBSqlVisitor.java    |  310 ++--
 .../iotdb/db/qp/strategy/LogicalGenerator.java     |   11 +-
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |  203 ++-
 .../apache/iotdb/db/qp/strategy/SQLParseError.java |    8 +-
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |   99 +-
 .../qp/strategy/optimizer/DnfFilterOptimizer.java  |   18 +-
 .../db/qp/strategy/optimizer/IFilterOptimizer.java |    4 +-
 .../qp/strategy/optimizer/ILogicalOptimizer.java   |    4 +-
 .../optimizer/MergeSingleFilterOptimizer.java      |   18 +-
 .../qp/strategy/optimizer/RemoveNotOptimizer.java  |   13 +-
 .../apache/iotdb/db/qp/utils/DatetimeUtils.java    |  605 ++++----
 .../db/query/aggregation/AggregateResult.java      |   23 +-
 .../db/query/aggregation/AggregationType.java      |   11 +-
 .../db/query/aggregation/impl/AvgAggrResult.java   |   14 +-
 .../db/query/aggregation/impl/CountAggrResult.java |   10 +-
 .../aggregation/impl/FirstValueAggrResult.java     |    4 +-
 .../aggregation/impl/FirstValueDescAggrResult.java |    4 +-
 .../aggregation/impl/LastValueAggrResult.java      |    6 +-
 .../aggregation/impl/LastValueDescAggrResult.java  |    4 +-
 .../query/aggregation/impl/MaxTimeAggrResult.java  |   12 +-
 .../aggregation/impl/MaxTimeDescAggrResult.java    |    4 +-
 .../query/aggregation/impl/MaxValueAggrResult.java |   12 +-
 .../query/aggregation/impl/MinTimeAggrResult.java  |   15 +-
 .../aggregation/impl/MinTimeDescAggrResult.java    |    6 +-
 .../query/aggregation/impl/MinValueAggrResult.java |   11 +-
 .../db/query/aggregation/impl/SumAggrResult.java   |    4 +-
 .../iotdb/db/query/context/QueryContext.java       |   38 +-
 .../iotdb/db/query/control/FileReaderManager.java  |   83 +-
 .../iotdb/db/query/control/QueryFileManager.java   |  248 ++--
 .../db/query/control/QueryResourceManager.java     |  449 +++---
 .../iotdb/db/query/control/QueryTimeManager.java   |   54 +-
 .../iotdb/db/query/control/TracingManager.java     |  118 +-
 .../db/query/dataset/AlignByDeviceDataSet.java     |   19 +-
 .../db/query/dataset/DirectAlignByTimeDataSet.java |   66 +-
 .../db/query/dataset/DirectNonAlignDataSet.java    |   62 +-
 .../db/query/dataset/NonAlignEngineDataSet.java    |   32 +-
 .../dataset/RawQueryDataSetWithValueFilter.java    |   25 +-
 .../dataset/RawQueryDataSetWithoutValueFilter.java |   72 +-
 .../iotdb/db/query/dataset/ShowDevicesDataSet.java |   15 +-
 .../apache/iotdb/db/query/dataset/ShowResult.java  |    4 +-
 .../db/query/dataset/ShowTimeSeriesResult.java     |   26 +-
 .../db/query/dataset/ShowTimeseriesDataSet.java    |   40 +-
 .../db/query/dataset/UDTFAlignByTimeDataSet.java   |  579 ++++----
 .../apache/iotdb/db/query/dataset/UDTFDataSet.java |  328 ++--
 .../db/query/dataset/UDTFNonAlignDataSet.java      |  405 ++---
 .../dataset/groupby/GroupByEngineDataSet.java      |   28 +-
 .../db/query/dataset/groupby/GroupByExecutor.java  |   16 +-
 .../query/dataset/groupby/GroupByFillDataSet.java  |   50 +-
 .../query/dataset/groupby/GroupByTimeDataSet.java  |   12 +-
 .../groupby/GroupByWithValueFilterDataSet.java     |  101 +-
 .../groupby/GroupByWithoutValueFilterDataSet.java  |   68 +-
 .../dataset/groupby/LocalGroupByExecutor.java      |   62 +-
 .../db/query/executor/AggregationExecutor.java     |  202 ++-
 .../iotdb/db/query/executor/FillQueryExecutor.java |   25 +-
 .../iotdb/db/query/executor/IQueryRouter.java      |   39 +-
 .../iotdb/db/query/executor/LastQueryExecutor.java |   65 +-
 .../iotdb/db/query/executor/QueryRouter.java       |   78 +-
 .../db/query/executor/RawDataQueryExecutor.java    |  104 +-
 .../iotdb/db/query/executor/UDTFQueryExecutor.java |  194 +--
 .../apache/iotdb/db/query/executor/fill/IFill.java |   13 +-
 .../db/query/executor/fill/LastPointReader.java    |   50 +-
 .../iotdb/db/query/executor/fill/LinearFill.java   |   64 +-
 .../iotdb/db/query/executor/fill/PreviousFill.java |   21 +-
 .../db/query/externalsort/ExternalSortJob.java     |    4 +-
 .../query/externalsort/ExternalSortJobEngine.java  |   14 +-
 .../db/query/externalsort/ExternalSortJobPart.java |    5 +-
 .../externalsort/ExternalSortJobScheduler.java     |   19 +-
 .../iotdb/db/query/externalsort/LineMerger.java    |    8 +-
 .../MultiSourceExternalSortJobPart.java            |    5 +-
 .../externalsort/SimpleExternalSortEngine.java     |   45 +-
 .../SingleSourceExternalSortJobPart.java           |   19 +-
 .../adapter/ByTimestampReaderAdapter.java          |    4 +-
 .../FixLengthIExternalSortFileDeserializer.java    |   45 +-
 .../impl/FixLengthTimeValuePairSerializer.java     |   20 +-
 .../db/query/factory/AggregateResultFactory.java   |   31 +-
 .../iotdb/db/query/pool/QueryTaskPoolManager.java  |    7 +-
 .../db/query/reader/chunk/ChunkDataIterator.java   |   10 +-
 .../db/query/reader/chunk/ChunkReaderWrap.java     |   22 +-
 .../db/query/reader/chunk/DiskChunkLoader.java     |    4 +-
 .../reader/chunk/DiskChunkReaderByTimestamp.java   |    3 +-
 .../db/query/reader/chunk/MemChunkLoader.java      |    8 +-
 .../db/query/reader/chunk/MemChunkReader.java      |   16 +-
 .../iotdb/db/query/reader/chunk/MemPageReader.java |   13 +-
 .../chunk/metadata/DiskChunkMetadataLoader.java    |   26 +-
 .../chunk/metadata/MemChunkMetadataLoader.java     |   12 +-
 .../db/query/reader/series/IAggregateReader.java   |    8 +-
 .../db/query/reader/series/IReaderByTimestamp.java |   19 +-
 .../query/reader/series/SeriesAggregateReader.java |   36 +-
 .../reader/series/SeriesRawDataBatchReader.java    |   55 +-
 .../reader/series/SeriesRawDataPointReader.java    |   13 +-
 .../iotdb/db/query/reader/series/SeriesReader.java |  326 ++--
 .../reader/series/SeriesReaderByTimestamp.java     |   24 +-
 .../universal/CachedPriorityMergeReader.java       |    1 -
 .../reader/universal/DescPriorityMergeReader.java  |   15 +-
 .../reader/universal/PriorityMergeReader.java      |   34 +-
 .../query/timegenerator/ServerTimeGenerator.java   |   35 +-
 .../org/apache/iotdb/db/query/udf/api/UDF.java     |   84 +-
 .../org/apache/iotdb/db/query/udf/api/UDTF.java    |  241 ++-
 .../apache/iotdb/db/query/udf/api/access/Row.java  |  240 +--
 .../iotdb/db/query/udf/api/access/RowIterator.java |   96 +-
 .../iotdb/db/query/udf/api/access/RowWindow.java   |  122 +-
 .../db/query/udf/api/collector/PointCollector.java |  274 ++--
 .../api/customizer/config/UDFConfigurations.java   |   76 +-
 .../api/customizer/config/UDTFConfigurations.java  |  210 +--
 .../parameter/UDFParameterValidator.java           |   58 +-
 .../api/customizer/parameter/UDFParameters.java    |  303 ++--
 .../api/customizer/strategy/AccessStrategy.java    |  122 +-
 .../strategy/RowByRowAccessStrategy.java           |  122 +-
 .../strategy/SlidingSizeWindowAccessStrategy.java  |  247 +--
 .../strategy/SlidingTimeWindowAccessStrategy.java  |  509 +++----
 .../UDFInputSeriesDataTypeNotValidException.java   |   22 +-
 .../UDFInputSeriesIndexNotValidException.java      |    5 +-
 .../UDFInputSeriesNumberNotValidException.java     |   16 +-
 .../db/query/udf/builtin/BuiltinFunction.java      |    5 +-
 .../apache/iotdb/db/query/udf/builtin/UDTFAbs.java |    8 +-
 .../iotdb/db/query/udf/builtin/UDTFBottomK.java    |   15 +-
 .../db/query/udf/builtin/UDTFCommonDerivative.java |    4 +-
 .../udf/builtin/UDTFCommonValueDifference.java     |    4 +-
 .../iotdb/db/query/udf/builtin/UDTFMath.java       |    8 +-
 .../udf/builtin/UDTFNonNegativeDerivative.java     |    4 +-
 .../builtin/UDTFNonNegativeValueDifference.java    |    4 +-
 .../iotdb/db/query/udf/builtin/UDTFSelectK.java    |   69 +-
 .../iotdb/db/query/udf/builtin/UDTFTopK.java       |   10 +-
 .../db/query/udf/builtin/UDTFValueDifference.java  |    4 +-
 .../iotdb/db/query/udf/builtin/UDTFValueTrend.java |    8 +-
 .../iotdb/db/query/udf/core/access/RowImpl.java    |  184 +--
 .../db/query/udf/core/access/RowIteratorImpl.java  |  119 +-
 .../db/query/udf/core/access/RowWindowImpl.java    |  154 +-
 .../db/query/udf/core/context/UDFContext.java      |  275 ++--
 .../db/query/udf/core/executor/UDTFExecutor.java   |  231 +--
 .../iotdb/db/query/udf/core/input/InputLayer.java  | 1125 +++++++-------
 .../iotdb/db/query/udf/core/input/SafetyLine.java  |  155 +-
 .../db/query/udf/core/reader/LayerPointReader.java |   96 +-
 .../db/query/udf/core/reader/LayerRowReader.java   |   76 +-
 .../udf/core/reader/LayerRowWindowReader.java      |   72 +-
 .../core/transformer/RawQueryPointTransformer.java |  144 +-
 .../db/query/udf/core/transformer/Transformer.java |  186 +--
 .../core/transformer/UDFQueryRowTransformer.java   |   90 +-
 .../transformer/UDFQueryRowWindowTransformer.java  |   92 +-
 .../udf/core/transformer/UDFQueryTransformer.java  |  202 +--
 .../iotdb/db/query/udf/datastructure/Cache.java    |    7 +-
 .../query/udf/datastructure/SerializableList.java  |  330 ++---
 .../primitive/ElasticSerializableIntList.java      |  196 +--
 .../query/udf/datastructure/primitive/IntList.java |   66 +-
 .../primitive/SerializableIntList.java             |  263 ++--
 .../datastructure/primitive/WrappedIntArray.java   |  102 +-
 .../row/ElasticSerializableRowRecordList.java      |  494 +++---
 .../row/SerializableRowRecordList.java             |  469 +++---
 .../tv/ElasticSerializableBinaryTVList.java        |  252 ++--
 .../tv/ElasticSerializableTVList.java              |  628 ++++----
 .../datastructure/tv/SerializableBinaryTVList.java |  160 +-
 .../tv/SerializableBooleanTVList.java              |  140 +-
 .../datastructure/tv/SerializableDoubleTVList.java |  140 +-
 .../datastructure/tv/SerializableFloatTVList.java  |  140 +-
 .../datastructure/tv/SerializableIntTVList.java    |  140 +-
 .../datastructure/tv/SerializableLongTVList.java   |  140 +-
 .../udf/datastructure/tv/SerializableTVList.java   |  199 +--
 .../udf/service/TemporaryQueryDataFileService.java |  278 ++--
 .../iotdb/db/query/udf/service/UDFClassLoader.java |  166 +--
 .../query/udf/service/UDFClassLoaderManager.java   |  250 ++--
 .../iotdb/db/query/udf/service/UDFLogWriter.java   |  138 +-
 .../udf/service/UDFRegistrationInformation.java    |  159 +-
 .../query/udf/service/UDFRegistrationService.java  |  830 ++++++-----
 .../apache/iotdb/db/rescon/CachedStringPool.java   |    3 +-
 .../apache/iotdb/db/rescon/MemTableManager.java    |   23 +-
 .../iotdb/db/rescon/PrimitiveArrayManager.java     |   64 +-
 .../org/apache/iotdb/db/rescon/SystemInfo.java     |   58 +-
 .../apache/iotdb/db/rescon/TVListAllocator.java    |   15 +-
 .../iotdb/db/rescon/TVListAllocatorMBean.java      |    1 -
 .../org/apache/iotdb/db/rescon/package-info.java   |    2 +-
 .../java/org/apache/iotdb/db/service/IService.java |   17 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   14 +-
 .../apache/iotdb/db/service/IoTDBShutdownHook.java |    8 +-
 .../org/apache/iotdb/db/service/JMXService.java    |   21 +-
 .../org/apache/iotdb/db/service/MQTTService.java   |  145 +-
 .../apache/iotdb/db/service/MetricsService.java    |   42 +-
 .../iotdb/db/service/MetricsServiceMBean.java      |    1 -
 .../org/apache/iotdb/db/service/RPCService.java    |   35 +-
 .../iotdb/db/service/RPCServiceThriftHandler.java  |   29 +-
 .../apache/iotdb/db/service/RegisterManager.java   |   16 +-
 .../org/apache/iotdb/db/service/ServiceType.java   |   10 +-
 .../org/apache/iotdb/db/service/StartupCheck.java  |    4 +-
 .../org/apache/iotdb/db/service/StartupChecks.java |   50 +-
 .../org/apache/iotdb/db/service/StaticResps.java   |   24 +-
 .../org/apache/iotdb/db/service/TSServiceImpl.java |  507 ++++---
 .../org/apache/iotdb/db/service/UpgradeSevice.java |   10 +-
 .../iotdb/db/service/thrift/ThriftService.java     |   46 +-
 .../db/service/thrift/ThriftServiceThread.java     |   48 +-
 .../apache/iotdb/db/sync/conf/SyncConstant.java    |    8 +-
 .../iotdb/db/sync/conf/SyncSenderConfig.java       |   19 +-
 .../iotdb/db/sync/conf/SyncSenderDescriptor.java   |   29 +-
 .../org/apache/iotdb/db/sync/package-info.java     |   11 +-
 .../iotdb/db/sync/receiver/SyncServerManager.java  |   27 +-
 .../db/sync/receiver/SyncServerThriftHandler.java  |   30 +-
 .../iotdb/db/sync/receiver/load/FileLoader.java    |   54 +-
 .../db/sync/receiver/load/FileLoaderManager.java   |   20 +-
 .../iotdb/db/sync/receiver/load/IFileLoader.java   |   23 +-
 .../iotdb/db/sync/receiver/load/ILoadLogger.java   |    9 +-
 .../iotdb/db/sync/receiver/load/LoadLogger.java    |    2 +-
 .../iotdb/db/sync/receiver/load/LoadType.java      |    4 +-
 .../receiver/recover/ISyncReceiverLogAnalyzer.java |    1 -
 .../sync/receiver/recover/ISyncReceiverLogger.java |    8 +-
 .../receiver/recover/SyncReceiverLogAnalyzer.java  |   22 +-
 .../sync/receiver/recover/SyncReceiverLogger.java  |    2 +-
 .../db/sync/receiver/transfer/SyncServiceImpl.java |  109 +-
 .../db/sync/sender/manage/SyncFileManager.java     |   59 +-
 .../sender/recover/ISyncSenderLogAnalyzer.java     |   13 +-
 .../db/sync/sender/recover/ISyncSenderLogger.java  |    6 +-
 .../sync/sender/recover/SyncSenderLogAnalyzer.java |   10 +-
 .../db/sync/sender/recover/SyncSenderLogger.java   |    2 +-
 .../iotdb/db/sync/sender/transfer/ISyncClient.java |   32 +-
 .../iotdb/db/sync/sender/transfer/SyncClient.java  |  173 ++-
 .../apache/iotdb/db/tools/IoTDBDataDirViewer.java  |   39 +-
 .../iotdb/db/tools/TsFileResourcePrinter.java      |   19 +-
 .../apache/iotdb/db/tools/TsFileSketchTool.java    |  181 ++-
 .../java/org/apache/iotdb/db/tools/WalChecker.java |   25 +-
 .../org/apache/iotdb/db/tools/mlog/MLogParser.java |   59 +-
 .../db/tools/upgrade/TsFileOnlineUpgradeTool.java  |  201 +--
 .../db/tools/virtualsg/DeviceMappingViewer.java    |   19 +-
 .../watermark/GroupedLSBWatermarkEncoder.java      |    4 +-
 .../db/tools/watermark/WatermarkDetector.java      |   78 +-
 .../java/org/apache/iotdb/db/utils/AuthUtils.java  |   42 +-
 .../org/apache/iotdb/db/utils/CommonUtils.java     |   42 +-
 .../iotdb/db/utils/CopyOnReadLinkedList.java       |    1 -
 .../iotdb/db/utils/EncodingInferenceUtils.java     |    5 +-
 .../org/apache/iotdb/db/utils/FileLoaderUtils.java |   61 +-
 .../org/apache/iotdb/db/utils/FilePathUtils.java   |   47 +-
 .../java/org/apache/iotdb/db/utils/FileUtils.java  |   28 +-
 .../java/org/apache/iotdb/db/utils/IOUtils.java    |   79 +-
 .../java/org/apache/iotdb/db/utils/MathUtils.java  |   26 +-
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |   52 +-
 .../java/org/apache/iotdb/db/utils/MergeUtils.java |   58 +-
 .../java/org/apache/iotdb/db/utils/MmapUtil.java   |    4 +-
 .../org/apache/iotdb/db/utils/OpenFileNumUtil.java |   19 +-
 .../apache/iotdb/db/utils/QueryDataSetUtils.java   |   37 +-
 .../java/org/apache/iotdb/db/utils/QueryUtils.java |   42 +-
 .../org/apache/iotdb/db/utils/SchemaUtils.java     |   49 +-
 .../org/apache/iotdb/db/utils/SerializeUtils.java  |  115 +-
 .../java/org/apache/iotdb/db/utils/SyncUtils.java  |   23 +-
 .../java/org/apache/iotdb/db/utils/TestOnly.java   |    7 +-
 .../apache/iotdb/db/utils/TimeValuePairUtils.java  |   17 +-
 .../apache/iotdb/db/utils/TypeInferenceUtils.java  |   31 +-
 .../org/apache/iotdb/db/utils/UpgradeUtils.java    |   73 +-
 .../iotdb/db/utils/datastructure/BinaryTVList.java |   19 +-
 .../db/utils/datastructure/BooleanTVList.java      |   24 +-
 .../iotdb/db/utils/datastructure/DoubleTVList.java |   20 +-
 .../iotdb/db/utils/datastructure/FloatTVList.java  |   19 +-
 .../iotdb/db/utils/datastructure/IntTVList.java    |   17 +-
 .../iotdb/db/utils/datastructure/LongTVList.java   |   16 +-
 .../iotdb/db/utils/datastructure/TVList.java       |   30 +-
 .../iotdb/db/utils/datastructure/TimeSelector.java |    8 +-
 .../iotdb/db/writelog/io/BatchLogReader.java       |    3 +-
 .../apache/iotdb/db/writelog/io/ILogReader.java    |    6 +-
 .../apache/iotdb/db/writelog/io/ILogWriter.java    |   15 +-
 .../org/apache/iotdb/db/writelog/io/LogWriter.java |   17 +-
 .../iotdb/db/writelog/io/SingleFileLogReader.java  |   17 +-
 .../writelog/manager/MultiFileLogNodeManager.java  |   12 +-
 .../db/writelog/manager/WriteLogNodeManager.java   |   10 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   15 +-
 .../iotdb/db/writelog/node/WriteLogNode.java       |   32 +-
 .../iotdb/db/writelog/recover/LogReplayer.java     |   59 +-
 .../writelog/recover/TsFileRecoverPerformer.java   |   71 +-
 .../apache/iotdb/db/auth/AuthorityCheckerTest.java |  201 ++-
 .../auth/authorizer/LocalFileAuthorizerTest.java   |   13 +-
 .../db/auth/authorizer/OpenIdAuthorizerTest.java   |   48 +-
 .../iotdb/db/auth/entity/PathPrivilegeTest.java    |    2 -
 .../org/apache/iotdb/db/auth/entity/RoleTest.java  |   10 +-
 .../org/apache/iotdb/db/auth/entity/UserTest.java  |    2 -
 .../db/auth/role/LocalFileRoleAccessorTest.java    |    1 -
 .../db/auth/user/LocalFileUserAccessorTest.java    |    1 -
 .../db/concurrent/IoTDBThreadPoolFactoryTest.java  |   20 +-
 .../apache/iotdb/db/conf/IoTDBDescriptorTest.java  |    4 +-
 .../db/conf/adapter/CompressionRatioTest.java      |   76 +-
 .../strategy/DirectoryStrategyTest.java            |   34 +-
 .../org/apache/iotdb/db/constant/TestConstant.java |  116 +-
 .../db/cost/statistic/PerformanceStatTest.java     |    7 +-
 .../iotdb/db/engine/MetadataManagerHelper.java     |  142 +-
 .../db/engine/cache/ChunkMetadataCacheTest.java    |   34 +-
 .../db/engine/compaction/CompactionChunkTest.java  |   79 +-
 .../engine/compaction/LevelCompactionLogTest.java  |   20 +-
 .../compaction/LevelCompactionMergeTest.java       |   75 +-
 .../engine/compaction/LevelCompactionModsTest.java |   16 +-
 .../compaction/LevelCompactionRecoverTest.java     |  510 ++++---
 .../compaction/LevelCompactionSelectorTest.java    |   17 +-
 .../db/engine/compaction/LevelCompactionTest.java  |   72 +-
 .../LevelCompactionTsFileManagementTest.java       |   88 +-
 .../NoCompactionTsFileManagementTest.java          |   92 +-
 .../db/engine/memtable/MemTableFlushTaskTest.java  |   43 +-
 .../db/engine/memtable/MemTableTestUtils.java      |   25 +-
 .../db/engine/memtable/MemtableBenchmark.java      |   20 +-
 .../db/engine/memtable/PrimitiveMemTableTest.java  |   97 +-
 .../engine/merge/MaxFileMergeFileSelectorTest.java |    2 +-
 .../merge/MaxSeriesMergeFileSelectorTest.java      |   23 +-
 .../apache/iotdb/db/engine/merge/MergeLogTest.java |   19 +-
 .../iotdb/db/engine/merge/MergeManagerTest.java    |   12 +-
 .../iotdb/db/engine/merge/MergeOverLapTest.java    |  101 +-
 .../iotdb/db/engine/merge/MergePerfTest.java       |   67 +-
 .../iotdb/db/engine/merge/MergeTaskTest.java       |  261 +++-
 .../apache/iotdb/db/engine/merge/MergeTest.java    |   80 +-
 .../iotdb/db/engine/merge/MergeUpgradeTest.java    |   70 +-
 .../engine/modification/DeletionFileNodeTest.java  |  106 +-
 .../db/engine/modification/DeletionQueryTest.java  |   15 +-
 .../engine/modification/ModificationFileTest.java  |   29 +-
 .../io/LocalTextModificationAccessorTest.java      |   16 +-
 .../storagegroup/FileNodeManagerBenchmark.java     |   30 +-
 .../storagegroup/StorageGroupProcessorTest.java    |  160 +-
 .../iotdb/db/engine/storagegroup/TTLTest.java      |  116 +-
 .../engine/storagegroup/TsFileProcessorTest.java   |  151 +-
 .../virtualSg/HashVirtualPartitionerTest.java      |    3 -
 .../version/SimpleFileVersionControllerTest.java   |   11 +-
 .../version/SysTimeVersionControllerTest.java      |    8 +-
 .../iotdb/db/integration/IOTDBGroupByIT.java       | 1055 +++++++------
 .../integration/IOTDBGroupByInnerIntervalIT.java   |  340 +++--
 .../apache/iotdb/db/integration/IOTDBInsertIT.java |   35 +-
 .../iotdb/db/integration/IoTDBAddSubDeviceIT.java  |   96 +-
 .../apache/iotdb/db/integration/IoTDBAliasIT.java  |  156 +-
 .../iotdb/db/integration/IoTDBAlignByDeviceIT.java | 1127 ++++++++------
 .../org/apache/iotdb/db/integration/IoTDBAsIT.java |  275 ++--
 .../db/integration/IoTDBAutoCreateSchemaIT.java    |   42 +-
 .../iotdb/db/integration/IoTDBCheckConfigIT.java   |   53 +-
 .../iotdb/db/integration/IoTDBClearCacheIT.java    |  156 +-
 .../apache/iotdb/db/integration/IoTDBCloseIT.java  |   83 +-
 .../iotdb/db/integration/IoTDBCompleteIT.java      |  627 ++++----
 .../iotdb/db/integration/IoTDBCompressTypeIT.java  |   34 +-
 .../db/integration/IoTDBCreateSnapshotIT.java      |   87 +-
 .../apache/iotdb/db/integration/IoTDBDaemonIT.java |  348 +++--
 .../db/integration/IoTDBDeleteStorageGroupIT.java  |   46 +-
 .../db/integration/IoTDBDeleteTimeseriesIT.java    |   41 +-
 .../iotdb/db/integration/IoTDBDeletionIT.java      |  164 +-
 .../iotdb/db/integration/IoTDBDisableAlignIT.java  |  354 ++---
 .../iotdb/db/integration/IoTDBEncodingIT.java      |  123 +-
 .../db/integration/IoTDBEngineTimeGeneratorIT.java |  118 +-
 .../iotdb/db/integration/IoTDBFilePathUtilsIT.java |   13 +-
 .../apache/iotdb/db/integration/IoTDBFillIT.java   |  739 +++++----
 .../db/integration/IoTDBFloatPrecisionIT.java      |   71 +-
 .../db/integration/IoTDBFlushQueryMergeIT.java     |   80 +-
 .../iotdb/db/integration/IoTDBGroupByFillIT.java   |  689 ++++-----
 .../integration/IoTDBGroupByFillWithRangeIT.java   |  115 +-
 .../iotdb/db/integration/IoTDBInsertNaNIT.java     |   70 +-
 .../db/integration/IoTDBInsertWithQueryIT.java     |  333 +++--
 .../iotdb/db/integration/IoTDBKillQueryTest.java   |   17 +-
 .../iotdb/db/integration/IoTDBLargeDataIT.java     |  743 +++++-----
 .../apache/iotdb/db/integration/IoTDBLastIT.java   |  366 ++---
 .../db/integration/IoTDBLevelCompactionIT.java     |  643 ++++----
 .../iotdb/db/integration/IoTDBLimitSlimitIT.java   |  146 +-
 .../db/integration/IoTDBLoadExternalTsfileIT.java  |  627 +++++---
 .../iotdb/db/integration/IoTDBMergeTest.java       |  153 +-
 .../iotdb/db/integration/IoTDBMetadataFetchIT.java |  234 +--
 .../iotdb/db/integration/IoTDBMultiDeviceIT.java   |  157 +-
 .../IoTDBMultiOverlappedChunkInUnseqIT.java        |   36 +-
 .../db/integration/IoTDBMultiOverlappedPageIT.java |   85 +-
 .../iotdb/db/integration/IoTDBMultiSeriesIT.java   |  980 ++++++------
 .../db/integration/IoTDBMultiStatementsIT.java     |   90 +-
 .../db/integration/IoTDBNewTsFileCompactionIT.java |  442 +++---
 .../iotdb/db/integration/IoTDBNumberPathIT.java    |  625 ++++----
 .../db/integration/IoTDBOverlappedPageIT.java      |   86 +-
 .../iotdb/db/integration/IoTDBQueryDemoIT.java     |  409 ++---
 .../db/integration/IoTDBQueryMemoryControlIT.java  |  586 ++++----
 .../db/integration/IoTDBQueryTimeoutTest.java      |   30 +-
 .../iotdb/db/integration/IoTDBQuotedPathIT.java    |   48 +-
 .../iotdb/db/integration/IoTDBRecoverIT.java       |  273 ++--
 .../db/integration/IoTDBRecoverUnclosedIT.java     |  174 ++-
 .../db/integration/IoTDBRemovePartitionIT.java     |  104 +-
 .../iotdb/db/integration/IoTDBRestartIT.java       |  146 +-
 .../iotdb/db/integration/IoTDBResultSetIT.java     |   44 +-
 .../db/integration/IoTDBRpcCompressionIT.java      |  117 +-
 .../IoTDBSameMeasurementsDifferentTypesIT.java     |   84 +-
 .../iotdb/db/integration/IoTDBSensorUpdateIT.java  |    9 +-
 .../db/integration/IoTDBSequenceDataQueryIT.java   |  118 +-
 .../iotdb/db/integration/IoTDBSeriesReaderIT.java  |  200 ++-
 .../iotdb/db/integration/IoTDBSimpleQueryIT.java   |  426 +++---
 .../integration/IoTDBSortedShowTimeseriesIT.java   |  346 +++--
 .../iotdb/db/integration/IoTDBTagAlterIT.java      |  481 +++---
 .../apache/iotdb/db/integration/IoTDBTagIT.java    | 1011 +++++++------
 .../iotdb/db/integration/IoTDBTimeZoneIT.java      |   68 +-
 .../iotdb/db/integration/IoTDBTracingTest.java     |    7 +-
 .../apache/iotdb/db/integration/IoTDBTtlIT.java    |   58 +-
 .../iotdb/db/integration/IoTDBUDFManagementIT.java |  883 +++++------
 .../db/integration/IoTDBUDFWindowQueryIT.java      | 1068 ++++++-------
 .../integration/IoTDBUDTFAlignByTimeQueryIT.java   | 1566 +++++++++++---------
 .../db/integration/IoTDBUDTFBuiltinFunctionIT.java |  133 +-
 .../db/integration/IoTDBUDTFHybridQueryIT.java     |  341 +++--
 .../db/integration/IoTDBUDTFNonAlignQueryIT.java   |  703 ++++-----
 .../iotdb/db/integration/IoTDBVersionIT.java       |   23 +-
 .../aggregation/IoTDBAggregationByLevelIT.java     |  188 ++-
 .../aggregation/IoTDBAggregationDeleteIT.java      |   37 +-
 .../aggregation/IoTDBAggregationIT.java            |  743 ++++++----
 .../aggregation/IoTDBAggregationLargeDataIT.java   |  772 ++++++----
 .../aggregation/IoTDBAggregationSmallDataIT.java   |  574 +++----
 .../db/integration/auth/IoTDBAuthorizationIT.java  |  295 ++--
 .../iotdb/db/metadata/MManagerAdvancedTest.java    |  172 ++-
 .../iotdb/db/metadata/MManagerBasicTest.java       |  314 ++--
 .../iotdb/db/metadata/MManagerImproveTest.java     |   11 +-
 .../org/apache/iotdb/db/metadata/MTreeTest.java    |  419 ++++--
 .../apache/iotdb/db/metadata/MetaUtilsTest.java    |   24 +-
 .../apache/iotdb/db/metadata/PartialPathTest.java  |    8 +-
 .../iotdb/db/monitor/IoTDBStatMonitorTest.java     |   45 +-
 .../iotdb/db/mqtt/BrokerAuthenticatorTest.java     |   43 +-
 .../iotdb/db/mqtt/JSONPayloadFormatTest.java       |   73 +-
 .../iotdb/db/mqtt/PayloadFormatManagerTest.java    |   22 +-
 .../apache/iotdb/db/mqtt/PublishHandlerTest.java   |   61 +-
 .../java/org/apache/iotdb/db/qp/PlannerTest.java   |  134 +-
 .../iotdb/db/qp/bench/QueryParseBenchmark.java     |    3 +-
 .../iotdb/db/qp/logical/IndexLogicalPlanTest.java  |   55 +-
 .../iotdb/db/qp/logical/LogicalPlanSmallTest.java  |  122 +-
 .../iotdb/db/qp/other/TSPlanContextAuthorTest.java |   48 +-
 .../iotdb/db/qp/physical/ConcatOptimizerTest.java  |   76 +-
 .../physical/IndexSubMatchingPhysicalPlanTest.java |   34 +-
 .../IndexWholeMatchingPhysicalPlanTest.java        |   41 +-
 .../db/qp/physical/InsertTabletMultiPlanTest.java  |   16 +-
 .../iotdb/db/qp/physical/InsertTabletPlanTest.java |   16 +-
 .../db/qp/physical/PhysicalPlanSerializeTest.java  |   83 +-
 .../iotdb/db/qp/physical/PhysicalPlanTest.java     |  404 +++--
 .../iotdb/db/qp/physical/SerializationTest.java    |   45 +-
 .../iotdb/db/qp/sql/IoTDBsqlVisitorTest.java       |   14 +-
 .../db/qp/utils/DatetimeQueryDataSetUtilsTest.java |   69 +-
 .../db/query/aggregation/AggregateResultTest.java  |   85 +-
 .../query/aggregation/DescAggregateResultTest.java |   21 +-
 .../db/query/control/FileReaderManagerTest.java    |   89 +-
 .../db/query/control/QueryResourceManagerTest.java |   58 +-
 .../iotdb/db/query/control/TracingManagerTest.java |   28 +-
 .../dataset/EngineDataSetWithValueFilterTest.java  |  115 +-
 .../iotdb/db/query/dataset/ListDataSetTest.java    |   76 +-
 .../db/query/dataset/ShowTimeSeriesResultTest.java |   17 +-
 .../iotdb/db/query/dataset/SingleDataSetTest.java  |   63 +-
 .../query/dataset/UDTFAlignByTimeDataSetTest.java  | 1268 ++++++++--------
 .../dataset/groupby/GroupByEngineDataSetTest.java  |  188 ++-
 .../dataset/groupby/GroupByFillDataSetTest.java    |   95 +-
 .../dataset/groupby/GroupByLevelDataSetTest.java   |   42 +-
 .../dataset/groupby/GroupByTimeDataSetTest.java    |  157 +-
 .../query/externalsort/ExternalSortEngineTest.java |   26 +-
 .../db/query/externalsort/FakeChunkReaderWrap.java |   12 +-
 ...ExternalSortFileSerializerDeserializerTest.java |   49 +-
 .../reader/series/SeriesAggregateReaderTest.java   |   34 +-
 .../reader/series/SeriesReaderByTimestampTest.java |   31 +-
 .../db/query/reader/series/SeriesReaderTest.java   |   51 +-
 .../query/reader/series/SeriesReaderTestUtil.java  |  118 +-
 .../query/reader/universal/FakedSeriesReader.java  |   14 +-
 .../reader/universal/PriorityMergeReaderTest.java  |   47 +-
 .../ElasticSerializableRowRecordListTest.java      |  454 +++---
 .../ElasticSerializableTVListTest.java             |  519 +++----
 .../SerializableBinaryTVListTest.java              |  165 ++-
 .../SerializableBooleanTVListTest.java             |  165 ++-
 .../SerializableDoubleTVListTest.java              |  163 +-
 .../datastructure/SerializableFloatTVListTest.java |  161 +-
 .../datastructure/SerializableIntTVListTest.java   |  161 +-
 .../udf/datastructure/SerializableListTest.java    |   85 +-
 .../datastructure/SerializableLongTVListTest.java  |  161 +-
 .../SerializableRowRecordListTest.java             |  355 ++---
 .../udf/datastructure/SerializableTVListTest.java  |   76 +-
 .../iotdb/db/query/udf/example/Accumulator.java    |  187 ++-
 .../apache/iotdb/db/query/udf/example/Adder.java   |  186 +--
 .../apache/iotdb/db/query/udf/example/Counter.java |  161 +-
 .../org/apache/iotdb/db/query/udf/example/Max.java |    4 +-
 .../iotdb/db/query/udf/example/Multiplier.java     |  130 +-
 .../SlidingSizeWindowConstructorTester0.java       |  116 +-
 .../SlidingSizeWindowConstructorTester1.java       |  132 +-
 .../SlidingTimeWindowConstructionTester.java       |  146 +-
 .../org/apache/iotdb/db/script/EnvScriptIT.java    |   42 +-
 .../db/sync/receiver/load/FileLoaderTest.java      |  135 +-
 .../recover/SyncReceiverLogAnalyzerTest.java       |   62 +-
 .../receiver/recover/SyncReceiverLoggerTest.java   |   37 +-
 .../db/sync/sender/manage/SyncFileManagerTest.java |   96 +-
 .../sender/recover/SyncSenderLogAnalyzerTest.java  |   54 +-
 .../sync/sender/recover/SyncSenderLoggerTest.java  |   29 +-
 .../db/sync/sender/transfer/SyncClientTest.java    |   20 +-
 .../apache/iotdb/db/tools/IoTDBWatermarkTest.java  |  110 +-
 .../org/apache/iotdb/db/tools/MLogParserTest.java  |   64 +-
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |   34 +-
 .../iotdb/db/utils/CopyOnReadLinkedListTest.java   |    1 -
 .../iotdb/db/utils/EncodingInferenceUtilsTest.java |   20 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   47 +-
 .../apache/iotdb/db/utils/FilePathUtilsTest.java   |   22 +-
 .../iotdb/db/utils/MathQueryDataSetUtilsTest.java  |   40 +-
 .../org/apache/iotdb/db/utils/MemUtilsTest.java    |    5 +-
 .../apache/iotdb/db/utils/OpenFileNumUtilTest.java |   12 +-
 .../java/org/apache/iotdb/db/utils/RandomNum.java  |   11 +-
 .../org/apache/iotdb/db/utils/SchemaUtilsTest.java |   15 +-
 .../apache/iotdb/db/utils/SerializeUtilsTest.java  |   34 +-
 .../iotdb/db/utils/TypeInferenceUtilsTest.java     |   58 +-
 .../db/utils/datastructure/BinaryTVListTest.java   |    8 +-
 .../db/utils/datastructure/BooleanTVListTest.java  |    9 +-
 .../db/utils/datastructure/DoubleTVListTest.java   |   11 +-
 .../db/utils/datastructure/FloatTVListTest.java    |   11 +-
 .../db/utils/datastructure/IntTVListTest.java      |   13 +-
 .../db/utils/datastructure/LongTVListTest.java     |   27 +-
 .../db/utils/datastructure/PrecisionTest.java      |   53 +-
 .../iotdb/db/writelog/IoTDBLogFileSizeTest.java    |  181 ++-
 .../apache/iotdb/db/writelog/PerformanceTest.java  |  106 +-
 .../iotdb/db/writelog/WriteLogNodeManagerTest.java |  127 +-
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |  145 +-
 .../iotdb/db/writelog/io/LogWriterReaderTest.java  |   27 +-
 .../db/writelog/io/MultiFileLogReaderTest.java     |   15 +-
 .../db/writelog/recover/DeviceStringTest.java      |   51 +-
 .../iotdb/db/writelog/recover/LogReplayerTest.java |  132 +-
 .../recover/RecoverResourceFromReaderTest.java     |  173 ++-
 .../db/writelog/recover/SeqTsFileRecoverTest.java  |  138 +-
 .../writelog/recover/UnseqTsFileRecoverTest.java   |  149 +-
 .../org/apache/iotdb/rpc/AutoResizingBuffer.java   |   12 +-
 .../iotdb/rpc/AutoScalingBufferReadTransport.java  |    2 -
 .../iotdb/rpc/AutoScalingBufferWriteTransport.java |    5 +-
 .../apache/iotdb/rpc/BatchExecutionException.java  |    1 -
 .../java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java |   31 +-
 .../org/apache/iotdb/rpc/NonOpenTransport.java     |    8 +-
 .../org/apache/iotdb/rpc/RedirectException.java    |    4 +-
 .../org/apache/iotdb/rpc/RpcTransportFactory.java  |    7 +-
 .../main/java/org/apache/iotdb/rpc/RpcUtils.java   |   61 +-
 .../org/apache/iotdb/rpc/SynchronizedHandler.java  |    3 +-
 .../rpc/TCompressedElasticFramedTransport.java     |   20 +-
 .../apache/iotdb/rpc/TElasticFramedTransport.java  |   16 +-
 .../java/org/apache/iotdb/rpc/TSStatusCode.java    |    5 +-
 .../iotdb/rpc/TSnappyElasticFramedTransport.java   |    4 +-
 .../rpc/TimeoutChangeableTFastFramedTransport.java |    3 +-
 .../TimeoutChangeableTSnappyFramedTransport.java   |    3 +-
 .../iotdb/rpc/TimeoutChangeableTransport.java      |    3 -
 .../main/java/org/apache/iotdb/session/Config.java |    8 +-
 .../java/org/apache/iotdb/session/Session.java     |  550 ++++---
 .../apache/iotdb/session/SessionConnection.java    |   82 +-
 .../org/apache/iotdb/session/SessionDataSet.java   |   68 +-
 .../org/apache/iotdb/session/SessionUtils.java     |    3 +-
 .../iotdb/session/pool/SessionDataSetWrapper.java  |   33 +-
 .../org/apache/iotdb/session/pool/SessionPool.java |  350 +++--
 .../iotdb/session/IoTDBSessionComplexIT.java       |  150 +-
 .../iotdb/session/IoTDBSessionIteratorIT.java      |  221 +--
 .../apache/iotdb/session/IoTDBSessionSimpleIT.java |  238 ++-
 .../apache/iotdb/session/SessionCacheLeaderUT.java |  169 ++-
 .../java/org/apache/iotdb/session/SessionUT.java   |   36 +-
 .../apache/iotdb/session/pool/SessionPoolTest.java |  146 +-
 .../org/apache/iotdb/spark/db/SQLConstant.java     |   28 +-
 .../iotdb/spark/tsfile/io/TsFileOutputFormat.java  |    1 -
 .../iotdb/spark/tsfile/io/TsFileRecordWriter.java  |   10 +-
 .../org/apache/iotdb/spark/tsfile/qp/Executor.java |    8 +-
 .../iotdb/spark/tsfile/qp/QueryProcessor.java      |   25 +-
 .../spark/tsfile/qp/common/BasicOperator.java      |    7 +-
 .../spark/tsfile/qp/common/FilterOperator.java     |    7 +-
 .../iotdb/spark/tsfile/qp/common/Operator.java     |    5 +-
 .../iotdb/spark/tsfile/qp/common/SQLConstant.java  |    7 +-
 .../iotdb/spark/tsfile/qp/common/SingleQuery.java  |   20 +-
 .../iotdb/spark/tsfile/qp/common/TSQueryPlan.java  |    5 +-
 .../qp/exception/BasicOperatorException.java       |    7 +-
 .../tsfile/qp/exception/DNFOptimizeException.java  |    7 +-
 .../qp/exception/LogicalOptimizeException.java     |    6 +-
 .../tsfile/qp/exception/MergeFilterException.java  |    2 -
 .../qp/exception/QueryOperatorException.java       |    1 -
 .../qp/exception/QueryProcessorException.java      |    6 +-
 .../tsfile/qp/exception/RemoveNotException.java    |    7 +-
 .../tsfile/qp/optimizer/DNFFilterOptimizer.java    |   22 +-
 .../tsfile/qp/optimizer/IFilterOptimizer.java      |    5 +-
 .../tsfile/qp/optimizer/PhysicalOptimizer.java     |   36 +-
 .../tsfile/qp/optimizer/RemoveNotOptimizer.java    |   21 +-
 .../apache/iotdb/tsfile/common/cache/Cache.java    |    1 -
 .../apache/iotdb/tsfile/common/cache/LRUCache.java |   17 +-
 .../iotdb/tsfile/common/conf/TSFileConfig.java     |  894 +++++------
 .../iotdb/tsfile/common/conf/TSFileDescriptor.java |  322 ++--
 .../tsfile/common/constant/JsonFormatConstant.java |   90 +-
 .../tsfile/common/constant/QueryConstant.java      |    3 +-
 .../tsfile/common/constant/TsFileConstant.java     |   68 +-
 .../apache/iotdb/tsfile/compress/ICompressor.java  |   23 +-
 .../iotdb/tsfile/compress/IUnCompressor.java       |   33 +-
 .../tsfile/encoding/bitpacking/IntPacker.java      |   54 +-
 .../tsfile/encoding/bitpacking/LongPacker.java     |   56 +-
 .../tsfile/encoding/decoder/BitmapDecoder.java     |   57 +-
 .../encoding/decoder/DeltaBinaryDecoder.java       |   30 +-
 .../encoding/decoder/DoublePrecisionDecoderV1.java |   17 +-
 .../encoding/decoder/DoublePrecisionDecoderV2.java |  104 +-
 .../tsfile/encoding/decoder/FloatDecoder.java      |   23 +-
 .../tsfile/encoding/decoder/GorillaDecoderV1.java  |   11 +-
 .../tsfile/encoding/decoder/GorillaDecoderV2.java  |  242 +--
 .../tsfile/encoding/decoder/IntGorillaDecoder.java |  178 +--
 .../tsfile/encoding/decoder/IntRleDecoder.java     |  248 ++--
 .../encoding/decoder/LongGorillaDecoder.java       |  176 +--
 .../tsfile/encoding/decoder/LongRleDecoder.java    |  238 ++-
 .../tsfile/encoding/decoder/PlainDecoder.java      |    1 -
 .../encoding/decoder/RegularDataDecoder.java       |   29 +-
 .../iotdb/tsfile/encoding/decoder/RleDecoder.java  |  469 +++---
 .../encoding/decoder/SinglePrecisionDecoderV1.java |   17 +-
 .../encoding/decoder/SinglePrecisionDecoderV2.java |  104 +-
 .../tsfile/encoding/encoder/BitmapEncoder.java     |   40 +-
 .../encoding/encoder/DeltaBinaryEncoder.java       |   55 +-
 .../encoding/encoder/DoublePrecisionEncoderV1.java |   18 +-
 .../encoding/encoder/DoublePrecisionEncoderV2.java |  108 +-
 .../iotdb/tsfile/encoding/encoder/Encoder.java     |   12 +-
 .../tsfile/encoding/encoder/FloatEncoder.java      |   31 +-
 .../tsfile/encoding/encoder/GorillaEncoderV1.java  |    1 -
 .../tsfile/encoding/encoder/GorillaEncoderV2.java  |  210 ++-
 .../tsfile/encoding/encoder/IntGorillaEncoder.java |  310 ++--
 .../tsfile/encoding/encoder/IntRleEncoder.java     |  262 ++--
 .../encoding/encoder/LongGorillaEncoder.java       |  310 ++--
 .../tsfile/encoding/encoder/LongRleEncoder.java    |  248 ++--
 .../tsfile/encoding/encoder/PlainEncoder.java      |   33 +-
 .../encoding/encoder/RegularDataEncoder.java       |   45 +-
 .../iotdb/tsfile/encoding/encoder/RleEncoder.java  |  672 +++++----
 .../iotdb/tsfile/encoding/encoder/SDTEncoder.java  |   66 +-
 .../encoding/encoder/SinglePrecisionEncoderV1.java |   21 +-
 .../encoding/encoder/SinglePrecisionEncoderV2.java |  108 +-
 .../tsfile/encoding/encoder/TSEncodingBuilder.java |   63 +-
 .../iotdb/tsfile/exception/NullFieldException.java |    1 -
 .../tsfile/exception/cache/CacheException.java     |    3 +-
 .../CompressionTypeNotSupportedException.java      |    4 +-
 .../exception/filter/StatisticsClassException.java |    1 -
 .../filter/UnSupportFilterDataTypeException.java   |    4 +-
 .../exception/write/NoMeasurementException.java    |    5 +-
 .../tsfile/exception/write/PageException.java      |    4 +-
 .../write/TsFileNotCompleteException.java          |    3 +-
 .../exception/write/WriteProcessException.java     |    4 +-
 .../org/apache/iotdb/tsfile/file/MetaMarker.java   |   27 +-
 .../iotdb/tsfile/file/header/ChunkGroupHeader.java |   17 +-
 .../iotdb/tsfile/file/header/ChunkHeader.java      |   98 +-
 .../iotdb/tsfile/file/header/PageHeader.java       |   24 +-
 .../tsfile/file/metadata/ChunkGroupMetadata.java   |    4 +-
 .../iotdb/tsfile/file/metadata/ChunkMetadata.java  |   71 +-
 .../file/metadata/MetadataIndexConstructor.java    |   46 +-
 .../tsfile/file/metadata/MetadataIndexNode.java    |   12 +-
 .../tsfile/file/metadata/TimeseriesMetadata.java   |   24 +-
 .../iotdb/tsfile/file/metadata/TsFileMetadata.java |   19 +-
 .../file/metadata/enums/CompressionType.java       |   37 +-
 .../file/metadata/enums/MetadataIndexNodeType.java |   25 +-
 .../tsfile/file/metadata/enums/TSDataType.java     |  231 ++-
 .../tsfile/file/metadata/enums/TSEncoding.java     |  142 +-
 .../file/metadata/statistics/BinaryStatistics.java |   30 +-
 .../metadata/statistics/BooleanStatistics.java     |   38 +-
 .../file/metadata/statistics/DoubleStatistics.java |   49 +-
 .../file/metadata/statistics/FloatStatistics.java  |   46 +-
 .../metadata/statistics/IntegerStatistics.java     |   47 +-
 .../file/metadata/statistics/LongStatistics.java   |   45 +-
 .../file/metadata/statistics/Statistics.java       |   17 +-
 .../org/apache/iotdb/tsfile/fileSystem/FSType.java |    3 +-
 .../fileInputFactory/FileInputFactory.java         |    1 -
 .../fileInputFactory/HDFSInputFactory.java         |    4 +-
 .../fileInputFactory/LocalFSInputFactory.java      |    6 +-
 .../fileOutputFactory/FileOutputFactory.java       |    1 -
 .../fileOutputFactory/HDFSOutputFactory.java       |    7 +-
 .../fileOutputFactory/LocalFSOutputFactory.java    |    6 +-
 .../tsfile/fileSystem/fsFactory/FSFactory.java     |   14 +-
 .../tsfile/fileSystem/fsFactory/HDFSFactory.java   |   79 +-
 .../fileSystem/fsFactory/LocalFSFactory.java       |   10 +-
 .../apache/iotdb/tsfile/read/ReadOnlyTsFile.java   |   11 +-
 .../apache/iotdb/tsfile/read/TimeValuePair.java    |    7 +-
 .../iotdb/tsfile/read/TsFileCheckStatus.java       |    1 -
 .../iotdb/tsfile/read/TsFileRestorableReader.java  |    8 +-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |  382 ++---
 .../iotdb/tsfile/read/UnClosedTsFileReader.java    |   13 +-
 .../apache/iotdb/tsfile/read/common/BatchData.java |   18 +-
 .../iotdb/tsfile/read/common/BatchDataFactory.java |    4 +-
 .../org/apache/iotdb/tsfile/read/common/Chunk.java |   32 +-
 .../tsfile/read/common/DescReadBatchData.java      |   12 +-
 .../tsfile/read/common/DescReadWriteBatchData.java |   42 +-
 .../org/apache/iotdb/tsfile/read/common/Field.java |    5 +-
 .../org/apache/iotdb/tsfile/read/common/Path.java  |   26 +-
 .../iotdb/tsfile/read/common/SignalBatchData.java  |    6 +-
 .../apache/iotdb/tsfile/read/common/TimeRange.java |   52 +-
 .../read/controller/CachedChunkLoaderImpl.java     |   28 +-
 .../iotdb/tsfile/read/controller/IChunkLoader.java |   11 +-
 .../read/controller/IChunkMetadataLoader.java      |    9 +-
 .../tsfile/read/controller/IMetadataQuerier.java   |   12 +-
 .../read/controller/MetadataQuerierByFileImpl.java |   67 +-
 .../tsfile/read/expression/ExpressionType.java     |   24 +-
 .../tsfile/read/expression/IBinaryExpression.java  |    1 -
 .../tsfile/read/expression/QueryExpression.java    |   13 +-
 .../read/expression/impl/BinaryExpression.java     |    1 -
 .../read/expression/impl/GlobalTimeExpression.java |    1 -
 .../expression/impl/SingleSeriesExpression.java    |    1 -
 .../read/expression/util/ExpressionOptimizer.java  |   76 +-
 .../iotdb/tsfile/read/filter/GroupByFilter.java    |   25 +-
 .../iotdb/tsfile/read/filter/TimeFilter.java       |    6 +-
 .../iotdb/tsfile/read/filter/ValueFilter.java      |    5 +-
 .../tsfile/read/filter/basic/BinaryFilter.java     |   10 +-
 .../iotdb/tsfile/read/filter/basic/Filter.java     |   20 +-
 .../tsfile/read/filter/basic/UnaryFilter.java      |    6 +-
 .../tsfile/read/filter/factory/FilterFactory.java  |    1 -
 .../read/filter/factory/FilterSerializeId.java     |   12 +-
 .../tsfile/read/filter/factory/FilterType.java     |    5 +-
 .../tsfile/read/filter/operator/AndFilter.java     |   15 +-
 .../iotdb/tsfile/read/filter/operator/Eq.java      |    7 +-
 .../iotdb/tsfile/read/filter/operator/Gt.java      |    3 +-
 .../iotdb/tsfile/read/filter/operator/GtEq.java    |    4 +-
 .../iotdb/tsfile/read/filter/operator/In.java      |    3 +-
 .../iotdb/tsfile/read/filter/operator/Lt.java      |    3 +-
 .../iotdb/tsfile/read/filter/operator/LtEq.java    |    4 +-
 .../iotdb/tsfile/read/filter/operator/NotEq.java   |    6 +-
 .../tsfile/read/filter/operator/NotFilter.java     |    7 +-
 .../tsfile/read/filter/operator/OrFilter.java      |   15 +-
 .../query/dataset/DataSetWithTimeGenerator.java    |    7 +-
 .../query/dataset/DataSetWithoutTimeGenerator.java |   20 +-
 .../tsfile/read/query/dataset/QueryDataSet.java    |   10 +-
 .../query/executor/ExecutorWithTimeGenerator.java  |   27 +-
 .../tsfile/read/query/executor/TsFileExecutor.java |   47 +-
 .../read/query/timegenerator/TimeGenerator.java    |    8 +-
 .../query/timegenerator/TsFileTimeGenerator.java   |    9 +-
 .../read/query/timegenerator/node/AndNode.java     |    2 +-
 .../read/query/timegenerator/node/LeafNode.java    |    5 +-
 .../read/query/timegenerator/node/NodeType.java    |    8 +-
 .../read/query/timegenerator/node/OrNode.java      |    8 +-
 .../tsfile/read/reader/BatchDataIterator.java      |    6 +-
 .../iotdb/tsfile/read/reader/IChunkReader.java     |    3 +-
 .../iotdb/tsfile/read/reader/IPageReader.java      |    4 +-
 .../iotdb/tsfile/read/reader/IPointReader.java     |    3 +-
 .../iotdb/tsfile/read/reader/TsFileInput.java      |   95 +-
 .../tsfile/read/reader/chunk/ChunkReader.java      |   87 +-
 .../read/reader/chunk/ChunkReaderByTimestamp.java  |    4 +-
 .../iotdb/tsfile/read/reader/page/PageReader.java  |   48 +-
 .../reader/series/AbstractFileSeriesReader.java    |   12 +-
 .../read/reader/series/EmptyFileSeriesReader.java  |    6 +-
 .../read/reader/series/FileSeriesReader.java       |    9 +-
 .../reader/series/FileSeriesReaderByTimestamp.java |   14 +-
 .../java/org/apache/iotdb/tsfile/utils/Binary.java |    5 +-
 .../org/apache/iotdb/tsfile/utils/BloomFilter.java |    9 +-
 .../org/apache/iotdb/tsfile/utils/BytesUtils.java  |   72 +-
 .../java/org/apache/iotdb/tsfile/utils/Loader.java |   11 +-
 .../apache/iotdb/tsfile/utils/Murmur128Hash.java   |   34 +-
 .../java/org/apache/iotdb/tsfile/utils/Pair.java   |    4 +-
 .../org/apache/iotdb/tsfile/utils/PublicBAOS.java  |   34 +-
 .../iotdb/tsfile/utils/RamUsageEstimator.java      |  335 ++---
 .../tsfile/utils/ReadWriteForEncodingUtils.java    |   65 +-
 .../iotdb/tsfile/utils/ReadWriteIOUtils.java       |  206 +--
 .../apache/iotdb/tsfile/utils/StringContainer.java |   66 +-
 .../apache/iotdb/tsfile/utils/TsPrimitiveType.java |    9 +-
 .../tsfile/v2/file/footer/ChunkGroupFooterV2.java  |    4 +-
 .../iotdb/tsfile/v2/file/header/ChunkHeaderV2.java |   24 +-
 .../iotdb/tsfile/v2/file/header/PageHeaderV2.java  |    5 +-
 .../tsfile/v2/file/metadata/ChunkMetadataV2.java   |    6 +-
 .../v2/file/metadata/MetadataIndexEntryV2.java     |    4 +-
 .../v2/file/metadata/MetadataIndexNodeV2.java      |    9 +-
 .../v2/file/metadata/TimeseriesMetadataV2.java     |   10 +-
 .../tsfile/v2/file/metadata/TsFileMetadataV2.java  |   11 +-
 .../v2/file/metadata/statistics/StatisticsV2.java  |   11 +-
 .../tsfile/v2/read/TsFileSequenceReaderForV2.java  |  207 +--
 .../tsfile/v2/read/reader/page/PageReaderV2.java   |   28 +-
 .../apache/iotdb/tsfile/write/TsFileWriter.java    |   61 +-
 .../tsfile/write/chunk/ChunkGroupWriterImpl.java   |   44 +-
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |  125 +-
 .../tsfile/write/chunk/IChunkGroupWriter.java      |   49 +-
 .../iotdb/tsfile/write/chunk/IChunkWriter.java     |   77 +-
 .../apache/iotdb/tsfile/write/page/PageWriter.java |  101 +-
 .../apache/iotdb/tsfile/write/record/TSRecord.java |   25 +-
 .../apache/iotdb/tsfile/write/record/Tablet.java   |  132 +-
 .../write/record/datapoint/BooleanDataPoint.java   |   12 +-
 .../tsfile/write/record/datapoint/DataPoint.java   |   12 +-
 .../write/record/datapoint/DoubleDataPoint.java    |   12 +-
 .../write/record/datapoint/FloatDataPoint.java     |   11 +-
 .../write/record/datapoint/IntDataPoint.java       |   11 +-
 .../write/record/datapoint/LongDataPoint.java      |   13 +-
 .../write/record/datapoint/StringDataPoint.java    |   13 +-
 .../tsfile/write/schema/MeasurementSchema.java     |   98 +-
 .../apache/iotdb/tsfile/write/schema/Schema.java   |   18 +-
 .../tsfile/write/schema/TimeseriesSchema.java      |   75 +-
 .../write/writer/ForceAppendTsFileWriter.java      |    5 +-
 .../iotdb/tsfile/write/writer/IDataWriter.java     |    4 +-
 .../tsfile/write/writer/LocalTsFileOutput.java     |    6 +-
 .../write/writer/RestorableTsFileIOWriter.java     |   51 +-
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |  117 +-
 .../iotdb/tsfile/write/writer/TsFileOutput.java    |   18 +-
 .../apache/iotdb/tsfile/common/LRUCacheTest.java   |   18 +-
 .../apache/iotdb/tsfile/compress/CompressTest.java |   12 +-
 .../org/apache/iotdb/tsfile/compress/GZIPTest.java |   25 +-
 .../org/apache/iotdb/tsfile/compress/LZ4Test.java  |    7 +-
 .../apache/iotdb/tsfile/compress/SnappyTest.java   |   19 +-
 .../apache/iotdb/tsfile/constant/TestConstant.java |    1 -
 .../iotdb/tsfile/encoding/SDTEncoderTest.java      |   19 +-
 .../tsfile/encoding/bitpacking/IntPackerTest.java  |    4 -
 .../tsfile/encoding/bitpacking/LongPackerTest.java |    5 +-
 .../tsfile/encoding/decoder/BitmapDecoderTest.java |    9 +-
 .../tsfile/encoding/decoder/FloatDecoderTest.java  |   30 +-
 .../encoding/decoder/GorillaDecoderV1Test.java     |   11 +-
 .../encoding/decoder/GorillaDecoderV2Test.java     | 1184 +++++++--------
 .../tsfile/encoding/decoder/IntRleDecoderTest.java |  434 +++---
 .../encoding/decoder/LongRleDecoderTest.java       |  440 +++---
 .../delta/DeltaBinaryEncoderIntegerTest.java       |    7 +-
 .../decoder/delta/DeltaBinaryEncoderLongTest.java  |   26 +-
 .../regular/RegularDataEncoderIntegerTest.java     |    6 +-
 .../regular/RegularDataEncoderLongTest.java        |   64 +-
 .../iotdb/tsfile/file/header/PageHeaderTest.java   |   17 +-
 .../file/metadata/MetadataIndexNodeTest.java       |    4 +-
 .../file/metadata/TimeSeriesMetadataTest.java      |    7 +-
 .../tsfile/file/metadata/TsFileMetadataTest.java   |    3 +-
 .../metadata/statistics/BooleanStatisticsTest.java |    4 +-
 .../metadata/statistics/DoubleStatisticsTest.java  |    2 -
 .../metadata/statistics/FloatStatisticsTest.java   |    2 -
 .../metadata/statistics/LongStatisticsTest.java    |    1 -
 .../metadata/statistics/StringStatisticsTest.java  |    7 +-
 .../tsfile/file/metadata/utils/TestHelper.java     |    7 +-
 .../iotdb/tsfile/file/metadata/utils/Utils.java    |   24 +-
 .../apache/iotdb/tsfile/read/ExpressionTest.java   |    4 +-
 .../iotdb/tsfile/read/GetAllDevicesTest.java       |   11 +-
 .../iotdb/tsfile/read/ReadInPartitionTest.java     |   84 +-
 .../iotdb/tsfile/read/ReadOnlyTsFileTest.java      |   44 +-
 .../org/apache/iotdb/tsfile/read/ReadTest.java     |   77 +-
 .../iotdb/tsfile/read/TimePlainEncodeReadTest.java |   92 +-
 .../tsfile/read/TimeSeriesMetadataReadTest.java    |    4 +-
 .../tsfile/read/TsFileRestorableReaderTest.java    |    6 +-
 .../tsfile/read/TsFileSequenceReaderTest.java      |   17 +-
 .../iotdb/tsfile/read/common/BatchDataTest.java    |   12 +-
 .../apache/iotdb/tsfile/read/common/FieldTest.java |    1 -
 .../apache/iotdb/tsfile/read/common/PathTest.java  |    2 +-
 .../iotdb/tsfile/read/common/TimeRangeTest.java    |    4 -
 .../tsfile/read/controller/ChunkLoaderTest.java    |   13 +-
 .../controller/IMetadataQuerierByFileImplTest.java |   54 +-
 .../tsfile/read/filter/FilterSerializeTest.java    |   55 +-
 .../tsfile/read/filter/GroupByFilterTest.java      |   17 +-
 .../read/filter/IExpressionOptimizerTest.java      |  123 +-
 .../read/filter/MinTimeMaxTimeFilterTest.java      |    4 -
 .../iotdb/tsfile/read/filter/OperatorTest.java     |   16 +-
 .../tsfile/read/filter/StatisticsFilterTest.java   |    9 +-
 .../read/query/executor/QueryExecutorTest.java     |   42 +-
 .../tsfile/read/query/timegenerator/NodeTest.java  |   45 +-
 .../read/query/timegenerator/ReadWriteTest.java    |   31 +-
 .../query/timegenerator/ReaderByTimestampTest.java |   23 +-
 .../timegenerator/TimeGeneratorReadEmptyTest.java  |   25 +-
 .../timegenerator/TimeGeneratorReadWriteTest.java  |   32 +-
 .../query/timegenerator/TimeGeneratorTest.java     |   20 +-
 .../TsFileGeneratorForSeriesReaderByTimestamp.java |   97 +-
 .../iotdb/tsfile/read/reader/FakedBatchReader.java |    5 +-
 .../tsfile/read/reader/FakedMultiBatchReader.java  |    4 +-
 .../tsfile/read/reader/FakedTimeGenerator.java     |   18 +-
 .../iotdb/tsfile/read/reader/PageReaderTest.java   |  186 ++-
 .../iotdb/tsfile/read/reader/ReaderTest.java       |   35 +-
 .../apache/iotdb/tsfile/utils/BloomFilterTest.java |    7 +-
 .../apache/iotdb/tsfile/utils/BytesUtilsTest.java  |    2 -
 .../apache/iotdb/tsfile/utils/FileGenerator.java   |   98 +-
 .../org/apache/iotdb/tsfile/utils/FileUtils.java   |   19 +-
 .../apache/iotdb/tsfile/utils/FileUtilsTest.java   |   10 +-
 .../org/apache/iotdb/tsfile/utils/PairTest.java    |    4 -
 .../iotdb/tsfile/utils/ReadWriteIOUtilsTest.java   |    9 +-
 .../tsfile/utils/ReadWriteStreamUtilsTest.java     |   42 +-
 .../tsfile/utils/ReadWriteToBytesUtilsTest.java    |   10 +-
 .../org/apache/iotdb/tsfile/utils/RecordUtils.java |   12 +-
 .../apache/iotdb/tsfile/utils/RecordUtilsTest.java |   31 +-
 .../iotdb/tsfile/utils/StringContainerTest.java    |    3 -
 .../iotdb/tsfile/utils/TsFileGeneratorForTest.java |   97 +-
 .../iotdb/tsfile/utils/TsPrimitiveTypeTest.java    |    1 -
 .../tsfile/write/DefaultDeviceTemplateTest.java    |    5 +-
 .../org/apache/iotdb/tsfile/write/PerfTest.java    |   87 +-
 .../iotdb/tsfile/write/ReadPageInMemTest.java      |   39 +-
 ...SameMeasurementsWithDifferentDataTypesTest.java |   18 +-
 .../iotdb/tsfile/write/TsFileIOWriterTest.java     |    9 +-
 .../iotdb/tsfile/write/TsFileReadWriteTest.java    |   67 +-
 .../iotdb/tsfile/write/TsFileWriterTest.java       |  125 +-
 .../org/apache/iotdb/tsfile/write/WriteTest.java   |   77 +-
 .../write/schema/converter/SchemaBuilderTest.java  |   71 +-
 .../write/writer/ForceAppendTsFileWriterTest.java  |   36 +-
 .../writer/MeasurementSchemaSerializeTest.java     |  110 +-
 .../iotdb/tsfile/write/writer/PageWriterTest.java  |    2 +-
 .../write/writer/RestorableTsFileIOWriterTest.java |  260 ++--
 .../apache/zeppelin/iotdb/IoTDBInterpreter.java    |   83 +-
 .../zeppelin/iotdb/IoTDBInterpreterTest.java       |  244 ++-
 1513 files changed, 65051 insertions(+), 55091 deletions(-)
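
    The diff that follows is almost entirely mechanical: the formatter
    (presumably google-java-format, given the spotless branch name and the
    2-space, 100-column output) rewraps long builder chains, String.format
    arguments, and method signatures. As a minimal, self-contained sketch of
    the resulting style (not code from this commit), here is the same Apache
    Commons CLI builder pattern that AbstractCli.java builds; "h", "host",
    and "demo-cli" are illustrative stand-ins for the real HOST_ARGS and
    HOST_NAME constants:

import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class GoogleStyleOptionDemo {

  public static void main(String[] args) {
    // Once a chain exceeds the column limit, the formatter breaks after the
    // assignment and emits one chained call per line at a +4 continuation
    // indent, exactly as in the reformatted hunks below.
    Option host =
        Option.builder("h")
            .argName("host")
            .hasArg()
            .desc("Host Name (optional, default 127.0.0.1)")
            .build();

    Options options = new Options();
    options.addOption(host);

    new HelpFormatter().printHelp("demo-cli", options, true);
  }
}

    Compiled with commons-cli on the classpath, this prints the usage line
    and option description; the wrapping matches what the diff introduces.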

diff --git a/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java b/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
index 0a97fab..dc965ed 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
@@ -94,7 +94,7 @@ public abstract class AbstractCli {
   private static int fetchSize = 1000;
   static String timestampPrecision = "ms";
   static String timeFormat = RpcUtils.DEFAULT_TIME_FORMAT;
-  static private boolean continuePrint = false;
+  private static boolean continuePrint = false;
 
   private static int lineCount = 0;
   private static final String SUCCESS_MESSAGE = "The statement is executed successfully.";
@@ -136,56 +136,79 @@ public abstract class AbstractCli {
     timeFormat.setRequired(false);
     options.addOption(timeFormat);
 
-    Option host = Option.builder(HOST_ARGS).argName(HOST_NAME).hasArg()
-        .desc("Host Name (optional, default 127.0.0.1)").build();
+    Option host =
+        Option.builder(HOST_ARGS)
+            .argName(HOST_NAME)
+            .hasArg()
+            .desc("Host Name (optional, default 127.0.0.1)")
+            .build();
     options.addOption(host);
 
-    Option port = Option.builder(PORT_ARGS).argName(PORT_NAME).hasArg()
-        .desc("Port (optional, default 6667)")
-        .build();
+    Option port =
+        Option.builder(PORT_ARGS)
+            .argName(PORT_NAME)
+            .hasArg()
+            .desc("Port (optional, default 6667)")
+            .build();
     options.addOption(port);
 
-    Option username = Option.builder(USERNAME_ARGS).argName(USERNAME_NAME).hasArg()
-        .desc("User name (required)")
-        .required().build();
+    Option username =
+        Option.builder(USERNAME_ARGS)
+            .argName(USERNAME_NAME)
+            .hasArg()
+            .desc("User name (required)")
+            .required()
+            .build();
     options.addOption(username);
 
-    Option password = Option.builder(PASSWORD_ARGS).argName(PASSWORD_NAME).hasArg()
-        .desc("password (optional)")
-        .build();
+    Option password =
+        Option.builder(PASSWORD_ARGS)
+            .argName(PASSWORD_NAME)
+            .hasArg()
+            .desc("password (optional)")
+            .build();
     options.addOption(password);
 
-    Option execute = Option.builder(EXECUTE_ARGS).argName(EXECUTE_NAME).hasArg()
-        .desc("execute statement (optional)")
-        .build();
+    Option execute =
+        Option.builder(EXECUTE_ARGS)
+            .argName(EXECUTE_NAME)
+            .hasArg()
+            .desc("execute statement (optional)")
+            .build();
     options.addOption(execute);
 
-    Option maxPrintCount = Option.builder(MAX_PRINT_ROW_COUNT_ARGS)
-        .argName(MAX_PRINT_ROW_COUNT_NAME).hasArg()
-        .desc("Maximum number of rows displayed (optional)").build();
+    Option maxPrintCount =
+        Option.builder(MAX_PRINT_ROW_COUNT_ARGS)
+            .argName(MAX_PRINT_ROW_COUNT_NAME)
+            .hasArg()
+            .desc("Maximum number of rows displayed (optional)")
+            .build();
     options.addOption(maxPrintCount);
 
-    Option isRpcCompressed = Option.builder(RPC_COMPRESS_ARGS)
-        .argName(RPC_COMPRESS_NAME)
-        .desc("Rpc Compression enabled or not").build();
+    Option isRpcCompressed =
+        Option.builder(RPC_COMPRESS_ARGS)
+            .argName(RPC_COMPRESS_NAME)
+            .desc("Rpc Compression enabled or not")
+            .build();
     options.addOption(isRpcCompressed);
     return options;
   }
 
-  static String checkRequiredArg(String arg, String name, CommandLine commandLine,
-      boolean isRequired,
-      String defaultValue) throws ArgsErrorException {
+  static String checkRequiredArg(
+      String arg, String name, CommandLine commandLine, boolean isRequired, String defaultValue)
+      throws ArgsErrorException {
     String str = commandLine.getOptionValue(arg);
     if (str == null) {
       if (isRequired) {
-        String msg = String
-            .format("%s: Required values for option '%s' not provided", IOTDB_CLI_PREFIX, name);
+        String msg =
+            String.format(
+                "%s: Required values for option '%s' not provided", IOTDB_CLI_PREFIX, name);
         println(msg);
         println("Use -help for more information");
         throw new ArgsErrorException(msg);
       } else if (defaultValue == null) {
-        String msg = String
-            .format("%s: Required values for option '%s' is null.", IOTDB_CLI_PREFIX, name);
+        String msg =
+            String.format("%s: Required values for option '%s' is null.", IOTDB_CLI_PREFIX, name);
         throw new ArgsErrorException(msg);
       } else {
         return defaultValue;
@@ -222,8 +245,9 @@ public abstract class AbstractCli {
         break;
       }
     }
-    if (index >= 0 && ((index + 1 >= args.length) || (index + 1 < args.length && keywordSet
-        .contains(args[index + 1])))) {
+    if (index >= 0
+        && ((index + 1 >= args.length)
+            || (index + 1 < args.length && keywordSet.contains(args[index + 1])))) {
       return ArrayUtils.remove(args, index);
     }
     return args;
@@ -238,8 +262,9 @@ public abstract class AbstractCli {
         break;
       }
     }
-    if (index >= 0 && ((index + 1 >= args.length) || (index + 1 < args.length && keywordSet
-        .contains(args[index + 1])))) {
+    if (index >= 0
+        && ((index + 1 >= args.length)
+            || (index + 1 < args.length && keywordSet.contains(args[index + 1])))) {
       return ArrayUtils.remove(args, index);
     } else if (index == -1) {
       return args;
@@ -267,13 +292,16 @@ public abstract class AbstractCli {
   }
 
   static void displayLogo(String version) {
-    println(" _____       _________  ______   ______    \n"
-        + "|_   _|     |  _   _  ||_   _ `.|_   _ \\   \n"
-        + "  | |   .--.|_/ | | \\_|  | | `. \\ | |_) |  \n"
-        + "  | | / .'`\\ \\  | |      | |  | | |  __'.  \n"
-        + " _| |_| \\__. | _| |_    _| |_.' /_| |__) | \n"
-        + "|_____|'.__.' |_____|  |______.'|_______/  version " + version + "\n"
-        + "                                           \n");
+    println(
+        " _____       _________  ______   ______    \n"
+            + "|_   _|     |  _   _  ||_   _ `.|_   _ \\   \n"
+            + "  | |   .--.|_/ | | \\_|  | | `. \\ | |_) |  \n"
+            + "  | | / .'`\\ \\  | |      | |  | | |  __'.  \n"
+            + " _| |_| \\__. | _| |_    _| |_.' /_| |__) | \n"
+            + "|_____|'.__.' |_____|  |______.'|_______/  version "
+            + version
+            + "\n"
+            + "                                           \n");
   }
 
   static void echoStarting() {
@@ -336,27 +364,31 @@ public abstract class AbstractCli {
 
   private static void showHelp() {
     println("    <your-sql>\t\t\t execute your sql statment");
-    println(String.format("    %s\t\t show how many timeseries are in iotdb",
-        SHOW_METADATA_COMMAND));
-    println(String.format("    %s=xxx\t eg. long, default, ISO8601, yyyy-MM-dd HH:mm:ss.",
-        SET_TIMESTAMP_DISPLAY));
+    println(
+        String.format("    %s\t\t show how many timeseries are in iotdb", SHOW_METADATA_COMMAND));
+    println(
+        String.format(
+            "    %s=xxx\t eg. long, default, ISO8601, yyyy-MM-dd HH:mm:ss.",
+            SET_TIMESTAMP_DISPLAY));
     println(String.format("    %s\t show time display type", SHOW_TIMESTAMP_DISPLAY));
     println(String.format("    %s=xxx\t\t eg. +08:00, Asia/Shanghai.", SET_TIME_ZONE));
     println(String.format("    %s\t\t show cli time zone", SHOW_TIMEZONE));
     println(
-        String.format("    %s=xxx\t\t set fetch size when querying data from server.",
-            SET_FETCH_SIZE));
+        String.format(
+            "    %s=xxx\t\t set fetch size when querying data from server.", SET_FETCH_SIZE));
     println(String.format("    %s\t\t show fetch size", SHOW_FETCH_SIZE));
     println(
-        String.format("    %s=xxx\t eg. set max lines for cli to ouput, -1 equals to unlimited.",
+        String.format(
+            "    %s=xxx\t eg. set max lines for cli to ouput, -1 equals to unlimited.",
             SET_MAX_DISPLAY_NUM));
   }
 
   private static void setTimestampDisplay(String specialCmd, String cmd) {
     String[] values = specialCmd.split("=");
     if (values.length != 2) {
-      println(String.format("Time display format error, please input like %s=ISO8601",
-          SET_TIMESTAMP_DISPLAY));
+      println(
+          String.format(
+              "Time display format error, please input like %s=ISO8601", SET_TIMESTAMP_DISPLAY));
       return;
     }
     try {
@@ -369,8 +401,9 @@ public abstract class AbstractCli {
   }
 
   /**
-   * if cli has not specified a zondId, it will be set to cli's system timezone by default
-   * otherwise for insert and query accuracy cli should set timezone the same for all sessions
+   * if cli has not specified a zondId, it will be set to cli's system timezone by default otherwise
+   * for insert and query accuracy cli should set timezone the same for all sessions
+   *
    * @param specialCmd
    * @param cmd
    * @param connection
@@ -378,8 +411,7 @@ public abstract class AbstractCli {
   private static void setTimeZone(String specialCmd, String cmd, IoTDBConnection connection) {
     String[] values = specialCmd.split("=");
     if (values.length != 2) {
-      println(
-          String.format("Time zone format error, please input like %s=+08:00", SET_TIME_ZONE));
+      println(String.format("Time zone format error, please input like %s=+08:00", SET_TIME_ZONE));
       return;
     }
     try {
@@ -394,8 +426,7 @@ public abstract class AbstractCli {
   private static void setFetchSize(String specialCmd, String cmd) {
     String[] values = specialCmd.split("=");
     if (values.length != 2) {
-      println(String
-          .format("Fetch size format error, please input like %s=10000", SET_FETCH_SIZE));
+      println(String.format("Fetch size format error, please input like %s=10000", SET_FETCH_SIZE));
       return;
     }
     try {
@@ -410,8 +441,10 @@ public abstract class AbstractCli {
   private static void setMaxDisplayNum(String specialCmd, String cmd) {
     String[] values = specialCmd.split("=");
     if (values.length != 2) {
-      println(String.format("Max display number format error, please input like %s = 10000",
-          SET_MAX_DISPLAY_NUM));
+      println(
+          String.format(
+              "Max display number format error, please input like %s = 10000",
+              SET_MAX_DISPLAY_NUM));
       return;
     }
     try {
@@ -434,13 +467,14 @@ public abstract class AbstractCli {
   private static void importCmd(String specialCmd, String cmd, IoTDBConnection connection) {
     String[] values = specialCmd.split(" ");
     if (values.length != 2) {
-      println("Please input like: import /User/myfile. "
-          + "Noted that your file path cannot contain any space character)");
+      println(
+          "Please input like: import /User/myfile. "
+              + "Noted that your file path cannot contain any space character)");
       return;
     }
     println(cmd.split(" ")[1]);
-    ImportCsv.importCsvFromFile(host, port, username, password, cmd.split(" ")[1],
-        connection.getTimeZone());
+    ImportCsv.importCsvFromFile(
+        host, port, username, password, cmd.split(" ")[1], connection.getTimeZone());
   }
 
   private static void executeQuery(IoTDBConnection connection, String cmd) {
@@ -455,28 +489,28 @@ public abstract class AbstractCli {
           ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
           int columnLength = resultSetMetaData.getColumnCount();
           List<Integer> maxSizeList = new ArrayList<>(columnLength);
-          List<List<String>> lists = cacheResult(resultSet, maxSizeList, columnLength,
-              resultSetMetaData, zoneId);
+          List<List<String>> lists =
+              cacheResult(resultSet, maxSizeList, columnLength, resultSetMetaData, zoneId);
           output(lists, maxSizeList);
           long costTime = System.currentTimeMillis() - startTime;
           println(String.format("It costs %.3fs", costTime / 1000.0));
           while (!isReachEnd) {
             if (continuePrint) {
               maxSizeList = new ArrayList<>(columnLength);
-              lists = cacheResult(resultSet, maxSizeList, columnLength,
-                  resultSetMetaData, zoneId);
+              lists = cacheResult(resultSet, maxSizeList, columnLength, resultSetMetaData, zoneId);
               output(lists, maxSizeList);
               continue;
             }
-            println(String.format(
-                "Reach the max_display_num = %s. Press ENTER to show more, input 'q' to quit.",
-                maxPrintRowCount));
+            println(
+                String.format(
+                    "Reach the max_display_num = %s. Press ENTER to show more, input 'q' to quit.",
+                    maxPrintRowCount));
             BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
             try {
               if (br.readLine().equals("")) {
                 maxSizeList = new ArrayList<>(columnLength);
-                lists = cacheResult(resultSet, maxSizeList, columnLength,
-                    resultSetMetaData, zoneId);
+                lists =
+                    cacheResult(resultSet, maxSizeList, columnLength, resultSetMetaData, zoneId);
                 output(lists, maxSizeList);
               } else {
                 break;
@@ -499,17 +533,22 @@ public abstract class AbstractCli {
   /**
    * cache all results
    *
-   * @param resultSet         jdbc resultSet
-   * @param maxSizeList       the longest result of every column
-   * @param columnCount       the number of column
+   * @param resultSet jdbc resultSet
+   * @param maxSizeList the longest result of every column
+   * @param columnCount the number of column
    * @param resultSetMetaData jdbc resultSetMetaData
-   * @param zoneId            your time zone
+   * @param zoneId your time zone
    * @return List<List<String>> result
    * @throws SQLException throw exception
    */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
-  private static List<List<String>> cacheResult(ResultSet resultSet, List<Integer> maxSizeList,
-      int columnCount, ResultSetMetaData resultSetMetaData, ZoneId zoneId) throws SQLException {
+  private static List<List<String>> cacheResult(
+      ResultSet resultSet,
+      List<Integer> maxSizeList,
+      int columnCount,
+      ResultSetMetaData resultSetMetaData,
+      ZoneId zoneId)
+      throws SQLException {
 
     int j = 0;
     if (cursorBeforeFirst) {
@@ -533,8 +572,9 @@ public abstract class AbstractCli {
         for (int i = 1; i <= columnCount; i++) {
           String tmp;
           if (printTimestamp && i == 1) {
-            tmp = RpcUtils.formatDatetime(timeFormat, timestampPrecision,
-                resultSet.getLong(TIMESTAMP_STR), zoneId);
+            tmp =
+                RpcUtils.formatDatetime(
+                    timeFormat, timestampPrecision, resultSet.getLong(TIMESTAMP_STR), zoneId);
           } else {
             tmp = resultSet.getString(i);
           }
@@ -572,8 +612,8 @@ public abstract class AbstractCli {
           tmp = NULL;
         }
         if (i % 2 != 0 && !tmp.equals(NULL)) {
-          tmp = RpcUtils.formatDatetime(timeFormat, timestampPrecision,
-              Long.parseLong(tmp), zoneId);
+          tmp =
+              RpcUtils.formatDatetime(timeFormat, timestampPrecision, Long.parseLong(tmp), zoneId);
         }
         lists.get(i - 1).add(tmp);
         if (maxSizeList.get(i - 1) < tmp.length()) {
@@ -584,7 +624,6 @@ public abstract class AbstractCli {
       isReachEnd = !resultSet.next();
     }
     return lists;
-
   }
 
   private static void output(List<List<String>> lists, List<Integer> maxSizeList) {
@@ -610,7 +649,9 @@ public abstract class AbstractCli {
   }
 
   enum OperationResult {
-    STOP_OPER, CONTINUE_OPER, NO_OPER
+    STOP_OPER,
+    CONTINUE_OPER,
+    NO_OPER
   }
 
   static boolean processCommand(String s, IoTDBConnection connection) {
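
    Two more recurring rewrites close out the AbstractCli.java diff above:
    wrapped method signatures (break after the opening parenthesis, +4
    continuation indent, throws clause on its own line, as in
    checkRequiredArg) and enums reflowed to one constant per line
    (OperationResult). A small runnable sketch of both conventions, with
    checkArg as a simplified, hypothetical stand-in for the real
    checkRequiredArg:

public class GoogleStyleWrapDemo {

  // A signature that no longer fits on one line breaks after '(' and gives
  // the throws clause its own continuation line.
  static String checkArg(
      String value, String name, boolean isRequired, String defaultValue)
      throws IllegalArgumentException {
    if (value == null) {
      if (isRequired) {
        throw new IllegalArgumentException(
            String.format("Required values for option '%s' not provided", name));
      }
      return defaultValue;
    }
    return value;
  }

  // Multi-constant enums are rewritten to one constant per line, as in the
  // OperationResult hunk above.
  enum OperationResult {
    STOP_OPER,
    CONTINUE_OPER,
    NO_OPER
  }

  public static void main(String[] args) {
    System.out.println(checkArg(null, "host", false, "127.0.0.1"));
    System.out.println(OperationResult.NO_OPER);
  }
}
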
diff --git a/cli/src/main/java/org/apache/iotdb/cli/Cli.java b/cli/src/main/java/org/apache/iotdb/cli/Cli.java
index 98a7f8e..d691bd4 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/Cli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/Cli.java
@@ -36,9 +36,7 @@ import org.apache.iotdb.jdbc.IoTDBConnection;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.thrift.TException;
 
-/**
- * args[]: -h 127.0.0.1 -p 6667 -u root -pw root
- */
+/** args[]: -h 127.0.0.1 -p 6667 -u root -pw root */
 public class Cli extends AbstractCli {
 
   private static CommandLine commandLine;
@@ -101,7 +99,8 @@ public class Cli extends AbstractCli {
       return false;
     } catch (NumberFormatException e) {
       println(
-          IOTDB_CLI_PREFIX + "> error format of max print row count, it should be a number and greater than 0");
+          IOTDB_CLI_PREFIX
+              + "> error format of max print row count, it should be a number and greater than 0");
       return false;
     }
     return true;
@@ -115,8 +114,10 @@ public class Cli extends AbstractCli {
 
       password = commandLine.getOptionValue(PASSWORD_ARGS);
       if (hasExecuteSQL && password != null) {
-        try (IoTDBConnection connection = (IoTDBConnection) DriverManager
-            .getConnection(Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
+        try (IoTDBConnection connection =
+            (IoTDBConnection)
+                DriverManager.getConnection(
+                    Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
           properties = connection.getServerProperties();
           AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
           processCommand(execute, connection);
@@ -140,8 +141,10 @@ public class Cli extends AbstractCli {
   }
 
   private static void receiveCommands(ConsoleReader reader) throws TException, IOException {
-    try (IoTDBConnection connection = (IoTDBConnection) DriverManager
-        .getConnection(Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
+    try (IoTDBConnection connection =
+        (IoTDBConnection)
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
       String s;
       properties = connection.getServerProperties();
       AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
@@ -158,8 +161,9 @@ public class Cli extends AbstractCli {
         }
       }
     } catch (SQLException e) {
-      println(String
-          .format("%s> %s Host is %s, port is %s.", IOTDB_CLI_PREFIX, e.getMessage(), host, port));
+      println(
+          String.format(
+              "%s> %s Host is %s, port is %s.", IOTDB_CLI_PREFIX, e.getMessage(), host, port));
     }
   }
 }
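
Both connection paths in this class wrap the same JDBC try-with-resources idiom. A standalone sketch of that idiom, assuming the IoTDB JDBC driver is on the classpath and a server at 127.0.0.1:6667 (Config.IOTDB_URL_PREFIX is the "jdbc:iotdb://" prefix written out below):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class JdbcConnectSketch {
      public static void main(String[] args) throws ClassNotFoundException, SQLException {
        Class.forName("org.apache.iotdb.jdbc.IoTDBDriver"); // explicit driver registration
        try (Connection connection =
                DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
            Statement statement = connection.createStatement()) {
          // Resources declared in the header close in reverse order even if the body throws,
          // which is why the Cli scopes the connection this way.
          statement.execute("FLUSH");
        }
      }
    }
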
diff --git a/cli/src/main/java/org/apache/iotdb/cli/WinCli.java b/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
index 9a4b5de..2102439 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
@@ -37,9 +37,7 @@ import org.apache.iotdb.jdbc.IoTDBConnection;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.thrift.TException;
 
-/**
- * args[]: -h 127.0.0.1 -p 6667 -u root -pw root
- */
+/** args[]: -h 127.0.0.1 -p 6667 -u root -pw root */
 public class WinCli extends AbstractCli {
 
   private static CommandLine commandLine;
@@ -102,7 +100,8 @@ public class WinCli extends AbstractCli {
         maxPrintRowCount = Integer.parseInt(commandLine.getOptionValue(MAX_PRINT_ROW_COUNT_ARGS));
         if (maxPrintRowCount <= 0) {
           println(
-              IOTDB_CLI_PREFIX + "> error format of max print row count, it should be a number greater than 0");
+              IOTDB_CLI_PREFIX
+                  + "> error format of max print row count, it should be a number greater than 0");
           return false;
         }
       }
@@ -111,8 +110,7 @@ public class WinCli extends AbstractCli {
       hf.printHelp(IOTDB_CLI_PREFIX, options, true);
       return false;
     } catch (NumberFormatException e) {
-      println(
-          IOTDB_CLI_PREFIX + "> error format of max print row count, it should be a number");
+      println(IOTDB_CLI_PREFIX + "> error format of max print row count, it should be a number");
       return false;
     }
     return true;
@@ -128,8 +126,10 @@ public class WinCli extends AbstractCli {
         password = readPassword();
       }
       if (hasExecuteSQL) {
-        try (IoTDBConnection connection = (IoTDBConnection) DriverManager
-            .getConnection(Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
+        try (IoTDBConnection connection =
+            (IoTDBConnection)
+                DriverManager.getConnection(
+                    Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
           properties = connection.getServerProperties();
           AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
           processCommand(execute, connection);
@@ -148,8 +148,10 @@ public class WinCli extends AbstractCli {
   }
 
   private static void receiveCommands(Scanner scanner) throws TException {
-    try (IoTDBConnection connection = (IoTDBConnection) DriverManager
-        .getConnection(Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
+    try (IoTDBConnection connection =
+        (IoTDBConnection)
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
       properties = connection.getServerProperties();
       AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
       timestampPrecision = properties.getTimestampPrecision();
@@ -166,8 +168,9 @@ public class WinCli extends AbstractCli {
         }
       }
     } catch (SQLException e) {
-      println(String
-          .format("%s> %s Host is %s, port is %s.", IOTDB_CLI_PREFIX, e.getMessage(), host, port));
+      println(
+          String.format(
+              "%s> %s Host is %s, port is %s.", IOTDB_CLI_PREFIX, e.getMessage(), host, port));
     }
   }
 }
diff --git a/cli/src/main/java/org/apache/iotdb/cli/utils/IoTPrinter.java b/cli/src/main/java/org/apache/iotdb/cli/utils/IoTPrinter.java
index 89b939b..fb1d044 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/utils/IoTPrinter.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/utils/IoTPrinter.java
@@ -95,13 +95,11 @@ public class IoTPrinter {
     return sb;
   }
 
-  /**
-   * compute the number of Chinese characters included in the String
-   */
+  /** compute the number of Chinese characters included in the String */
   public static int computeHANCount(String s) {
-    return (int) s.codePoints()
-        .filter(codePoint -> UnicodeScript.of(codePoint) == UnicodeScript.HAN)
-        .count();
+    return (int)
+        s.codePoints()
+            .filter(codePoint -> UnicodeScript.of(codePoint) == UnicodeScript.HAN)
+            .count();
   }
-
 }
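
computeHANCount exists because Han characters are typically rendered two terminal cells wide, so the printer needs their count to align columns (the width rationale is inferred, not stated in the source). A runnable sketch of the same stream pipeline:

    import java.lang.Character.UnicodeScript;

    public class HanCountSketch {

      static int computeHANCount(String s) {
        // codePoints() walks real code points, so supplementary CJK characters count once each.
        return (int)
            s.codePoints()
                .filter(codePoint -> UnicodeScript.of(codePoint) == UnicodeScript.HAN)
                .count();
      }

      public static void main(String[] args) {
        System.out.println(computeHANCount("IoTDB时序数据库")); // prints 5
      }
    }
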
diff --git a/cli/src/main/java/org/apache/iotdb/exception/ArgsErrorException.java b/cli/src/main/java/org/apache/iotdb/exception/ArgsErrorException.java
index 848e7eb..eabba3b 100644
--- a/cli/src/main/java/org/apache/iotdb/exception/ArgsErrorException.java
+++ b/cli/src/main/java/org/apache/iotdb/exception/ArgsErrorException.java
@@ -25,5 +25,4 @@ public class ArgsErrorException extends Exception {
   public ArgsErrorException(String msg) {
     super(msg);
   }
-
 }
diff --git a/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java b/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
index 7b50924..83a25e6 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
@@ -51,18 +51,37 @@ public abstract class AbstractCsvTool {
   protected static final String TIME_ZONE_ARGS = "tz";
   protected static final String TIME_ZONE_NAME = "timeZone";
   protected static final int MAX_HELP_CONSOLE_WIDTH = 92;
-  protected static final String[] SUPPORT_TIME_FORMAT = new String[]{"default",
-      "long",
-      "number", "timestamp", "yyyy-MM-dd HH:mm:ss", "yyyy/MM/dd HH:mm:ss", "yyyy.MM.dd HH:mm:ss",
-      "yyyy-MM-dd'T'HH:mm:ss", "yyyy/MM/dd'T'HH:mm:ss", "yyyy.MM.dd'T'HH:mm:ss",
-      "yyyy-MM-dd HH:mm:ssZZ",
-      "yyyy/MM/dd HH:mm:ssZZ", "yyyy.MM.dd HH:mm:ssZZ", "yyyy-MM-dd'T'HH:mm:ssZZ",
-      "yyyy/MM/dd'T'HH:mm:ssZZ",
-      "yyyy.MM.dd'T'HH:mm:ssZZ", "yyyy/MM/dd HH:mm:ss.SSS", "yyyy-MM-dd HH:mm:ss.SSS",
-      "yyyy.MM.dd HH:mm:ss.SSS",
-      "yyyy/MM/dd'T'HH:mm:ss.SSS", "yyyy-MM-dd'T'HH:mm:ss.SSS", "yyyy-MM-dd'T'HH:mm:ss.SSS",
-      "yyyy.MM.dd'T'HH:mm:ss.SSS", "yyyy-MM-dd HH:mm:ss.SSSZZ", "yyyy/MM/dd HH:mm:ss.SSSZZ",
-      "yyyy.MM.dd HH:mm:ss.SSSZZ", "yyyy-MM-dd'T'HH:mm:ss.SSSZZ", "yyyy/MM/dd'T'HH:mm:ss.SSSZZ",};
+  protected static final String[] SUPPORT_TIME_FORMAT =
+      new String[] {
+        "default",
+        "long",
+        "number",
+        "timestamp",
+        "yyyy-MM-dd HH:mm:ss",
+        "yyyy/MM/dd HH:mm:ss",
+        "yyyy.MM.dd HH:mm:ss",
+        "yyyy-MM-dd'T'HH:mm:ss",
+        "yyyy/MM/dd'T'HH:mm:ss",
+        "yyyy.MM.dd'T'HH:mm:ss",
+        "yyyy-MM-dd HH:mm:ssZZ",
+        "yyyy/MM/dd HH:mm:ssZZ",
+        "yyyy.MM.dd HH:mm:ssZZ",
+        "yyyy-MM-dd'T'HH:mm:ssZZ",
+        "yyyy/MM/dd'T'HH:mm:ssZZ",
+        "yyyy.MM.dd'T'HH:mm:ssZZ",
+        "yyyy/MM/dd HH:mm:ss.SSS",
+        "yyyy-MM-dd HH:mm:ss.SSS",
+        "yyyy.MM.dd HH:mm:ss.SSS",
+        "yyyy/MM/dd'T'HH:mm:ss.SSS",
+        "yyyy-MM-dd'T'HH:mm:ss.SSS",
+        "yyyy.MM.dd'T'HH:mm:ss.SSS",
+        "yyyy-MM-dd HH:mm:ss.SSSZZ",
+        "yyyy/MM/dd HH:mm:ss.SSSZZ",
+        "yyyy.MM.dd HH:mm:ss.SSSZZ",
+        "yyyy-MM-dd'T'HH:mm:ss.SSSZZ",
+        "yyyy/MM/dd'T'HH:mm:ss.SSSZZ",
+      };
   protected static String host;
   protected static String port;
   protected static String username;
@@ -74,7 +93,7 @@ public abstract class AbstractCsvTool {
   protected static Session session;
 
   AbstractCsvTool() {}
-  
+
   protected static String checkRequiredArg(String arg, String name, CommandLine commandLine)
       throws ArgsErrorException {
     String str = commandLine.getOptionValue(arg);
@@ -112,31 +131,54 @@ public abstract class AbstractCsvTool {
         return true;
       }
     }
-    System.out.printf("Input time format %s is not supported, "
-        + "please input like yyyy-MM-dd\\ HH:mm:ss.SSS or yyyy-MM-dd'T'HH:mm:ss.SSS%n", timeFormat);
+    System.out.printf(
+        "Input time format %s is not supported, "
+            + "please input like yyyy-MM-dd\\ HH:mm:ss.SSS or yyyy-MM-dd'T'HH:mm:ss.SSS%n",
+        timeFormat);
     return false;
   }
 
   protected static Options createNewOptions() {
     Options options = new Options();
 
-    Option opHost = Option.builder(HOST_ARGS).longOpt(HOST_NAME).required().argName(HOST_NAME)
-        .hasArg()
-        .desc("Host Name (required)").build();
+    Option opHost =
+        Option.builder(HOST_ARGS)
+            .longOpt(HOST_NAME)
+            .required()
+            .argName(HOST_NAME)
+            .hasArg()
+            .desc("Host Name (required)")
+            .build();
     options.addOption(opHost);
 
-    Option opPort = Option.builder(PORT_ARGS).longOpt(PORT_NAME).required().argName(PORT_NAME)
-        .hasArg()
-        .desc("Port (required)").build();
+    Option opPort =
+        Option.builder(PORT_ARGS)
+            .longOpt(PORT_NAME)
+            .required()
+            .argName(PORT_NAME)
+            .hasArg()
+            .desc("Port (required)")
+            .build();
     options.addOption(opPort);
 
-    Option opUsername = Option.builder(USERNAME_ARGS).longOpt(USERNAME_NAME).required()
-        .argName(USERNAME_NAME)
-        .hasArg().desc("Username (required)").build();
+    Option opUsername =
+        Option.builder(USERNAME_ARGS)
+            .longOpt(USERNAME_NAME)
+            .required()
+            .argName(USERNAME_NAME)
+            .hasArg()
+            .desc("Username (required)")
+            .build();
     options.addOption(opUsername);
 
-    Option opPassword = Option.builder(PASSWORD_ARGS).longOpt(PASSWORD_NAME).optionalArg(true)
-        .argName(PASSWORD_NAME).hasArg().desc("Password (optional)").build();
+    Option opPassword =
+        Option.builder(PASSWORD_ARGS)
+            .longOpt(PASSWORD_NAME)
+            .optionalArg(true)
+            .argName(PASSWORD_NAME)
+            .hasArg()
+            .desc("Password (optional)")
+            .build();
     options.addOption(opPassword);
     return options;
   }
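
The builder chains above are Apache Commons CLI. A self-contained sketch of the same parse flow, trimmed to one required and one optional option (short names "h" and "pw" are assumed to match the tool's constants):

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class OptionsSketch {
      // Try: java OptionsSketch -h 127.0.0.1 -pw root
      public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption(
            Option.builder("h").longOpt("host").required().hasArg().desc("Host Name").build());
        options.addOption(
            Option.builder("pw")
                .longOpt("password")
                .optionalArg(true)
                .hasArg()
                .desc("Password")
                .build());
        // parse() throws ParseException when the required -h option is missing.
        CommandLine commandLine = new DefaultParser().parse(options, args);
        System.out.println("host = " + commandLine.getOptionValue("h"));
        System.out.println("password = " + commandLine.getOptionValue("pw")); // null when absent
      }
    }
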
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
index 7cf5d78..2d6b855 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
@@ -72,9 +72,7 @@ public class ExportCsv extends AbstractCsvTool {
 
   private static final int EXPORT_PER_LINE_COUNT = 10000;
 
-  /**
-   * main function of export csv tool.
-   */
+  /** main function of export csv tool. */
   public static void main(String[] args) throws IOException {
     Options options = createOptions();
     HelpFormatter hf = new HelpFormatter();
@@ -137,15 +135,14 @@ public class ExportCsv extends AbstractCsvTool {
         try {
           session.close();
         } catch (IoTDBConnectionException e) {
-          System.out
-              .println("Encounter an error when closing session, error is: " + e.getMessage());
+          System.out.println(
+              "Encounter an error when closing session, error is: " + e.getMessage());
         }
       }
     }
   }
 
-  private static void parseSpecialParams(CommandLine commandLine)
-      throws ArgsErrorException {
+  private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException {
     targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine);
     targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS);
     if (targetFile == null) {
@@ -169,33 +166,56 @@ public class ExportCsv extends AbstractCsvTool {
   private static Options createOptions() {
     Options options = createNewOptions();
 
-    Option opTargetFile = Option.builder(TARGET_DIR_ARGS).required().argName(TARGET_DIR_NAME)
-        .hasArg()
-        .desc("Target File Directory (required)").build();
+    Option opTargetFile =
+        Option.builder(TARGET_DIR_ARGS)
+            .required()
+            .argName(TARGET_DIR_NAME)
+            .hasArg()
+            .desc("Target File Directory (required)")
+            .build();
     options.addOption(opTargetFile);
 
-    Option targetFileName = Option.builder(TARGET_FILE_ARGS).argName(TARGET_FILE_NAME).hasArg()
-        .desc("Export file name (optional)").build();
+    Option targetFileName =
+        Option.builder(TARGET_FILE_ARGS)
+            .argName(TARGET_FILE_NAME)
+            .hasArg()
+            .desc("Export file name (optional)")
+            .build();
     options.addOption(targetFileName);
 
-    Option opSqlFile = Option.builder(SQL_FILE_ARGS).argName(SQL_FILE_NAME).hasArg()
-        .desc("SQL File Path (optional)").build();
+    Option opSqlFile =
+        Option.builder(SQL_FILE_ARGS)
+            .argName(SQL_FILE_NAME)
+            .hasArg()
+            .desc("SQL File Path (optional)")
+            .build();
     options.addOption(opSqlFile);
 
-    Option opTimeFormat = Option.builder(TIME_FORMAT_ARGS).argName(TIME_FORMAT_NAME).hasArg()
-        .desc("Output time Format in csv file. "
-            + "You can choose 1) timestamp, number, long 2) ISO8601, default 3) "
-            + "user-defined pattern like yyyy-MM-dd\\ HH:mm:ss, default ISO8601 (optional)")
-        .build();
+    Option opTimeFormat =
+        Option.builder(TIME_FORMAT_ARGS)
+            .argName(TIME_FORMAT_NAME)
+            .hasArg()
+            .desc(
+                "Output time format in csv file. "
+                    + "You can choose 1) timestamp, number, long 2) ISO8601, default 3) "
+                    + "user-defined pattern like yyyy-MM-dd\\ HH:mm:ss, default ISO8601 (optional)")
+            .build();
     options.addOption(opTimeFormat);
 
-    Option opTimeZone = Option.builder(TIME_ZONE_ARGS).argName(TIME_ZONE_NAME).hasArg()
-        .desc("Time Zone eg. +08:00 or -01:00 (optional)").build();
+    Option opTimeZone =
+        Option.builder(TIME_ZONE_ARGS)
+            .argName(TIME_ZONE_NAME)
+            .hasArg()
+            .desc("Time Zone eg. +08:00 or -01:00 (optional)")
+            .build();
     options.addOption(opTimeZone);
 
-    Option opHelp = Option.builder(HELP_ARGS).longOpt(HELP_ARGS).hasArg(false)
-        .desc("Display help information")
-        .build();
+    Option opHelp =
+        Option.builder(HELP_ARGS)
+            .longOpt(HELP_ARGS)
+            .hasArg(false)
+            .desc("Display help information")
+            .build();
     options.addOption(opHelp);
 
     return options;
@@ -215,7 +235,7 @@ public class ExportCsv extends AbstractCsvTool {
   /**
    * Dump files from database to CSV file.
    *
-   * @param sql   export the result of executing the sql
+   * @param sql export the result of executing the sql
    * @param index use to create dump file name
    */
   private static void dumpResult(String sql, int index) {
@@ -239,10 +259,10 @@ public class ExportCsv extends AbstractCsvTool {
       writeMetadata(bw, sessionDataSet.getColumnNames());
 
       int line = writeResultSet(sessionDataSet, bw);
-      System.out
-          .printf("Statement [%s] has dumped to file %s successfully! It costs "
-                  + "%dms to export %d lines.%n", sql, path, System.currentTimeMillis() - startTime,
-              line);
+      System.out.printf(
+          "Statement [%s] has been dumped to file %s successfully! It took "
+              + "%dms to export %d lines.%n",
+          sql, path, System.currentTimeMillis() - startTime, line);
     } catch (IOException | StatementExecutionException | IoTDBConnectionException e) {
       System.out.println("Cannot dump result because: " + e.getMessage());
     }
@@ -283,9 +303,9 @@ public class ExportCsv extends AbstractCsvTool {
     String timestampPrecision = "ms";
     switch (timeFormat) {
       case "default":
-        String str = RpcUtils
-            .parseLongToDateWithPrecision(DateTimeFormatter.ISO_OFFSET_DATE_TIME, time, zoneId,
-                timestampPrecision);
+        String str =
+            RpcUtils.parseLongToDateWithPrecision(
+                DateTimeFormatter.ISO_OFFSET_DATE_TIME, time, zoneId, timestampPrecision);
         bw.write(str + ",");
         break;
       case "timestamp":
@@ -294,8 +314,7 @@ public class ExportCsv extends AbstractCsvTool {
         bw.write(time + ",");
         break;
       default:
-        dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time),
-            zoneId);
+        dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), zoneId);
         bw.write(dateTime.format(DateTimeFormatter.ofPattern(timeFormat)) + ",");
         break;
     }
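
The tail of that switch is plain java.time: a user-supplied pattern is applied to the epoch timestamp through ZonedDateTime. A runnable sketch of just that branch (epoch value and zone are arbitrary; the "default" branch instead goes through RpcUtils with ISO_OFFSET_DATE_TIME):

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class TimeFormatSketch {
      public static void main(String[] args) {
        long time = 1613347143000L; // epoch milliseconds
        ZoneId zoneId = ZoneId.of("+08:00");
        ZonedDateTime dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), zoneId);
        // User-defined pattern, as in the switch's default branch.
        System.out.println(dateTime.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS")));
        // ISO 8601 with offset, the shape the "default" time format produces.
        System.out.println(dateTime.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
      }
    }
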
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
index 91af958..41667bb 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
@@ -47,9 +47,7 @@ import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * read a CSV formatted data File and insert all the data into IoTDB.
- */
+/** read a CSV formatted data File and insert all the data into IoTDB. */
 public class ImportCsv extends AbstractCsvTool {
 
   private static final String FILE_ARGS = "f";
@@ -61,7 +59,8 @@ public class ImportCsv extends AbstractCsvTool {
   private static final String TSFILEDB_CLI_PREFIX = "ImportCsv";
   private static final String ILLEGAL_PATH_ARGUMENT = "Path parameter is null";
 
-  // put these variable in here, because sonar fails.  have to extract some code into a function. nextNode method.
+  // put these variables here because sonar fails; we have to extract some code into a function:
+  // the nextNode method.
   private static int i;
   private static int startIndex;
 
@@ -73,27 +72,37 @@ public class ImportCsv extends AbstractCsvTool {
   private static Options createOptions() {
     Options options = createNewOptions();
 
-    Option opFile = Option.builder(FILE_ARGS).required().argName(FILE_NAME).hasArg().desc(
-        "If input a file path, load a csv file, "
-            + "otherwise load all csv file under this directory (required)")
-        .build();
+    Option opFile =
+        Option.builder(FILE_ARGS)
+            .required()
+            .argName(FILE_NAME)
+            .hasArg()
+            .desc(
+                "If input a file path, load a csv file, "
+                    + "otherwise load all csv file under this directory (required)")
+            .build();
     options.addOption(opFile);
 
-    Option opHelp = Option.builder(HELP_ARGS).longOpt(HELP_ARGS)
-        .hasArg(false).desc("Display help information")
-        .build();
+    Option opHelp =
+        Option.builder(HELP_ARGS)
+            .longOpt(HELP_ARGS)
+            .hasArg(false)
+            .desc("Display help information")
+            .build();
     options.addOption(opHelp);
 
-    Option opTimeZone = Option.builder(TIME_ZONE_ARGS).argName(TIME_ZONE_NAME).hasArg()
-        .desc("Time Zone eg. +08:00 or -01:00 (optional)").build();
+    Option opTimeZone =
+        Option.builder(TIME_ZONE_ARGS)
+            .argName(TIME_ZONE_NAME)
+            .hasArg()
+            .desc("Time Zone eg. +08:00 or -01:00 (optional)")
+            .build();
     options.addOption(opTimeZone);
 
     return options;
   }
 
-  /**
-   * Data from csv To tsfile.
-   */
+  /** Data from csv To tsfile. */
   private static void loadDataFromCSV(File file) {
     int fileLine;
     try {
@@ -136,8 +145,7 @@ public class ImportCsv extends AbstractCsvTool {
           timeFormatter = formatterInit(cols[0]);
           useFormatter = (timeFormatter != null);
         }
-        for (Entry<String, List<Integer>> deviceToPositions : devicesToPositions
-            .entrySet()) {
+        for (Entry<String, List<Integer>> deviceToPositions : devicesToPositions.entrySet()) {
           String device = deviceToPositions.getKey();
           devices.add(device);
 
@@ -180,7 +188,6 @@ public class ImportCsv extends AbstractCsvTool {
     }
   }
 
-
   public static void main(String[] args) throws IOException {
     Options options = createOptions();
     HelpFormatter hf = new HelpFormatter();
@@ -233,9 +240,11 @@ public class ImportCsv extends AbstractCsvTool {
       } else {
         return Long.parseLong(str);
       }
-    } catch (Exception e){
-      throw new IllegalArgumentException("Input time format " + str
-          + "error. Input like yyyy-MM-dd HH:mm:ss, yyyy-MM-ddTHH:mm:ss or yyyy-MM-ddTHH:mm:ss.SSSZ");
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          "Input time format "
+              + str
+              + " error. Input like yyyy-MM-dd HH:mm:ss, yyyy-MM-ddTHH:mm:ss or yyyy-MM-ddTHH:mm:ss.SSSZ");
     }
   }
 
@@ -278,9 +287,8 @@ public class ImportCsv extends AbstractCsvTool {
     timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS);
   }
 
-  public static void importCsvFromFile(String ip, String port, String username,
-      String password, String filename,
-      String timeZone) {
+  public static void importCsvFromFile(
+      String ip, String port, String username, String password, String filename, String timeZone) {
     try {
       session = new Session(ip, Integer.parseInt(port), username, password);
       session.open(false);
@@ -296,15 +304,15 @@ public class ImportCsv extends AbstractCsvTool {
     } catch (IoTDBConnectionException e) {
       System.out.println("Encounter an error when connecting to server, because " + e.getMessage());
     } catch (StatementExecutionException e) {
-      System.out
-          .println("Encounter an error when executing the statement, because " + e.getMessage());
+      System.out.println(
+          "Encounter an error when executing the statement, because " + e.getMessage());
     } finally {
       if (session != null) {
         try {
           session.close();
         } catch (IoTDBConnectionException e) {
-          System.out
-              .println("Encounter an error when closing the connection, because " + e.getMessage());
+          System.out.println(
+              "Encounter an error when closing the connection, because " + e.getMessage());
         }
       }
     }
@@ -314,8 +322,8 @@ public class ImportCsv extends AbstractCsvTool {
     if (file.getName().endsWith(FILE_SUFFIX)) {
       loadDataFromCSV(file);
     } else {
-      System.out
-          .println("File " + file.getName() + "  should ends with '.csv' if you want to import");
+      System.out.println(
+          "File " + file.getName() + " should end with '.csv' if you want to import");
     }
   }
 
@@ -330,8 +338,8 @@ public class ImportCsv extends AbstractCsvTool {
         if (subFile.getName().endsWith(FILE_SUFFIX)) {
           loadDataFromCSV(subFile);
         } else {
-          System.out
-              .println("File " + file.getName() + " should ends with '.csv' if you want to import");
+          System.out.println(
+              "File " + subFile.getName() + " should end with '.csv' if you want to import");
         }
       }
     }
@@ -341,7 +349,8 @@ public class ImportCsv extends AbstractCsvTool {
     int line;
     try (LineNumberReader count = new LineNumberReader(new FileReader(file))) {
       while (count.skip(Long.MAX_VALUE) > 0) {
-        // Loop just in case the file is > Long.MAX_VALUE or skip() decides to not read the entire file
+        // Loop just in case the file is > Long.MAX_VALUE or skip() decides to not read the entire
+        // file
       }
       // +1 because line index starts at 0
       line = count.getLineNumber() + 1;
@@ -349,7 +358,11 @@ public class ImportCsv extends AbstractCsvTool {
     return line;
   }
 
-  private static void splitColToDeviceAndMeasurement(String col, Map<String, List<Integer>> devicesToPositions, Map<String, List<String>> devicesToMeasurements, int position) {
+  private static void splitColToDeviceAndMeasurement(
+      String col,
+      Map<String, List<Integer>> devicesToPositions,
+      Map<String, List<String>> devicesToMeasurements,
+      int position) {
     if (col.length() > 0) {
       if (col.charAt(col.length() - 1) == TsFileConstant.DOUBLE_QUOTE) {
         int endIndex = col.lastIndexOf('"', col.length() - 2);
@@ -358,8 +371,12 @@ public class ImportCsv extends AbstractCsvTool {
           endIndex = col.lastIndexOf('"', endIndex - 2);
         }
         if (endIndex != -1 && (endIndex == 0 || col.charAt(endIndex - 1) == '.')) {
-          putDeviceAndMeasurement(col.substring(0, endIndex - 1), col.substring(endIndex),
-              devicesToPositions, devicesToMeasurements, position);
+          putDeviceAndMeasurement(
+              col.substring(0, endIndex - 1),
+              col.substring(endIndex),
+              devicesToPositions,
+              devicesToMeasurements,
+              position);
         } else {
           throw new IllegalArgumentException(ILLEGAL_PATH_ARGUMENT);
         }
@@ -369,8 +386,12 @@ public class ImportCsv extends AbstractCsvTool {
         if (endIndex < 0) {
           putDeviceAndMeasurement("", col, devicesToPositions, devicesToMeasurements, position);
         } else {
-          putDeviceAndMeasurement(col.substring(0, endIndex), col.substring(endIndex + 1),
-              devicesToPositions, devicesToMeasurements, position);
+          putDeviceAndMeasurement(
+              col.substring(0, endIndex),
+              col.substring(endIndex + 1),
+              devicesToPositions,
+              devicesToMeasurements,
+              position);
         }
       } else {
         throw new IllegalArgumentException(ILLEGAL_PATH_ARGUMENT);
@@ -380,7 +401,12 @@ public class ImportCsv extends AbstractCsvTool {
     }
   }
 
-  private static void putDeviceAndMeasurement(String device, String measurement, Map<String, List<Integer>> devicesToPositions, Map<String, List<String>> devicesToMeasurements, int position) {
+  private static void putDeviceAndMeasurement(
+      String device,
+      String measurement,
+      Map<String, List<Integer>> devicesToPositions,
+      Map<String, List<String>> devicesToMeasurements,
+      int position) {
     if (devicesToMeasurements.get(device) == null && devicesToPositions.get(device) == null) {
       List<String> measurements = new ArrayList<>();
       measurements.add(measurement);
@@ -427,5 +453,4 @@ public class ImportCsv extends AbstractCsvTool {
      throw new IllegalArgumentException("Illegal csv line " + path);
     }
   }
-
 }
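
Quoting aside, splitColToDeviceAndMeasurement reduces to a split at the last dot: everything before it is the device, the remainder is the measurement. A simplified sketch under that assumption (hypothetical helper, not the method itself):

    public class ColumnSplitSketch {

      /** Splits "root.beijing.d1.s1" into device "root.beijing.d1" and measurement "s1". */
      static String[] splitAtLastDot(String col) {
        int endIndex = col.lastIndexOf('.');
        if (endIndex < 0) {
          return new String[] {"", col}; // no dot: the whole column is the measurement
        }
        return new String[] {col.substring(0, endIndex), col.substring(endIndex + 1)};
      }

      public static void main(String[] args) {
        String[] parts = splitAtLastDot("root.beijing.d1.s1");
        System.out.println(parts[0] + " | " + parts[1]); // root.beijing.d1 | s1
      }
    }
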
diff --git a/cli/src/test/java/org/apache/iotdb/cli/AbstractCliIT.java b/cli/src/test/java/org/apache/iotdb/cli/AbstractCliIT.java
index 1998c94..147e81c 100644
--- a/cli/src/test/java/org/apache/iotdb/cli/AbstractCliIT.java
+++ b/cli/src/test/java/org/apache/iotdb/cli/AbstractCliIT.java
@@ -31,7 +31,6 @@ import org.apache.iotdb.cli.AbstractCli.OperationResult;
 import org.apache.iotdb.exception.ArgsErrorException;
 import org.apache.iotdb.jdbc.IoTDBConnection;
 import org.apache.iotdb.jdbc.IoTDBDatabaseMetadata;
-import org.apache.iotdb.rpc.RpcUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -42,11 +41,9 @@ import org.slf4j.LoggerFactory;
 
 public class AbstractCliIT {
   private static Logger logger = LoggerFactory.getLogger(AbstractCliIT.class);
-  @Mock
-  private IoTDBConnection connection;
+  @Mock private IoTDBConnection connection;
 
-  @Mock
-  private IoTDBDatabaseMetadata databaseMetadata;
+  @Mock private IoTDBDatabaseMetadata databaseMetadata;
 
   @Before
   public void setUp() throws Exception {
@@ -56,16 +53,20 @@ public class AbstractCliIT {
   }
 
   @After
-  public void tearDown() throws Exception {
-  }
+  public void tearDown() throws Exception {}
 
   @Test
   public void testInit() {
     AbstractCli.init();
-    String[] keywords = {AbstractCli.HOST_ARGS, AbstractCli.HELP_ARGS,
-        AbstractCli.PORT_ARGS,
-        AbstractCli.PASSWORD_ARGS, AbstractCli.USERNAME_ARGS, AbstractCli.ISO8601_ARGS,
-        AbstractCli.MAX_PRINT_ROW_COUNT_ARGS,};
+    String[] keywords = {
+      AbstractCli.HOST_ARGS,
+      AbstractCli.HELP_ARGS,
+      AbstractCli.PORT_ARGS,
+      AbstractCli.PASSWORD_ARGS,
+      AbstractCli.USERNAME_ARGS,
+      AbstractCli.ISO8601_ARGS,
+      AbstractCli.MAX_PRINT_ROW_COUNT_ARGS,
+    };
     for (String keyword : keywords) {
       if (!AbstractCli.keywordSet.contains("-" + keyword)) {
         logger.error(keyword);
@@ -78,30 +79,33 @@ public class AbstractCliIT {
   public void testCheckRequiredArg() throws ParseException, ArgsErrorException {
     Options options = AbstractCli.createOptions();
     CommandLineParser parser = new DefaultParser();
-    String[] args = new String[]{"-u", "user1"};
+    String[] args = new String[] {"-u", "user1"};
     CommandLine commandLine = parser.parse(options, args);
-    String str = AbstractCli
-        .checkRequiredArg(AbstractCli.USERNAME_ARGS, AbstractCli.USERNAME_NAME,
-            commandLine, true, "root");
+    String str =
+        AbstractCli.checkRequiredArg(
+            AbstractCli.USERNAME_ARGS, AbstractCli.USERNAME_NAME, commandLine, true, "root");
     assertEquals("user1", str);
 
-    args = new String[]{"-u", "root",};
+    args =
+        new String[] {
+          "-u", "root",
+        };
     commandLine = parser.parse(options, args);
-    str = AbstractCli
-        .checkRequiredArg(AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine, false,
-            "127.0.0.1");
+    str =
+        AbstractCli.checkRequiredArg(
+            AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine, false, "127.0.0.1");
     assertEquals("127.0.0.1", str);
     try {
-      str = AbstractCli
-          .checkRequiredArg(AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine, true,
-              "127.0.0.1");
+      str =
+          AbstractCli.checkRequiredArg(
+              AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine, true, "127.0.0.1");
     } catch (ArgsErrorException e) {
       assertEquals("IoTDB: Required values for option 'host' not provided", e.getMessage());
     }
     try {
-      str = AbstractCli
-          .checkRequiredArg(AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine,
-              false, null);
+      str =
+          AbstractCli.checkRequiredArg(
+              AbstractCli.HOST_ARGS, AbstractCli.HOST_NAME, commandLine, false, null);
     } catch (ArgsErrorException e) {
       assertEquals("IoTDB: Required values for option 'host' is null.", e.getMessage());
     }
@@ -110,28 +114,28 @@ public class AbstractCliIT {
   @Test
   public void testRemovePasswordArgs() {
     AbstractCli.init();
-    String[] input = new String[]{"-h", "127.0.0.1", "-p", "6667", "-u", "root", "-pw", "root"};
-    String[] res = new String[]{"-h", "127.0.0.1", "-p", "6667", "-u", "root", "-pw", "root"};
+    String[] input = new String[] {"-h", "127.0.0.1", "-p", "6667", "-u", "root", "-pw", "root"};
+    String[] res = new String[] {"-h", "127.0.0.1", "-p", "6667", "-u", "root", "-pw", "root"};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
 
-    input = new String[]{"-h", "127.0.0.1", "-p", "6667", "-pw", "root", "-u", "root"};
-    res = new String[]{"-h", "127.0.0.1", "-p", "6667", "-pw", "root", "-u", "root"};
+    input = new String[] {"-h", "127.0.0.1", "-p", "6667", "-pw", "root", "-u", "root"};
+    res = new String[] {"-h", "127.0.0.1", "-p", "6667", "-pw", "root", "-u", "root"};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
 
-    input = new String[]{"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root", "-pw"};
-    res = new String[]{"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
+    input = new String[] {"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root", "-pw"};
+    res = new String[] {"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
 
-    input = new String[]{"-h", "127.0.0.1", "-p", "6667", "-pw", "-u", "root"};
-    res = new String[]{"-h", "127.0.0.1", "-p", "6667", "-u", "root"};
+    input = new String[] {"-h", "127.0.0.1", "-p", "6667", "-pw", "-u", "root"};
+    res = new String[] {"-h", "127.0.0.1", "-p", "6667", "-u", "root"};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
 
-    input = new String[]{"-pw", "-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
-    res = new String[]{"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
+    input = new String[] {"-pw", "-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
+    res = new String[] {"-h", "127.0.0.1", "-p", "6667", "root", "-u", "root"};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
 
-    input = new String[]{};
-    res = new String[]{};
+    input = new String[] {};
+    res = new String[] {};
     isTwoStringArrayEqual(res, AbstractCli.removePasswordArgs(input));
   }
 
@@ -143,41 +147,64 @@ public class AbstractCliIT {
 
   @Test
   public void testHandleInputInputCmd() {
-    assertEquals(OperationResult.STOP_OPER, AbstractCli
-        .handleInputCmd(AbstractCli.EXIT_COMMAND, connection));
-    assertEquals(OperationResult.STOP_OPER, AbstractCli
-        .handleInputCmd(AbstractCli.QUIT_COMMAND, connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=xxx", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=default", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
-
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=", AbstractCli.SET_MAX_DISPLAY_NUM), connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=xxx", AbstractCli.SET_MAX_DISPLAY_NUM),connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=1", AbstractCli.SET_MAX_DISPLAY_NUM), connection));
+    assertEquals(
+        OperationResult.STOP_OPER,
+        AbstractCli.handleInputCmd(AbstractCli.EXIT_COMMAND, connection));
+    assertEquals(
+        OperationResult.STOP_OPER,
+        AbstractCli.handleInputCmd(AbstractCli.QUIT_COMMAND, connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=xxx", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=default", AbstractCli.SET_TIMESTAMP_DISPLAY), connection));
+
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=", AbstractCli.SET_MAX_DISPLAY_NUM), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=xxx", AbstractCli.SET_MAX_DISPLAY_NUM), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=1", AbstractCli.SET_MAX_DISPLAY_NUM), connection));
     testSetMaxDisplayNumber();
 
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(AbstractCli.SHOW_TIMEZONE, connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(AbstractCli.SHOW_TIMESTAMP_DISPLAY, connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(AbstractCli.SHOW_FETCH_SIZE, connection));
-
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=", AbstractCli.SET_TIME_ZONE), connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=+08:00", AbstractCli.SET_TIME_ZONE), connection));
-
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=", AbstractCli.SET_FETCH_SIZE), connection));
-    assertEquals(OperationResult.CONTINUE_OPER, AbstractCli
-        .handleInputCmd(String.format("%s=111", AbstractCli.SET_FETCH_SIZE), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(AbstractCli.SHOW_TIMEZONE, connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(AbstractCli.SHOW_TIMESTAMP_DISPLAY, connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(AbstractCli.SHOW_FETCH_SIZE, connection));
+
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(String.format("%s=", AbstractCli.SET_TIME_ZONE), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=+08:00", AbstractCli.SET_TIME_ZONE), connection));
+
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(String.format("%s=", AbstractCli.SET_FETCH_SIZE), connection));
+    assertEquals(
+        OperationResult.CONTINUE_OPER,
+        AbstractCli.handleInputCmd(
+            String.format("%s=111", AbstractCli.SET_FETCH_SIZE), connection));
   }
 
   private void testSetMaxDisplayNumber() {
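
The @Mock fields above give the test a stand-in connection with no live server. A minimal sketch of the mechanism via Mockito's programmatic API (the Server interface is invented for illustration):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class MockitoSketch {

      interface Server {
        String getTimestampPrecision();
      }

      public static void main(String[] args) {
        Server connection = mock(Server.class); // programmatic equivalent of an @Mock field
        when(connection.getTimestampPrecision()).thenReturn("ms");
        System.out.println(connection.getTimestampPrecision()); // prints "ms"
      }
    }
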
diff --git a/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java b/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
index 56810cb..0a32ee6 100644
--- a/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
+++ b/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
@@ -39,8 +39,9 @@ public abstract class AbstractScript {
         break;
       } else {
        // remove anything after "Connection refused", only for test
-        if(line.contains("Connection refused")) {
-          line = line.substring(0, line.indexOf("Connection refused") + "Connection refused".length());
+        if (line.contains("Connection refused")) {
+          line =
+              line.substring(0, line.indexOf("Connection refused") + "Connection refused".length());
         }
         outputList.add(line);
       }
@@ -62,7 +63,7 @@ public abstract class AbstractScript {
     // This is usually always set by the JVM
 
     File userDir = new File(System.getProperty("user.dir"));
-    if(!userDir.exists()) {
+    if (!userDir.exists()) {
       throw new RuntimeException("user.dir " + userDir.getAbsolutePath() + " doesn't exist.");
     }
     File target = new File(userDir, "target/maven-archiver/pom.properties");
@@ -72,7 +73,12 @@ public abstract class AbstractScript {
     } catch (IOException e) {
       return "target/iotdb-cli-";
     }
-    return new File(userDir, String.format("target/%s-%s", properties.getProperty("artifactId"), properties.getProperty("version"))).getAbsolutePath();
+    return new File(
+            userDir,
+            String.format(
+                "target/%s-%s",
+                properties.getProperty("artifactId"), properties.getProperty("version")))
+        .getAbsolutePath();
   }
 
   protected abstract void testOnWindows() throws IOException;
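
getCliPath locates the unpacked distribution through the pom.properties file that maven-archiver writes at package time. A sketch of just the properties lookup (path taken from the method above; run it from a packaged module or the read fails):

    import java.io.FileReader;
    import java.io.IOException;
    import java.util.Properties;

    public class PomPropertiesSketch {
      public static void main(String[] args) throws IOException {
        Properties properties = new Properties();
        try (FileReader reader = new FileReader("target/maven-archiver/pom.properties")) {
          properties.load(reader); // plain key=value pairs: groupId, artifactId, version
        }
        System.out.printf(
            "target/%s-%s%n",
            properties.getProperty("artifactId"), properties.getProperty("version"));
      }
    }
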
diff --git a/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java b/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
index eda8643..45a57b0 100644
--- a/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
+++ b/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
@@ -20,7 +20,6 @@ package org.apache.iotdb.cli;
 
 import java.io.File;
 import java.io.IOException;
-
 import org.apache.iotdb.db.utils.EnvironmentUtils;
 import org.junit.*;
 
@@ -51,18 +50,31 @@ public class StartClientScriptIT extends AbstractScript {
   protected void testOnWindows() throws IOException {
     String dir = getCliPath();
     final String[] output = {
-        "IoTDB> Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668."};
-    ProcessBuilder builder = new ProcessBuilder("cmd.exe", "/c",
-        dir + File.separator + "sbin" + File.separator + "start-cli.bat",
-        "-h",
-        "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root");
+      "IoTDB> Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668."
+    };
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "cmd.exe",
+            "/c",
+            dir + File.separator + "sbin" + File.separator + "start-cli.bat",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root");
     testOutput(builder, output);
 
-    final String[] output2 = {
-        "Msg: The statement is executed successfully."};
-    ProcessBuilder builder2 = new ProcessBuilder("cmd.exe", "/c",
-        dir + File.separator + "sbin" + File.separator + "start-cli.bat",
-        "-e", "\"flush\"");
+    final String[] output2 = {"Msg: The statement is executed successfully."};
+    ProcessBuilder builder2 =
+        new ProcessBuilder(
+            "cmd.exe",
+            "/c",
+            dir + File.separator + "sbin" + File.separator + "start-cli.bat",
+            "-e",
+            "\"flush\"");
     testOutput(builder2, output2);
   }
 
@@ -70,18 +82,29 @@ public class StartClientScriptIT extends AbstractScript {
   protected void testOnUnix() throws IOException {
     String dir = getCliPath();
     final String[] output = {
-        "IoTDB> Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668."};
-    ProcessBuilder builder = new ProcessBuilder("sh",
-        dir + File.separator + "sbin" + File.separator + "start-cli.sh",
-        "-h",
-        "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root");
+      "IoTDB> Connection Error, please check whether the network is available or the server has started. Host is 127.0.0.1, port is 6668."
+    };
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "sh",
+            dir + File.separator + "sbin" + File.separator + "start-cli.sh",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root");
     testOutput(builder, output);
 
-    final String[] output2 = {
-        "Msg: The statement is executed successfully."};
-    ProcessBuilder builder2 = new ProcessBuilder("sh",
-        dir + File.separator + "sbin" + File.separator + "start-cli.sh",
-        "-e", "\"flush\"");
+    final String[] output2 = {"Msg: The statement is executed successfully."};
+    ProcessBuilder builder2 =
+        new ProcessBuilder(
+            "sh",
+            dir + File.separator + "sbin" + File.separator + "start-cli.sh",
+            "-e",
+            "\"flush\"");
     testOutput(builder2, output2);
   }
 }
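
Each platform test assembles the child process argument-by-argument and then scans its stdout. A condensed sketch of that flow (script path and flags are placeholders; testOutput in AbstractScript does the actual line comparison):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class ProcessSketch {
      public static void main(String[] args) throws IOException, InterruptedException {
        ProcessBuilder builder = new ProcessBuilder("sh", "sbin/start-cli.sh", "-e", "\"flush\"");
        builder.redirectErrorStream(true); // fold stderr into stdout, simpler to assert on
        Process process = builder.start();
        try (BufferedReader reader =
            new BufferedReader(new InputStreamReader(process.getInputStream()))) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        }
        System.out.println("exit code: " + process.waitFor());
      }
    }
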
diff --git a/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java b/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
index ec9011e..fd1c9ba 100644
--- a/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
@@ -25,8 +25,9 @@ public class CsvLineSplitTest {
 
   @Test
   public void testSplit() {
-    Assert.assertArrayEquals(new String[]{"", "a", "b", "c", "\\\""}, ImportCsv.splitCsvLine(",a,b,c,\"\\\"\""));
-    Assert.assertArrayEquals(new String[]{"", "a", "b", "\\'"}, ImportCsv.splitCsvLine(",a,b,\"\\'\""));
+    Assert.assertArrayEquals(
+        new String[] {"", "a", "b", "c", "\\\""}, ImportCsv.splitCsvLine(",a,b,c,\"\\\"\""));
+    Assert.assertArrayEquals(
+        new String[] {"", "a", "b", "\\'"}, ImportCsv.splitCsvLine(",a,b,\"\\'\""));
   }
-
 }
diff --git a/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java b/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
index 2673979..580d77b 100644
--- a/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
@@ -25,18 +25,16 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class ExportCsvTestIT extends AbstractScript{
+public class ExportCsvTestIT extends AbstractScript {
 
   @Before
-  public void setUp() {
-  }
+  public void setUp() {}
 
   @After
-  public void tearDown() {
-  }
+  public void tearDown() {}
 
   @Test
-  public void test() throws IOException{
+  public void test() throws IOException {
     String os = System.getProperty("os.name").toLowerCase();
     if (os.startsWith("windows")) {
       testOnWindows();
@@ -47,29 +45,56 @@ public class ExportCsvTestIT extends AbstractScript{
 
   @Override
   protected void testOnWindows() throws IOException {
-    final String[] output = {"````````````````````````````````````````````````",
-        "Starting IoTDB Client Export Script",
-        "````````````````````````````````````````````````",
-        "Connect failed because org.apache.thrift.transport.TTransportException: "
-            + "java.net.ConnectException: Connection refused"};
+    final String[] output = {
+      "````````````````````````````````````````````````",
+      "Starting IoTDB Client Export Script",
+      "````````````````````````````````````````````````",
+      "Connect failed because org.apache.thrift.transport.TTransportException: "
+          + "java.net.ConnectException: Connection refused"
+    };
     String dir = getCliPath();
-    ProcessBuilder builder = new ProcessBuilder("cmd.exe", "/c",
-        dir + File.separator + "tools" + File.separator + "export-csv.bat",
-        "-h", "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root", "-td", "./");
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "cmd.exe",
+            "/c",
+            dir + File.separator + "tools" + File.separator + "export-csv.bat",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root",
+            "-td",
+            "./");
     testOutput(builder, output);
   }
 
   @Override
   protected void testOnUnix() throws IOException {
-    final String[] output = {"------------------------------------------",
-        "Starting IoTDB Client Export Script",
-        "------------------------------------------",
-        "Connect failed because org.apache.thrift.transport.TTransportException: "
-            + "java.net.ConnectException: Connection refused"};
+    final String[] output = {
+      "------------------------------------------",
+      "Starting IoTDB Client Export Script",
+      "------------------------------------------",
+      "Connect failed because org.apache.thrift.transport.TTransportException: "
+          + "java.net.ConnectException: Connection refused"
+    };
     String dir = getCliPath();
-    ProcessBuilder builder = new ProcessBuilder("sh",
-        dir + File.separator + "tools" + File.separator + "export-csv.sh",
-        "-h", "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root", "-td", "./");
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "sh",
+            dir + File.separator + "tools" + File.separator + "export-csv.sh",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root",
+            "-td",
+            "./");
     testOutput(builder, output);
   }
 }
diff --git a/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java b/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
index 6f7d2f4..fe644f0 100644
--- a/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
@@ -28,12 +28,10 @@ import org.junit.Test;
 public class ImportCsvTestIT extends AbstractScript {
 
   @Before
-  public void setUp() {
-  }
+  public void setUp() {}
 
   @After
-  public void tearDown() {
-  }
+  public void tearDown() {}
 
   @Test
   public void test() throws IOException {
@@ -47,30 +45,56 @@ public class ImportCsvTestIT extends AbstractScript {
 
   @Override
   protected void testOnWindows() throws IOException {
-    final String[] output = {"````````````````````````````````````````````````",
-        "Starting IoTDB Client Import Script",
-        "````````````````````````````````````````````````",
-        "Encounter an error when connecting to server, because org.apache.thrift.transport.TTransportException: "
-            + "java.net.ConnectException: Connection refused"};
+    final String[] output = {
+      "````````````````````````````````````````````````",
+      "Starting IoTDB Client Import Script",
+      "````````````````````````````````````````````````",
+      "Encounter an error when connecting to server, because org.apache.thrift.transport.TTransportException: "
+          + "java.net.ConnectException: Connection refused"
+    };
     String dir = getCliPath();
-    ProcessBuilder builder = new ProcessBuilder("cmd.exe", "/c",
-        dir + File.separator + "tools" + File.separator + "import-csv.bat",
-        "-h", "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root", "-f", "./");
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "cmd.exe",
+            "/c",
+            dir + File.separator + "tools" + File.separator + "import-csv.bat",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root",
+            "-f",
+            "./");
     testOutput(builder, output);
   }
 
   @Override
   protected void testOnUnix() throws IOException {
-    final String[] output = {"------------------------------------------",
-        "Starting IoTDB Client Import Script",
-        "------------------------------------------",
-        "Encounter an error when connecting to server, because org.apache.thrift.transport.TTransportException: "
-            + "java.net.ConnectException: Connection refused"};
+    final String[] output = {
+      "------------------------------------------",
+      "Starting IoTDB Client Import Script",
+      "------------------------------------------",
+      "Encounter an error when connecting to server, because org.apache.thrift.transport.TTransportException: "
+          + "java.net.ConnectException: Connection refused"
+    };
     String dir = getCliPath();
-    ProcessBuilder builder = new ProcessBuilder("sh",
-        dir + File.separator + "tools" + File.separator + "import-csv.sh",
-        "-h",
-        "127.0.0.1", "-p", "6668", "-u", "root", "-pw", "root", "-f", "./");
+    ProcessBuilder builder =
+        new ProcessBuilder(
+            "sh",
+            dir + File.separator + "tools" + File.separator + "import-csv.sh",
+            "-h",
+            "127.0.0.1",
+            "-p",
+            "6668",
+            "-u",
+            "root",
+            "-pw",
+            "root",
+            "-f",
+            "./");
     testOutput(builder, output);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java
index 499945b..3146286 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java
@@ -91,81 +91,76 @@ public class ClientMain {
     options.addOption(new Option(PARAM_QUERY, "Perform query"));
     options.addOption(new Option(PARAM_DELETE_SERIES, "Perform deleting timeseries"));
     options.addOption(new Option(PARAM_DELETE_STORAGE_GROUP, "Perform deleting storage group"));
-    options.addOption(new Option(PARAM_QUERY_PORTS, true, "Ports to query (ip is currently "
-        + "localhost)"));
+    options.addOption(
+        new Option(PARAM_QUERY_PORTS, true, "Ports to query (ip is currently " + "localhost)"));
     options.addOption(new Option(PARAM_INSERT_PORT, true, "Port to perform insertion"));
     options.addOption(new Option(PARAM_BATCH, "Test batch statement"));
   }
 
   private static Map<String, TSStatus> failedQueries;
 
-  private static final String[] STORAGE_GROUPS = new String[]{
-      "root.beijing",
-      "root.shanghai",
-      "root.guangzhou",
-      "root.shenzhen",
-  };
+  private static final String[] STORAGE_GROUPS =
+      new String[] {
+        "root.beijing", "root.shanghai", "root.guangzhou", "root.shenzhen",
+      };
 
-  private static final String[] DEVICES = new String[]{
-      "root.beijing.d1",
-      "root.shanghai.d1",
-      "root.guangzhou.d1",
-      "root.shenzhen.d1",
-  };
+  private static final String[] DEVICES =
+      new String[] {
+        "root.beijing.d1", "root.shanghai.d1", "root.guangzhou.d1", "root.shenzhen.d1",
+      };
 
-  private static final String[] MEASUREMENTS = new String[]{
-      "s1"
-  };
+  private static final String[] MEASUREMENTS = new String[] {"s1"};
 
-  private static final TSDataType[] DATA_TYPES = new TSDataType[]{
-      TSDataType.DOUBLE
-  };
+  private static final TSDataType[] DATA_TYPES = new TSDataType[] {TSDataType.DOUBLE};
 
   private static List<MeasurementSchema> schemas;
 
-  private static final String[] DATA_QUERIES = new String[]{
-      // raw data multi series
-      "SELECT * FROM root",
-      "SELECT * FROM root WHERE time <= 691200000",
-      "SELECT * FROM root WHERE time >= 391200000 and time <= 691200000",
-      "SELECT * FROM root.*.* WHERE s1 <= 0.7",
-      // raw data single series
-      "SELECT s1 FROM root.beijing.d1",
-      "SELECT s1 FROM root.shanghai.d1",
-      "SELECT s1 FROM root.guangzhou.d1",
-      "SELECT s1 FROM root.shenzhen.d1",
-      // aggregation
-      "SELECT count(s1) FROM root.*.*",
-      "SELECT avg(s1) FROM root.*.*",
-      "SELECT sum(s1) FROM root.*.*",
-      "SELECT max_value(s1) FROM root.*.*",
-      "SELECT count(s1) FROM root.*.* where time <= 691200000",
-      "SELECT count(s1) FROM root.*.* where s1 <= 0.7",
-      // group by device
-      "SELECT * FROM root GROUP BY DEVICE",
-      // fill
-      "SELECT s1 FROM root.beijing.d1 WHERE time = 86400000 FILL (DOUBLE[PREVIOUS,1d])",
-      "SELECT s1 FROM root.shanghai.d1 WHERE time = 86400000 FILL (DOUBLE[LINEAR,1d,1d])",
-      "SELECT s1 FROM root.guangzhou.d1 WHERE time = 126400000 FILL (DOUBLE[PREVIOUS,1d])",
-      "SELECT s1 FROM root.shenzhen.d1 WHERE time = 126400000 FILL (DOUBLE[LINEAR,1d,1d])",
-      // group by
-      "SELECT COUNT(*) FROM root.*.* GROUP BY ([0, 864000000), 3d, 3d)",
-      "SELECT AVG(*) FROM root.*.* WHERE s1 <= 0.7 GROUP BY ([0, 864000000), 3d, 3d)",
-      // last
-      "SELECT LAST s1 FROM root.*.*",
-  };
-
-  private static final String[] META_QUERY = new String[]{
-      "SHOW STORAGE GROUP",
-      "SHOW TIMESERIES root",
-      "COUNT TIMESERIES root",
-      "COUNT TIMESERIES root GROUP BY LEVEL=2",
-      "SHOW DEVICES",
-      "SHOW TIMESERIES root limit 1 offset 1",
-  };
+  private static final String[] DATA_QUERIES =
+      new String[] {
+        // raw data multi series
+        "SELECT * FROM root",
+        "SELECT * FROM root WHERE time <= 691200000",
+        "SELECT * FROM root WHERE time >= 391200000 and time <= 691200000",
+        "SELECT * FROM root.*.* WHERE s1 <= 0.7",
+        // raw data single series
+        "SELECT s1 FROM root.beijing.d1",
+        "SELECT s1 FROM root.shanghai.d1",
+        "SELECT s1 FROM root.guangzhou.d1",
+        "SELECT s1 FROM root.shenzhen.d1",
+        // aggregation
+        "SELECT count(s1) FROM root.*.*",
+        "SELECT avg(s1) FROM root.*.*",
+        "SELECT sum(s1) FROM root.*.*",
+        "SELECT max_value(s1) FROM root.*.*",
+        "SELECT count(s1) FROM root.*.* where time <= 691200000",
+        "SELECT count(s1) FROM root.*.* where s1 <= 0.7",
+        // group by device
+        "SELECT * FROM root GROUP BY DEVICE",
+        // fill
+        "SELECT s1 FROM root.beijing.d1 WHERE time = 86400000 FILL (DOUBLE[PREVIOUS,1d])",
+        "SELECT s1 FROM root.shanghai.d1 WHERE time = 86400000 FILL (DOUBLE[LINEAR,1d,1d])",
+        "SELECT s1 FROM root.guangzhou.d1 WHERE time = 126400000 FILL (DOUBLE[PREVIOUS,1d])",
+        "SELECT s1 FROM root.shenzhen.d1 WHERE time = 126400000 FILL (DOUBLE[LINEAR,1d,1d])",
+        // group by
+        "SELECT COUNT(*) FROM root.*.* GROUP BY ([0, 864000000), 3d, 3d)",
+        "SELECT AVG(*) FROM root.*.* WHERE s1 <= 0.7 GROUP BY ([0, 864000000), 3d, 3d)",
+        // last
+        "SELECT LAST s1 FROM root.*.*",
+      };
+
+  private static final String[] META_QUERY =
+      new String[] {
+        "SHOW STORAGE GROUP",
+        "SHOW TIMESERIES root",
+        "COUNT TIMESERIES root",
+        "COUNT TIMESERIES root GROUP BY LEVEL=2",
+        "SHOW DEVICES",
+        "SHOW TIMESERIES root limit 1 offset 1",
+      };
 
   public static void main(String[] args)
-      throws TException, StatementExecutionException, IoTDBConnectionException, ParseException, SQLException, ClassNotFoundException {
+      throws TException, StatementExecutionException, IoTDBConnectionException, ParseException,
+          SQLException, ClassNotFoundException {
     CommandLineParser parser = new DefaultParser();
     CommandLine commandLine = parser.parse(options, args);
     boolean noOption = args.length == 0;
@@ -173,8 +168,7 @@ public class ClientMain {
     failedQueries = new HashMap<>();
     prepareSchema();
 
-
-    if (commandLine.hasOption(PARAM_INSERT_PORT)){
+    if (commandLine.hasOption(PARAM_INSERT_PORT)) {
       port = Integer.parseInt(commandLine.getOptionValue(PARAM_INSERT_PORT));
     }
 
@@ -207,7 +201,7 @@ public class ClientMain {
         queryPorts = parseIntArray(commandLine.getOptionValue(PARAM_QUERY_PORTS));
       }
       if (queryPorts == null) {
-        queryPorts = new int[]{55560, 55561, 55562};
+        queryPorts = new int[] {55560, 55561, 55562};
       }
       for (int queryPort : queryPorts) {
         System.out.println("Test port: " + queryPort);
@@ -226,8 +220,7 @@ public class ClientMain {
     }
   }
 
-  private static void doDeleteSeries(boolean noOption, CommandLine commandLine)
-      throws TException {
+  private static void doDeleteSeries(boolean noOption, CommandLine commandLine) throws TException {
     if (noOption || commandLine.hasOption(PARAM_DELETE_SERIES)) {
       System.out.println("Test delete timeseries");
       Client client = getClient(ip, port);
@@ -269,8 +262,9 @@ public class ClientMain {
   }
 
   private static long connectClient(Client client) throws TException {
-    TSOpenSessionReq openReq = new TSOpenSessionReq(TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3,
-        ZoneId.systemDefault().getId());
+    TSOpenSessionReq openReq =
+        new TSOpenSessionReq(
+            TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3, ZoneId.systemDefault().getId());
     openReq.setUsername("root");
     openReq.setPassword("root");
     TSOpenSessionResp openResp = client.openSession(openReq);
@@ -283,8 +277,9 @@ public class ClientMain {
     TTransport transport = RpcTransportFactory.INSTANCE.getTransport(new TSocket(ip, port));
     transport.open();
     TProtocol protocol =
-        IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable() ?
-            new TCompactProtocol(transport) : new TBinaryProtocol(transport);
+        IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable()
+            ? new TCompactProtocol(transport)
+            : new TBinaryProtocol(transport);
     return factory.getClient(protocol);
   }
 
@@ -293,8 +288,9 @@ public class ClientMain {
     for (String device : DEVICES) {
       for (int i = 0; i < MEASUREMENTS.length; i++) {
         String measurement = MEASUREMENTS[i];
-        schemas.add(new MeasurementSchema(device + IoTDBConstant.PATH_SEPARATOR + measurement,
-            DATA_TYPES[i]));
+        schemas.add(
+            new MeasurementSchema(
+                device + IoTDBConstant.PATH_SEPARATOR + measurement, DATA_TYPES[i]));
       }
     }
   }
@@ -316,8 +312,9 @@ public class ClientMain {
     if (logger.isInfoEnabled()) {
       logger.info("{ {} }", query);
     }
-    TSExecuteStatementResp resp = client
-        .executeQueryStatement(new TSExecuteStatementReq(sessionId, query, statementId).setFetchSize(1000));
+    TSExecuteStatementResp resp =
+        client.executeQueryStatement(
+            new TSExecuteStatementReq(sessionId, query, statementId).setFetchSize(1000));
     if (resp.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
       failedQueries.put(query, resp.status);
       return;
@@ -328,9 +325,18 @@ public class ClientMain {
       logger.info(resp.columns.toString());
     }
 
-    SessionDataSet dataSet = new SessionDataSet(query, resp.getColumns(),
-        resp.getDataTypeList(), resp.columnNameIndexMap, queryId, statementId, client, sessionId,
-        resp.queryDataSet, false);
+    SessionDataSet dataSet =
+        new SessionDataSet(
+            query,
+            resp.getColumns(),
+            resp.getDataTypeList(),
+            resp.columnNameIndexMap,
+            queryId,
+            statementId,
+            client,
+            sessionId,
+            resp.queryDataSet,
+            false);
 
     while (dataSet.hasNext()) {
       if (logger.isInfoEnabled()) {
@@ -350,7 +356,7 @@ public class ClientMain {
       logger.info(client.deleteStorageGroups(sessionId, Arrays.asList(STORAGE_GROUPS)).toString());
     }
 
-    testQuery(client, sessionId, new String[]{"SELECT * FROM root"});
+    testQuery(client, sessionId, new String[] {"SELECT * FROM root"});
   }
 
   private static void registerTimeseries(long sessionId, Client client) throws TException {
@@ -433,9 +439,9 @@ public class ClientMain {
 
   private static void testBatch(String ip, int port) throws ClassNotFoundException, SQLException {
     Class.forName(Config.JDBC_DRIVER_NAME);
-    try (Connection connection = DriverManager
-        .getConnection(Config.IOTDB_URL_PREFIX + String.format("%s:%d/", ip, port), "root",
-            "root");
+    try (Connection connection =
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + String.format("%s:%d/", ip, port), "root", "root");
         Statement statement = connection.createStatement()) {
 
       statement.addBatch("SET STORAGE GROUP TO root.batch1");
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java
index d575ded..a43f53b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java
@@ -37,29 +37,38 @@ public class ClusterFileFlushPolicy implements TsFileFlushPolicy {
   private ExecutorService closePartitionExecutor;
   private MetaGroupMember metaGroupMember;
 
-  public ClusterFileFlushPolicy(
-      MetaGroupMember metaGroupMember) {
+  public ClusterFileFlushPolicy(MetaGroupMember metaGroupMember) {
     this.metaGroupMember = metaGroupMember;
-    this.closePartitionExecutor = new ThreadPoolExecutor(16, 1024, 0, TimeUnit.SECONDS,
-        new LinkedBlockingDeque<>(), r -> {
-      Thread thread = new Thread(r);
-      thread.setName("ClusterFileFlushPolicy-" + thread.getId());
-      return thread;
-    });
+    this.closePartitionExecutor =
+        new ThreadPoolExecutor(
+            16,
+            1024,
+            0,
+            TimeUnit.SECONDS,
+            new LinkedBlockingDeque<>(),
+            r -> {
+              Thread thread = new Thread(r);
+              thread.setName("ClusterFileFlushPolicy-" + thread.getId());
+              return thread;
+            });
   }
 
   @Override
-  public void apply(StorageGroupProcessor storageGroupProcessor, TsFileProcessor processor,
-      boolean isSeq) {
-    logger.info("The memtable size reaches the threshold, async flush it to tsfile: {}",
+  public void apply(
+      StorageGroupProcessor storageGroupProcessor, TsFileProcessor processor, boolean isSeq) {
+    logger.info(
+        "The memtable size reaches the threshold, async flush it to tsfile: {}",
         processor.getTsFileResource().getTsFile().getAbsolutePath());
 
     if (processor.shouldClose()) {
       // find the related DataGroupMember and close the processor through it
       // we execute it in another thread to avoid deadlocks
-      closePartitionExecutor
-          .submit(() -> metaGroupMember.closePartition(storageGroupProcessor.getVirtualStorageGroupId(),
-              processor.getTimeRangeId(), isSeq));
+      closePartitionExecutor.submit(
+          () ->
+              metaGroupMember.closePartition(
+                  storageGroupProcessor.getVirtualStorageGroupId(),
+                  processor.getTimeRangeId(),
+                  isSeq));
     }
     // flush the memtable anyway to avoid the insertion triggering the policy again
     processor.asyncFlush();
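
The constructor reformatted above wires a ThreadPoolExecutor with a naming ThreadFactory so flush threads are identifiable in thread dumps. A standalone sketch of that pattern (pool sizes copied from the diff; the submitted task is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class NamedPoolSketch {
  public static void main(String[] args) {
    // A bounded pool whose workers carry a descriptive name, as in
    // ClusterFileFlushPolicy above.
    ExecutorService closePartitionExecutor =
        new ThreadPoolExecutor(
            16, 1024, 0, TimeUnit.SECONDS,
            new LinkedBlockingDeque<>(),
            r -> {
              Thread thread = new Thread(r);
              thread.setName("ClusterFileFlushPolicy-" + thread.getId());
              return thread;
            });
    closePartitionExecutor.submit(() -> System.out.println(Thread.currentThread().getName()));
    closePartitionExecutor.shutdown();
  }
}
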
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
index 1cb7a6f..a6caf76 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
@@ -64,14 +64,15 @@ public class ClusterMain {
 
   public static void main(String[] args) {
     if (args.length < 1) {
-      logger.error("Usage: <-s|-a|-r> [-internal_meta_port <internal meta port>] "
-          + "[-internal_data_port <internal data port>] "
-          + "[-cluster_rpc_port <cluster rpc port>] "
-          + "[-seed_nodes <node1:meta_port:data_port:cluster_rpc_port,"
-          +               "node2:meta_port:data_port:cluster_rpc_port,"
-          +           "...,noden:meta_port:data_port:cluster_rpc_port>] "
-          + "[-sc] "
-          + "[-rpc_port <rpc port>]");
+      logger.error(
+          "Usage: <-s|-a|-r> [-internal_meta_port <internal meta port>] "
+              + "[-internal_data_port <internal data port>] "
+              + "[-cluster_rpc_port <cluster rpc port>] "
+              + "[-seed_nodes <node1:meta_port:data_port:cluster_rpc_port,"
+              + "node2:meta_port:data_port:cluster_rpc_port,"
+              + "...,noden:meta_port:data_port:cluster_rpc_port>] "
+              + "[-sc] "
+              + "[-rpc_port <rpc port>]");
       return;
     }
     String mode = args[0];
@@ -95,8 +96,11 @@ public class ClusterMain {
         // preStartCustomize();
         metaServer.start();
         metaServer.buildCluster();
-      } catch (TTransportException | StartupException | QueryProcessException |
-          StartUpCheckFailureException | ConfigInconsistentException e) {
+      } catch (TTransportException
+          | StartupException
+          | QueryProcessException
+          | StartUpCheckFailureException
+          | ConfigInconsistentException e) {
         metaServer.stop();
         logger.error("Fail to start meta server", e);
       }
@@ -106,7 +110,11 @@ public class ClusterMain {
         // preStartCustomize();
         metaServer.start();
         metaServer.joinCluster();
-      } catch (TTransportException | StartupException | QueryProcessException | StartUpCheckFailureException | ConfigInconsistentException e) {
+      } catch (TTransportException
+          | StartupException
+          | QueryProcessException
+          | StartUpCheckFailureException
+          | ConfigInconsistentException e) {
         metaServer.stop();
         logger.error("Fail to join cluster", e);
       }
@@ -125,16 +133,18 @@ public class ClusterMain {
     ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
     // check the initial replicateNum and refuse to start when the replicateNum <= 0
     if (config.getReplicationNum() <= 0) {
-      String message = String.format("ReplicateNum should be greater than 0 instead of %d.",
-          config.getReplicationNum());
+      String message =
+          String.format(
+              "ReplicateNum should be greater than 0 instead of %d.", config.getReplicationNum());
       throw new StartupException(metaServer.getMember().getName(), message);
     }
     // check the initial cluster size and refuse to start when the size < quorum
     int quorum = config.getReplicationNum() / 2 + 1;
     if (config.getSeedNodeUrls().size() < quorum) {
-      String message = String.format("Seed number less than quorum, seed number: %s, quorum: "
-              + "%s.",
-          config.getSeedNodeUrls().size(), quorum);
+      String message =
+          String.format(
+              "Seed number less than quorum, seed number: %s, quorum: " + "%s.",
+              config.getSeedNodeUrls().size(), quorum);
       throw new StartupException(metaServer.getMember().getName(), message);
     }
     // assert not duplicated nodes
@@ -142,8 +152,9 @@ public class ClusterMain {
     for (String url : config.getSeedNodeUrls()) {
       Node node = ClusterUtils.parseNode(url);
       if (seedNodes.contains(node)) {
-        String message = String.format(
-            "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls());
+        String message =
+            String.format(
+                "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls());
         throw new StartupException(metaServer.getMember().getName(), message);
       }
       seedNodes.add(node);
@@ -152,10 +163,11 @@ public class ClusterMain {
     // assert this node is in all nodes when restart
     if (!metaServer.getMember().getAllNodes().isEmpty()) {
       if (!metaServer.getMember().getAllNodes().contains(metaServer.getMember().getThisNode())) {
-        String message = String.format(
-            "All nodes in partitionTables must contains local node in start-server mode. "
-                + "LocalNode: %s, AllNodes: %s",
-            metaServer.getMember().getThisNode(), metaServer.getMember().getAllNodes());
+        String message =
+            String.format(
+                "All nodes in partitionTables must contains local node in start-server mode. "
+                    + "LocalNode: %s, AllNodes: %s",
+                metaServer.getMember().getThisNode(), metaServer.getMember().getAllNodes());
         throw new StartupException(metaServer.getMember().getName(), message);
       } else {
         return;
@@ -164,12 +176,16 @@ public class ClusterMain {
 
     // assert this node is in seed nodes list
     Node localNode = new Node();
-    localNode.setIp(config.getClusterRpcIp()).setMetaPort(config.getInternalMetaPort())
-        .setDataPort(config.getInternalDataPort()).setClientPort(config.getClusterRpcPort());
+    localNode
+        .setIp(config.getClusterRpcIp())
+        .setMetaPort(config.getInternalMetaPort())
+        .setDataPort(config.getInternalDataPort())
+        .setClientPort(config.getClusterRpcPort());
     if (!seedNodes.contains(localNode)) {
-      String message = String.format(
-          "SeedNodes must contains local node in start-server mode. LocalNode: %s ,SeedNodes: %s",
-          localNode.toString(), config.getSeedNodeUrls());
+      String message =
+          String.format(
+              "SeedNodes must contains local node in start-server mode. LocalNode: %s ,SeedNodes: %s",
+              localNode.toString(), config.getSeedNodeUrls());
       throw new StartupException(metaServer.getMember().getName(), message);
     }
   }
@@ -179,12 +195,12 @@ public class ClusterMain {
     String[] clusterParams;
     String[] serverParams = null;
     for (index = 0; index < args.length; index++) {
-      //find where -sc is
+      // find where -sc is
       if (SERVER_CONF_SEPARATOR.equals(args[index])) {
         break;
       }
     }
-    //parameters from 0 to "-sc" are for clusters
+    // parameters from 0 to "-sc" are for clusters
     clusterParams = Arrays.copyOfRange(args, 0, index);
 
     if (index < args.length) {
@@ -202,9 +218,7 @@ public class ClusterMain {
     }
   }
 
-  /**
-   * check the configuration is legal or not
-   */
+  /** Check whether the configuration is legal. */
   private static boolean checkConfig() {
     // 0. first replace all hostname with ip
     try {
@@ -222,11 +236,12 @@ public class ClusterMain {
     List<String> seedNodes = config.getSeedNodeUrls();
     boolean isLocalCluster = localhostIp.equals(configClusterRpcIp);
     for (String seedNodeIP : seedNodes) {
-      if ((isLocalCluster && !seedNodeIP.contains(localhostIp)) ||
-          (!isLocalCluster && seedNodeIP.contains(localhostIp))) {
+      if ((isLocalCluster && !seedNodeIP.contains(localhostIp))
+          || (!isLocalCluster && seedNodeIP.contains(localhostIp))) {
         logger.error(
             "cluster_rpc_ip={} and seed_nodes={} should be consistent, both use local ip or real ip please",
-            configClusterRpcIp, seedNodes);
+            configClusterRpcIp,
+            seedNodes);
         return false;
       }
     }
@@ -241,8 +256,8 @@ public class ClusterMain {
     String ip = args[1];
     int metaPort = Integer.parseInt(args[2]);
     ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
-    TProtocolFactory factory = config
-        .isRpcThriftCompressionEnabled() ? new TCompactProtocol.Factory() : new Factory();
+    TProtocolFactory factory =
+        config.isRpcThriftCompressionEnabled() ? new TCompactProtocol.Factory() : new Factory();
     Node nodeToRemove = new Node();
     nodeToRemove.setIp(ip).setMetaPort(metaPort);
     // try sending the request to each seed node
@@ -285,50 +300,50 @@ public class ClusterMain {
     return metaServer;
   }
 
-  /**
-   * Developers may perform pre-start customizations here for debugging or experiments.
-   */
+  /** Developers may perform pre-start customizations here for debugging or experiments. */
   @SuppressWarnings("java:S125") // leaving examples
   private static void preStartCustomize() {
     // customize data distribution
     // The given example tries to divide storage groups like "root.sg_1", "root.sg_2"... into k
     // nodes evenly, and use default strategy for other groups
-    SlotPartitionTable.setSlotStrategy(new SlotStrategy() {
-      SlotStrategy defaultStrategy = new SlotStrategy.DefaultStrategy();
-      int k = 3;
-      @Override
-      public int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum) {
-        int sgSerialNum = extractSerialNumInSGName(storageGroupName) % k;
-        if (sgSerialNum > 0) {
-          return maxSlotNum / k * sgSerialNum;
-        } else {
-          return defaultStrategy.calculateSlotByTime(storageGroupName, timestamp, maxSlotNum);
-        }
-      }
+    SlotPartitionTable.setSlotStrategy(
+        new SlotStrategy() {
+          SlotStrategy defaultStrategy = new SlotStrategy.DefaultStrategy();
+          int k = 3;
 
-      @Override
-      public int calculateSlotByPartitionNum(String storageGroupName, long partitionId,
-          int maxSlotNum) {
-        int sgSerialNum = extractSerialNumInSGName(storageGroupName) % k;
-        if (sgSerialNum > 0) {
-          return maxSlotNum / k * sgSerialNum;
-        } else {
-          return defaultStrategy
-              .calculateSlotByPartitionNum(storageGroupName, partitionId, maxSlotNum);
-        }
-      }
+          @Override
+          public int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum) {
+            int sgSerialNum = extractSerialNumInSGName(storageGroupName) % k;
+            if (sgSerialNum > 0) {
+              return maxSlotNum / k * sgSerialNum;
+            } else {
+              return defaultStrategy.calculateSlotByTime(storageGroupName, timestamp, maxSlotNum);
+            }
+          }
 
-      private int extractSerialNumInSGName(String storageGroupName) {
-        String[] s = storageGroupName.split("_");
-        if (s.length != 2) {
-          return -1;
-        }
-        try {
-          return Integer.parseInt(s[1]);
-        } catch (NumberFormatException e) {
-          return -1;
-        }
-      }
-    });
+          @Override
+          public int calculateSlotByPartitionNum(
+              String storageGroupName, long partitionId, int maxSlotNum) {
+            int sgSerialNum = extractSerialNumInSGName(storageGroupName) % k;
+            if (sgSerialNum > 0) {
+              return maxSlotNum / k * sgSerialNum;
+            } else {
+              return defaultStrategy.calculateSlotByPartitionNum(
+                  storageGroupName, partitionId, maxSlotNum);
+            }
+          }
+
+          private int extractSerialNumInSGName(String storageGroupName) {
+            String[] s = storageGroupName.split("_");
+            if (s.length != 2) {
+              return -1;
+            }
+            try {
+              return Integer.parseInt(s[1]);
+            } catch (NumberFormatException e) {
+              return -1;
+            }
+          }
+        });
   }
 }
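
checkSeedNodes in the hunks above refuses to start when the seed list cannot form a majority; the arithmetic is the usual Raft quorum of n / 2 + 1. A small sketch of that check (class and variable names are illustrative):

public class QuorumSketch {
  // Raft-style majority: more than half of replicationNum copies must agree.
  static int quorum(int replicationNum) {
    return replicationNum / 2 + 1;
  }

  public static void main(String[] args) {
    int replicationNum = 3;
    int seedNodeCount = 2;
    if (seedNodeCount < quorum(replicationNum)) {
      // Mirrors the StartupException path in checkSeedNodes.
      System.out.printf(
          "Seed number less than quorum, seed number: %d, quorum: %d.%n",
          seedNodeCount, quorum(replicationNum));
    }
  }
}
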
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
index 974b587..079dfdf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
@@ -38,8 +38,8 @@ public class RemoteTsFileResource extends TsFileResource {
   /**
    * Whether the plan range ([minPlanIndex, maxPlanIndex]) overlaps with another TsFile in the same
    * time partition. If not (unique = true), we shall have confidence that the file has all data
-   * whose plan indexes are within [minPlanIndex, maxPlanIndex], so we can remove other local
-   * files that overlaps with it.
+   * whose plan indexes are within [minPlanIndex, maxPlanIndex], so we can remove other local files
+   * that overlap with it.
    */
   private boolean isPlanRangeUnique = false;
 
@@ -104,8 +104,12 @@ public class RemoteTsFileResource extends TsFileResource {
     SerializeUtils.deserialize(source, buffer);
     setFile(new File(SerializeUtils.deserializeString(buffer)));
 
-    timeIndex = IoTDBDescriptor.getInstance().getConfig().getTimeIndexLevel().getTimeIndex()
-        .deserialize(buffer);
+    timeIndex =
+        IoTDBDescriptor.getInstance()
+            .getConfig()
+            .getTimeIndexLevel()
+            .getTimeIndex()
+            .deserialize(buffer);
 
     withModification = buffer.get() == 1;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
index 9a1c4df..7f8473f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
@@ -40,6 +40,7 @@ public class DataClientProvider {
    * nodes
    */
   private AsyncClientPool dataAsyncClientPool;
+
   private SyncClientPool dataSyncClientPool;
 
   public DataClientProvider(TProtocolFactory factory) {
@@ -61,7 +62,7 @@ public class DataClientProvider {
   /**
    * Get a thrift client that will connect to "node" using the data port.
    *
-   * @param node    the node to be connected
+   * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
   public AsyncDataClient getAsyncDataClient(Node node, int timeout) throws IOException {
@@ -76,7 +77,7 @@ public class DataClientProvider {
   /**
    * Get a thrift client that will connect to "node" using the data port.
    *
-   * @param node    the node to be connected
+   * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
   public SyncDataClient getSyncDataClient(Node node, int timeout) throws TException {
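
The two accessors above differ only in transport style; a caller picks one based on configuration. A hedged usage sketch (the import paths and node address are inferred for illustration, not confirmed by this diff):

import org.apache.iotdb.cluster.client.DataClientProvider;
import org.apache.iotdb.cluster.client.sync.SyncDataClient;
import org.apache.iotdb.cluster.rpc.thrift.Node;
import org.apache.thrift.protocol.TBinaryProtocol;

public class ProviderSketch {
  public static void main(String[] args) throws Exception {
    DataClientProvider provider = new DataClientProvider(new TBinaryProtocol.Factory());
    Node node = new Node();
    node.setIp("127.0.0.1").setDataPort(40010); // illustrative address
    // Per the Javadoc above, timeout is the connection threshold in ms.
    SyncDataClient client = provider.getSyncDataClient(node, 10_000);
    // ... issue requests; the provider caches the client in a pool for reuse.
  }
}
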
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientFactory.java
index 258b184..2a897e5 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientFactory.java
@@ -37,8 +37,9 @@ public abstract class AsyncClientFactory {
 
   static {
     if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) {
-      managers = new TAsyncClientManager[ClusterDescriptor.getInstance().getConfig()
-          .getSelectorNumOfClientPool()];
+      managers =
+          new TAsyncClientManager
+              [ClusterDescriptor.getInstance().getConfig().getSelectorNumOfClientPool()];
       for (int i = 0; i < managers.length; i++) {
         try {
           managers[i] = new TAsyncClientManager();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
index b9b752f..1c6c419 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
@@ -50,6 +50,7 @@ public class AsyncClientPool {
 
   /**
    * See getClient(Node node, boolean activatedOnly)
+   *
    * @param node
    * @return
    * @throws IOException
@@ -60,14 +61,13 @@ public class AsyncClientPool {
 
   /**
    * Get a client of the given node from the cache if one is available, or create a new one.
-   * <p>
-   * IMPORTANT!!! The caller should check whether the return value is null or not!
+   *
+   * <p>IMPORTANT!!! The caller should check whether the return value is null or not!
    *
    * @param node the node to connect to
    * @param activatedOnly if true, only return a client if the node's NodeStatus.isActivated ==
-   *                      true, which avoid unnecessary wait for already down nodes, but
-   *                      heartbeat attempts should always try to connect so the node can be
-   *                      reactivated ASAP
+   *     true, which avoids unnecessary waits for already-down nodes, but heartbeat attempts should
+   *     always try to connect so the node can be reactivated ASAP
    * @return if the node can connect, return the client, otherwise null
    * @throws IOException if the node can not be connected
    */
@@ -78,9 +78,9 @@ public class AsyncClientPool {
     }
 
     AsyncClient client;
-    //As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
-    Deque<AsyncClient> clientStack = clientCaches.computeIfAbsent(clusterNode,
-        n -> new ArrayDeque<>());
+    // As clientCaches is a ConcurrentHashMap, computeIfAbsent is thread-safe.
+    Deque<AsyncClient> clientStack =
+        clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
     synchronized (this) {
       if (clientStack.isEmpty()) {
         int nodeClientNum = nodeClientNumMap.getOrDefault(clusterNode, 0);
@@ -109,9 +109,8 @@ public class AsyncClientPool {
    * @throws IOException
    */
   @SuppressWarnings({"squid:S2273"}) // synchronized outside
-  private AsyncClient waitForClient(Deque<AsyncClient> clientStack, ClusterNode node,
-      int nodeClientNum)
-      throws IOException {
+  private AsyncClient waitForClient(
+      Deque<AsyncClient> clientStack, ClusterNode node, int nodeClientNum) throws IOException {
     // wait for an available client
     long waitStart = System.currentTimeMillis();
     while (clientStack.isEmpty()) {
@@ -121,7 +120,9 @@ public class AsyncClientPool {
             && System.currentTimeMillis() - waitStart >= WAIT_CLIENT_TIMEOUT_MS) {
           logger.warn(
               "Cannot get an available client after {}ms, create a new one, factory {} now is {}",
-              WAIT_CLIENT_TIMEOUT_MS, asyncClientFactory, nodeClientNum);
+              WAIT_CLIENT_TIMEOUT_MS,
+              asyncClientFactory,
+              nodeClientNum);
           nodeClientNumMap.put(node, nodeClientNum + 1);
           return asyncClientFactory.getAsyncClient(node, this);
         }
@@ -152,9 +153,9 @@ public class AsyncClientPool {
       logger.warn("A using client {} is put back while running {}", client.hashCode(), call);
     }
     synchronized (this) {
-      //As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
-      Deque<AsyncClient> clientStack = clientCaches
-          .computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
+      // As clientCaches is a ConcurrentHashMap, computeIfAbsent is thread-safe.
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       clientStack.push(client);
       this.notifyAll();
     }
@@ -164,8 +165,8 @@ public class AsyncClientPool {
     ClusterNode clusterNode = new ClusterNode(node);
     // clean all cached clients when network fails
     synchronized (this) {
-      Deque<AsyncClient> clientStack = clientCaches
-          .computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       while (!clientStack.isEmpty()) {
         AsyncClient client = clientStack.pop();
         if (client instanceof AsyncDataClient) {
@@ -188,8 +189,8 @@ public class AsyncClientPool {
   void recreateClient(Node node) {
     ClusterNode clusterNode = new ClusterNode(node);
     synchronized (this) {
-      Deque<AsyncClient> clientStack = clientCaches
-          .computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       try {
         AsyncClient asyncClient = asyncClientFactory.getAsyncClient(node, this);
         clientStack.push(asyncClient);
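
The pool above keeps a per-node stack of clients, waits a bounded time when the stack is empty, and creates a fresh client once the wait times out. A generic, self-contained reduction of that pattern (all names illustrative; the real pool also creates eagerly while below a per-node cap):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class PoolSketch<K, C> {
  private static final long WAIT_CLIENT_TIMEOUT_MS = 5_000;
  private final Map<K, Deque<C>> caches = new ConcurrentHashMap<>();
  private final Function<K, C> factory;

  public PoolSketch(Function<K, C> factory) {
    this.factory = factory;
  }

  public synchronized C getClient(K node) throws InterruptedException {
    Deque<C> stack = caches.computeIfAbsent(node, n -> new ArrayDeque<>());
    long waitStart = System.currentTimeMillis();
    while (stack.isEmpty()) {
      wait(WAIT_CLIENT_TIMEOUT_MS);
      if (stack.isEmpty() && System.currentTimeMillis() - waitStart >= WAIT_CLIENT_TIMEOUT_MS) {
        return factory.apply(node); // give up waiting and create a fresh client
      }
    }
    return stack.pop();
  }

  public synchronized void putClient(K node, C client) {
    caches.computeIfAbsent(node, n -> new ArrayDeque<>()).push(client);
    notifyAll(); // wake any caller blocked in getClient
  }

  public static void main(String[] args) throws InterruptedException {
    PoolSketch<String, String> pool = new PoolSketch<>(node -> "client-for-" + node);
    pool.putClient("node1", "cached-client");
    System.out.println(pool.getClient("node1")); // reuses the cached client
  }
}
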
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
index d6b6f45..fec76b4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
@@ -47,17 +47,25 @@ public class AsyncDataClient extends AsyncClient {
   Node node;
   AsyncClientPool pool;
 
-  public AsyncDataClient(TProtocolFactory protocolFactory,
+  public AsyncDataClient(
+      TProtocolFactory protocolFactory,
       TAsyncClientManager clientManager,
       TNonblockingTransport transport) {
     super(protocolFactory, clientManager, transport);
   }
 
-  public AsyncDataClient(TProtocolFactory protocolFactory,
-      TAsyncClientManager clientManager, Node node, AsyncClientPool pool) throws IOException {
+  public AsyncDataClient(
+      TProtocolFactory protocolFactory,
+      TAsyncClientManager clientManager,
+      Node node,
+      AsyncClientPool pool)
+      throws IOException {
     // the difference of the two clients lies in the port
-    super(protocolFactory, clientManager, new TNonblockingSocket(node.getIp(), node.getDataPort()
-        , RaftServer.getConnectionTimeoutInMS()));
+    super(
+        protocolFactory,
+        clientManager,
+        new TNonblockingSocket(
+            node.getIp(), node.getDataPort(), RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
   }
@@ -70,7 +78,6 @@ public class AsyncDataClient extends AsyncClient {
       pool.putClient(node, this);
       pool.onComplete(node);
     }
-
   }
 
   @SuppressWarnings("squid:S1135")
@@ -79,7 +86,7 @@ public class AsyncDataClient extends AsyncClient {
     super.onError(e);
     if (pool != null) {
       pool.recreateClient(node);
-      //TODO: if e instance of network failure
+      // TODO: check whether e is an instance of a network failure
       pool.onError(node);
     }
   }
@@ -128,9 +135,7 @@ public class AsyncDataClient extends AsyncClient {
 
   @Override
   public String toString() {
-    return "DataClient{" +
-        "node=" + node +
-        '}';
+    return "DataClient{" + "node=" + node + '}';
   }
 
   public Node getNode() {
@@ -139,7 +144,10 @@ public class AsyncDataClient extends AsyncClient {
 
   public boolean isReady() {
     if (___currentMethod != null) {
-      logger.warn("Client {} is running {} and will timeout at {}", hashCode(), ___currentMethod,
+      logger.warn(
+          "Client {} is running {} and will timeout at {}",
+          hashCode(),
+          ___currentMethod,
           new Date(___currentMethod.getTimeoutTimestamp()));
     }
     return ___currentMethod == null && !hasError();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
index 5f82136..9596b96 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
@@ -34,11 +34,19 @@ import org.apache.thrift.transport.TNonblockingSocket;
  */
 public class AsyncDataHeartbeatClient extends AsyncDataClient {
 
-  private AsyncDataHeartbeatClient(TProtocolFactory protocolFactory,
-      TAsyncClientManager clientManager, Node node, AsyncClientPool pool) throws IOException {
-    super(protocolFactory, clientManager, new TNonblockingSocket(node.getIp(),
-        node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET
-        , RaftServer.getConnectionTimeoutInMS()));
+  private AsyncDataHeartbeatClient(
+      TProtocolFactory protocolFactory,
+      TAsyncClientManager clientManager,
+      Node node,
+      AsyncClientPool pool)
+      throws IOException {
+    super(
+        protocolFactory,
+        clientManager,
+        new TNonblockingSocket(
+            node.getIp(),
+            node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
+            RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
   }
@@ -60,10 +68,12 @@ public class AsyncDataHeartbeatClient extends AsyncDataClient {
 
   @Override
   public String toString() {
-    return "AsyncDataHeartbeatClient{" +
-        "node=" + super.getNode() + "," +
-        "dataHeartbeatPort=" + (super.getNode().getDataPort()
-        + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET) +
-        '}';
+    return "AsyncDataHeartbeatClient{"
+        + "node="
+        + super.getNode()
+        + ","
+        + "dataHeartbeatPort="
+        + (super.getNode().getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET)
+        + '}';
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
index 17ad8e3..e6cc4ae 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
@@ -45,17 +45,25 @@ public class AsyncMetaClient extends AsyncClient {
   Node node;
   AsyncClientPool pool;
 
-  public AsyncMetaClient(TProtocolFactory protocolFactory,
+  public AsyncMetaClient(
+      TProtocolFactory protocolFactory,
       TAsyncClientManager clientManager,
       TNonblockingTransport transport) {
     super(protocolFactory, clientManager, transport);
   }
 
-  public AsyncMetaClient(TProtocolFactory protocolFactory,
-      TAsyncClientManager clientManager, Node node, AsyncClientPool pool) throws IOException {
+  public AsyncMetaClient(
+      TProtocolFactory protocolFactory,
+      TAsyncClientManager clientManager,
+      Node node,
+      AsyncClientPool pool)
+      throws IOException {
     // the difference of the two clients lies in the port
-    super(protocolFactory, clientManager, new TNonblockingSocket(node.getIp(), node.getMetaPort(),
-        RaftServer.getConnectionTimeoutInMS()));
+    super(
+        protocolFactory,
+        clientManager,
+        new TNonblockingSocket(
+            node.getIp(), node.getMetaPort(), RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
   }
@@ -75,7 +83,7 @@ public class AsyncMetaClient extends AsyncClient {
   public void onError(Exception e) {
     super.onError(e);
     pool.recreateClient(node);
-    //TODO: if e instance of network failure
+    // TODO: check whether e is an instance of a network failure
     pool.onError(node);
   }
 
@@ -96,12 +104,9 @@ public class AsyncMetaClient extends AsyncClient {
 
   @Override
   public String toString() {
-    return "MetaClient{" +
-        "node=" + node +
-        '}';
+    return "MetaClient{" + "node=" + node + '}';
   }
 
-
   public void close() {
     ___transport.close();
     ___currentMethod = null;
@@ -113,7 +118,10 @@ public class AsyncMetaClient extends AsyncClient {
 
   public boolean isReady() {
     if (___currentMethod != null) {
-      logger.warn("Client {} is running {} and will timeout at {}", hashCode(), ___currentMethod,
+      logger.warn(
+          "Client {} is running {} and will timeout at {}",
+          hashCode(),
+          ___currentMethod,
           new Date(___currentMethod.getTimeoutTimestamp()));
     }
     return ___currentMethod == null;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
index 0cc1016..848be76 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
@@ -34,11 +34,19 @@ import org.apache.thrift.transport.TNonblockingSocket;
  */
 public class AsyncMetaHeartbeatClient extends AsyncMetaClient {
 
-  private AsyncMetaHeartbeatClient(TProtocolFactory protocolFactory,
-      TAsyncClientManager clientManager, Node node, AsyncClientPool pool) throws IOException {
-    super(protocolFactory, clientManager, new TNonblockingSocket(node.getIp(),
-        node.getMetaPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET
-        , RaftServer.getConnectionTimeoutInMS()));
+  private AsyncMetaHeartbeatClient(
+      TProtocolFactory protocolFactory,
+      TAsyncClientManager clientManager,
+      Node node,
+      AsyncClientPool pool)
+      throws IOException {
+    super(
+        protocolFactory,
+        clientManager,
+        new TNonblockingSocket(
+            node.getIp(),
+            node.getMetaPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
+            RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
   }
@@ -60,11 +68,12 @@ public class AsyncMetaHeartbeatClient extends AsyncMetaClient {
 
   @Override
   public String toString() {
-    return "AsyncMetaHeartbeatClient{" +
-        "node=" + super.getNode() + "," +
-        "metaHeartbeatPort=" + (super.getNode().getMetaPort()
-        + ClusterUtils.META_HEARTBEAT_PORT_OFFSET) +
-        '}';
+    return "AsyncMetaHeartbeatClient{"
+        + "node="
+        + super.getNode()
+        + ","
+        + "metaHeartbeatPort="
+        + (super.getNode().getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET)
+        + '}';
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
index b4436f5..6d09182 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
@@ -100,8 +100,9 @@ public class SyncClientAdaptor {
     return responseRef.get();
   }
 
-  public static Boolean matchTerm(AsyncClient client, Node target, long prevLogIndex,
-      long prevLogTerm, Node header) throws TException, InterruptedException {
+  public static Boolean matchTerm(
+      AsyncClient client, Node target, long prevLogIndex, long prevLogTerm, Node header)
+      throws TException, InterruptedException {
     try {
       AtomicReference<Boolean> resultRef = new AtomicReference<>(null);
       GenericHandler<Boolean> matchTermHandler = new GenericHandler<>(target, resultRef);
@@ -119,8 +120,8 @@ public class SyncClientAdaptor {
     }
   }
 
-  public static Long querySingleSeriesByTimestamp(AsyncDataClient client,
-      SingleSeriesQueryRequest request)
+  public static Long querySingleSeriesByTimestamp(
+      AsyncDataClient client, SingleSeriesQueryRequest request)
       throws TException, InterruptedException {
     AtomicReference<Long> result = new AtomicReference<>();
     GenericHandler<Long> handler = new GenericHandler<>(client.getNode(), result);
@@ -134,8 +135,9 @@ public class SyncClientAdaptor {
     return result.get();
   }
 
-  public static Long querySingleSeries(AsyncDataClient client, SingleSeriesQueryRequest request,
-      long timeOffset) throws TException, InterruptedException {
+  public static Long querySingleSeries(
+      AsyncDataClient client, SingleSeriesQueryRequest request, long timeOffset)
+      throws TException, InterruptedException {
     AtomicReference<Long> result = new AtomicReference<>();
     GenericHandler<Long> handler = new GenericHandler<>(client.getNode(), result);
     Filter newFilter;
@@ -157,8 +159,9 @@ public class SyncClientAdaptor {
     return result.get();
   }
 
-  public static List<String> getNodeList(AsyncDataClient client, Node header,
-      String schemaPattern, int level) throws TException, InterruptedException {
+  public static List<String> getNodeList(
+      AsyncDataClient client, Node header, String schemaPattern, int level)
+      throws TException, InterruptedException {
     GetNodesListHandler handler = new GetNodesListHandler();
     AtomicReference<List<String>> response = new AtomicReference<>(null);
     handler.setResponse(response);
@@ -189,8 +192,8 @@ public class SyncClientAdaptor {
     return response.get();
   }
 
-  public static ByteBuffer getAllMeasurementSchema(AsyncDataClient client,
-      Node header, ShowTimeSeriesPlan plan)
+  public static ByteBuffer getAllMeasurementSchema(
+      AsyncDataClient client, Node header, ShowTimeSeriesPlan plan)
       throws IOException, InterruptedException, TException {
     GetTimeseriesSchemaHandler handler = new GetTimeseriesSchemaHandler();
     AtomicReference<ByteBuffer> response = new AtomicReference<>(null);
@@ -200,8 +203,8 @@ public class SyncClientAdaptor {
     DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
     plan.serialize(dataOutputStream);
 
-    client.getAllMeasurementSchema(header, ByteBuffer.wrap(byteArrayOutputStream.toByteArray()),
-        handler);
+    client.getAllMeasurementSchema(
+        header, ByteBuffer.wrap(byteArrayOutputStream.toByteArray()), handler);
     synchronized (response) {
       if (response.get() == null) {
         response.wait(RaftServer.getReadOperationTimeoutMS());
@@ -244,8 +247,8 @@ public class SyncClientAdaptor {
     return resultRef.get();
   }
 
-  public static AddNodeResponse addNode(AsyncMetaClient client, Node thisNode,
-      StartUpStatus startUpStatus)
+  public static AddNodeResponse addNode(
+      AsyncMetaClient client, Node thisNode, StartUpStatus startUpStatus)
       throws TException, InterruptedException {
     JoinClusterHandler handler = new JoinClusterHandler();
     AtomicReference<AddNodeResponse> response = new AtomicReference<>(null);
@@ -261,13 +264,15 @@ public class SyncClientAdaptor {
     return response.get();
   }
 
-  public static List<MeasurementSchema> pullMeasurementSchema(AsyncDataClient client,
-      PullSchemaRequest pullSchemaRequest) throws TException, InterruptedException {
+  public static List<MeasurementSchema> pullMeasurementSchema(
+      AsyncDataClient client, PullSchemaRequest pullSchemaRequest)
+      throws TException, InterruptedException {
     AtomicReference<List<MeasurementSchema>> measurementSchemas = new AtomicReference<>();
 
-    client.pullMeasurementSchema(pullSchemaRequest,
-        new PullMeasurementSchemaHandler(client.getNode(), pullSchemaRequest.getPrefixPaths(),
-            measurementSchemas));
+    client.pullMeasurementSchema(
+        pullSchemaRequest,
+        new PullMeasurementSchemaHandler(
+            client.getNode(), pullSchemaRequest.getPrefixPaths(), measurementSchemas));
     synchronized (measurementSchemas) {
       if (measurementSchemas.get() == null) {
         measurementSchemas.wait(RaftServer.getReadOperationTimeoutMS());
@@ -276,12 +281,14 @@ public class SyncClientAdaptor {
     return measurementSchemas.get();
   }
 
-  public static List<TimeseriesSchema> pullTimeseriesSchema(AsyncDataClient client,
-      PullSchemaRequest pullSchemaRequest) throws TException, InterruptedException {
+  public static List<TimeseriesSchema> pullTimeseriesSchema(
+      AsyncDataClient client, PullSchemaRequest pullSchemaRequest)
+      throws TException, InterruptedException {
     AtomicReference<List<TimeseriesSchema>> timeseriesSchemas = new AtomicReference<>();
-    client.pullTimeSeriesSchema(pullSchemaRequest,
-        new PullTimeseriesSchemaHandler(client.getNode(), pullSchemaRequest.getPrefixPaths(),
-            timeseriesSchemas));
+    client.pullTimeSeriesSchema(
+        pullSchemaRequest,
+        new PullTimeseriesSchemaHandler(
+            client.getNode(), pullSchemaRequest.getPrefixPaths(), timeseriesSchemas));
 
     synchronized (timeseriesSchemas) {
       if (timeseriesSchemas.get() == null) {
@@ -294,8 +301,8 @@ public class SyncClientAdaptor {
   public static List<ByteBuffer> getAggrResult(AsyncDataClient client, GetAggrResultRequest request)
       throws TException, InterruptedException {
     AtomicReference<List<ByteBuffer>> resultReference = new AtomicReference<>();
-    GenericHandler<List<ByteBuffer>> handler = new GenericHandler<>(client.getNode(),
-        resultReference);
+    GenericHandler<List<ByteBuffer>> handler =
+        new GenericHandler<>(client.getNode(), resultReference);
 
     client.getAggrResult(request, handler);
     synchronized (resultReference) {
@@ -309,8 +316,9 @@ public class SyncClientAdaptor {
     return resultReference.get();
   }
 
-  public static List<String> getUnregisteredMeasurements(AsyncDataClient client, Node header,
-      List<String> seriesPaths) throws TException, InterruptedException {
+  public static List<String> getUnregisteredMeasurements(
+      AsyncDataClient client, Node header, List<String> seriesPaths)
+      throws TException, InterruptedException {
     AtomicReference<List<String>> remoteResult = new AtomicReference<>();
     GenericHandler<List<String>> handler = new GenericHandler<>(client.getNode(), remoteResult);
 
@@ -318,19 +326,19 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static GetAllPathsResult getAllPaths(AsyncDataClient client, Node header,
-      List<String> pathsToQuery, boolean withAlias)
+  public static GetAllPathsResult getAllPaths(
+      AsyncDataClient client, Node header, List<String> pathsToQuery, boolean withAlias)
       throws InterruptedException, TException {
     AtomicReference<GetAllPathsResult> remoteResult = new AtomicReference<>();
-    GenericHandler<GetAllPathsResult> handler = new GenericHandler<>(client.getNode(),
-        remoteResult);
+    GenericHandler<GetAllPathsResult> handler =
+        new GenericHandler<>(client.getNode(), remoteResult);
 
     client.getAllPaths(header, pathsToQuery, withAlias, handler);
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static Integer getPathCount(AsyncDataClient client, Node header, List<String> pathsToQuery,
-      int level)
+  public static Integer getPathCount(
+      AsyncDataClient client, Node header, List<String> pathsToQuery, int level)
       throws InterruptedException, TException {
     AtomicReference<Integer> remoteResult = new AtomicReference<>(null);
     GenericHandler<Integer> handler = new GenericHandler<>(client.getNode(), remoteResult);
@@ -339,8 +347,8 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static Set<String> getAllDevices(AsyncDataClient client, Node header,
-      List<String> pathsToQuery)
+  public static Set<String> getAllDevices(
+      AsyncDataClient client, Node header, List<String> pathsToQuery)
       throws InterruptedException, TException {
     AtomicReference<Set<String>> remoteResult = new AtomicReference<>();
     GenericHandler<Set<String>> handler = new GenericHandler<>(client.getNode(), remoteResult);
@@ -367,8 +375,9 @@ public class SyncClientAdaptor {
     return nodeHandler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static TSStatus executeNonQuery(AsyncClient client, PhysicalPlan plan, Node header,
-      Node receiver) throws IOException, TException, InterruptedException {
+  public static TSStatus executeNonQuery(
+      AsyncClient client, PhysicalPlan plan, Node header, Node receiver)
+      throws IOException, TException, InterruptedException {
     AtomicReference<TSStatus> status = new AtomicReference<>();
     ExecutNonQueryReq req = new ExecutNonQueryReq();
     req.planBytes = ByteBuffer.wrap(PlanSerializer.getInstance().serialize(plan));
@@ -385,8 +394,8 @@ public class SyncClientAdaptor {
     return status.get();
   }
 
-  public static ByteBuffer readFile(AsyncDataClient client, String remotePath, long offset,
-      int fetchSize)
+  public static ByteBuffer readFile(
+      AsyncDataClient client, String remotePath, long offset, int fetchSize)
       throws InterruptedException, TException {
     AtomicReference<ByteBuffer> result = new AtomicReference<>();
     GenericHandler<ByteBuffer> handler = new GenericHandler<>(client.getNode(), result);
@@ -395,9 +404,9 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getWriteOperationTimeoutMS());
   }
 
-  public static List<ByteBuffer> getGroupByResult(AsyncDataClient client, Node header,
-      long executorId
-      , long curStartTime, long curEndTime) throws InterruptedException, TException {
+  public static List<ByteBuffer> getGroupByResult(
+      AsyncDataClient client, Node header, long executorId, long curStartTime, long curEndTime)
+      throws InterruptedException, TException {
     AtomicReference<List<ByteBuffer>> fetchResult = new AtomicReference<>();
     GenericHandler<List<ByteBuffer>> handler = new GenericHandler<>(client.getNode(), fetchResult);
 
@@ -405,9 +414,9 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static ByteBuffer peekNextNotNullValue(AsyncDataClient client, Node header,
-      long executorId
-      , long curStartTime, long curEndTime) throws InterruptedException, TException {
+  public static ByteBuffer peekNextNotNullValue(
+      AsyncDataClient client, Node header, long executorId, long curStartTime, long curEndTime)
+      throws InterruptedException, TException {
     AtomicReference<ByteBuffer> fetchResult = new AtomicReference<>();
     GenericHandler<ByteBuffer> handler = new GenericHandler<>(client.getNode(), fetchResult);
 
@@ -415,13 +424,16 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static <T extends Snapshot> Map<Integer, T> pullSnapshot(AsyncDataClient client,
-      PullSnapshotRequest request, List<Integer> slots, SnapshotFactory<T> factory)
+  public static <T extends Snapshot> Map<Integer, T> pullSnapshot(
+      AsyncDataClient client,
+      PullSnapshotRequest request,
+      List<Integer> slots,
+      SnapshotFactory<T> factory)
       throws TException, InterruptedException {
     AtomicReference<Map<Integer, T>> snapshotRef = new AtomicReference<>();
 
-    client.pullSnapshot(request, new PullSnapshotHandler<>(snapshotRef,
-        client.getNode(), slots, factory));
+    client.pullSnapshot(
+        request, new PullSnapshotHandler<>(snapshotRef, client.getNode(), slots, factory));
     synchronized (snapshotRef) {
       if (snapshotRef.get() == null) {
         snapshotRef.wait(RaftServer.getReadOperationTimeoutMS());
@@ -430,16 +442,24 @@ public class SyncClientAdaptor {
     return snapshotRef.get();
   }
 
-  public static ByteBuffer last(AsyncDataClient client, List<PartialPath> seriesPaths,
-      List<Integer> dataTypeOrdinals, QueryContext context,
+  public static ByteBuffer last(
+      AsyncDataClient client,
+      List<PartialPath> seriesPaths,
+      List<Integer> dataTypeOrdinals,
+      QueryContext context,
       Map<String, Set<String>> deviceMeasurements,
       Node header)
       throws TException, InterruptedException {
     AtomicReference<ByteBuffer> result = new AtomicReference<>();
     GenericHandler<ByteBuffer> handler = new GenericHandler<>(client.getNode(), result);
-    LastQueryRequest request = new LastQueryRequest(PartialPath.toStringList(seriesPaths),
-        dataTypeOrdinals,
-        context.getQueryId(), deviceMeasurements, header, client.getNode());
+    LastQueryRequest request =
+        new LastQueryRequest(
+            PartialPath.toStringList(seriesPaths),
+            dataTypeOrdinals,
+            context.getQueryId(),
+            deviceMeasurements,
+            header,
+            client.getNode());
 
     client.last(request, handler);
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
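
Every adaptor in this file follows one shape: park the response in an AtomicReference, let the asynchronous callback fill it and notify, then wait with a bounded timeout. A self-contained reduction of that shape (the async side is simulated with a thread; the timeout constant stands in for the RaftServer timeouts used above):

import java.util.concurrent.atomic.AtomicReference;

public class AdaptorSketch {
  private static final long READ_TIMEOUT_MS = 5_000;

  static Long querySync() throws InterruptedException {
    AtomicReference<Long> result = new AtomicReference<>(null);
    // Simulated async client: the "response" arrives on another thread.
    new Thread(
            () -> {
              synchronized (result) {
                result.set(42L);
                result.notifyAll();
              }
            })
        .start();
    synchronized (result) {
      if (result.get() == null) {
        result.wait(READ_TIMEOUT_MS); // bounded wait, as with RaftServer timeouts
      }
    }
    return result.get(); // null means the call timed out
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(querySync());
  }
}
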
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientFactory.java
index fc70b26..c16c34f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientFactory.java
@@ -28,6 +28,7 @@ public interface SyncClientFactory {
 
   /**
    * Get a client which will connect the given node and be cached in the given pool.
+   *
    * @param node the cluster node the client will connect.
    * @param pool the pool that will cache the client for reusing.
    * @return
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
index 73c8d7a..36dfa3e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
@@ -51,6 +51,7 @@ public class SyncClientPool {
 
   /**
    * See getClient(Node node, boolean activatedOnly)
+   *
    * @param node
    * @return
    */
@@ -60,13 +61,13 @@ public class SyncClientPool {
 
   /**
    * Get a client of the given node from the cache if one is available, or create a new one.
-   * <p>
-   * IMPORTANT!!! The caller should check whether the return value is null or not!
    *
-   * @param node          the node want to connect
+   * <p>IMPORTANT!!! The caller should check whether the return value is null or not!
+   *
+   * @param node the node to connect to
    * @param activatedOnly if true, only return a client if the node's NodeStatus.isActivated ==
-   *                      true, which avoid unnecessary wait for already down nodes, but heartbeat
-   *                      attempts should always try to connect so the node can be reactivated ASAP
+   *     true, which avoids unnecessary waits for already-down nodes, but heartbeat attempts should
+   *     always try to connect so the node can be reactivated ASAP
    * @return if the node can connect, return the client, otherwise null
    */
   public Client getClient(Node node, boolean activatedOnly) {
@@ -75,7 +76,7 @@ public class SyncClientPool {
       return null;
     }
 
-    //As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
+    // As clientCaches is a ConcurrentHashMap, computeIfAbsent is thread-safe.
     Deque<Client> clientStack = clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
     synchronized (this) {
       if (clientStack.isEmpty()) {
@@ -101,7 +102,8 @@ public class SyncClientPool {
         this.wait(WAIT_CLIENT_TIMEOUT_MS);
         if (clientStack.isEmpty()
             && System.currentTimeMillis() - waitStart >= WAIT_CLIENT_TIMEOUT_MS) {
-          logger.warn("Cannot get an available client after {}ms, create a new one",
+          logger.warn(
+              "Cannot get an available client after {}ms, create a new one",
               WAIT_CLIENT_TIMEOUT_MS);
           nodeClientNumMap.put(node, nodeClientNum + 1);
           return createClient(node, nodeClientNum);
@@ -123,7 +125,7 @@ public class SyncClientPool {
    */
   void putClient(Node node, Client client) {
     ClusterNode clusterNode = new ClusterNode(node);
-    //As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
+    // As clientCaches is a ConcurrentHashMap, computeIfAbsent is thread-safe.
     Deque<Client> clientStack = clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
     synchronized (this) {
       if (client.getInputProtocol() != null && client.getInputProtocol().getTransport().isOpen()) {
@@ -147,8 +149,8 @@ public class SyncClientPool {
     try {
       return syncClientFactory.getSyncClient(node, this);
     } catch (TTransportException e) {
-      if (e.getCause() instanceof ConnectException || e
-          .getCause() instanceof SocketTimeoutException) {
+      if (e.getCause() instanceof ConnectException
+          || e.getCause() instanceof SocketTimeoutException) {
         logger.debug("Cannot open transport for client {} : {}", node, e.getMessage());
       } else {
         logger.error("Cannot open transport for client {}", node, e);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
index 23e084c..de4e958 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
@@ -49,8 +49,11 @@ public class SyncDataClient extends Client {
   public SyncDataClient(TProtocolFactory protocolFactory, Node node, SyncClientPool pool)
       throws TTransportException {
     // the difference of the two clients lies in the port
-    super(protocolFactory.getProtocol(RpcTransportFactory.INSTANCE.getTransport(
-        new TSocket(node.getIp(), node.getDataPort(), RaftServer.getConnectionTimeoutInMS()))));
+    super(
+        protocolFactory.getProtocol(
+            RpcTransportFactory.INSTANCE.getTransport(
+                new TSocket(
+                    node.getIp(), node.getDataPort(), RaftServer.getConnectionTimeoutInMS()))));
     this.node = node;
     this.pool = pool;
     getInputProtocol().getTransport().open();
@@ -58,8 +61,7 @@ public class SyncDataClient extends Client {
 
   public void setTimeout(int timeout) {
     // the same transport is used in both input and output
-    ((TimeoutChangeableTransport) (getInputProtocol().getTransport()))
-        .setTimeout(timeout);
+    ((TimeoutChangeableTransport) (getInputProtocol().getTransport())).setTimeout(timeout);
   }
 
   @TestOnly
@@ -94,9 +96,7 @@ public class SyncDataClient extends Client {
 
   @Override
   public String toString() {
-    return "DataClient{" +
-        "node=" + node +
-        '}';
+    return "DataClient{" + "node=" + node + '}';
   }
 
   public Node getNode() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
index 134ec11..9aa9c38 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
@@ -36,15 +36,18 @@ public class SyncDataHeartbeatClient extends SyncDataClient {
   private SyncDataHeartbeatClient(TProtocolFactory protocolFactory, Node node, SyncClientPool pool)
       throws TTransportException {
     // the difference of the two clients lies in the port
-    super(protocolFactory.getProtocol(RpcTransportFactory.INSTANCE.getTransport(
-        new TSocket(node.getIp(), node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
-            RaftServer.getConnectionTimeoutInMS()))));
+    super(
+        protocolFactory.getProtocol(
+            RpcTransportFactory.INSTANCE.getTransport(
+                new TSocket(
+                    node.getIp(),
+                    node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
+                    RaftServer.getConnectionTimeoutInMS()))));
     this.node = node;
     this.pool = pool;
     getInputProtocol().getTransport().open();
   }
 
-
   public static class FactorySync implements SyncClientFactory {
 
     private TProtocolFactory protocolFactory;
@@ -62,10 +65,12 @@ public class SyncDataHeartbeatClient extends SyncDataClient {
 
   @Override
   public String toString() {
-    return "SyncHeartbeatDataClient{" +
-        "node=" + super.getNode() + "," +
-        "dataHeartbeatPort=" + (super.getNode().getDataPort()
-        + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET) +
-        '}';
+    return "SyncHeartbeatDataClient{"
+        + "node="
+        + super.getNode()
+        + ","
+        + "dataHeartbeatPort="
+        + (super.getNode().getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET)
+        + '}';
   }
 }
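Aside: as the constructor above shows, the only functional difference between
SyncDataClient and SyncDataHeartbeatClient is the target port. The convention, as a
two-line sketch using the same accessors that appear in the hunk:

    int dataPort = node.getDataPort();                                       // normal data RPCs
    int heartbeatPort = dataPort + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET;  // heartbeats only
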
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
index 6e4d450..e5043ca 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
@@ -45,8 +45,11 @@ public class SyncMetaClient extends Client {
 
   public SyncMetaClient(TProtocolFactory protocolFactory, Node node, SyncClientPool pool)
       throws TTransportException {
-    super(protocolFactory.getProtocol(RpcTransportFactory.INSTANCE.getTransport(
-        new TSocket(node.getIp(), node.getMetaPort(), RaftServer.getConnectionTimeoutInMS()))));
+    super(
+        protocolFactory.getProtocol(
+            RpcTransportFactory.INSTANCE.getTransport(
+                new TSocket(
+                    node.getIp(), node.getMetaPort(), RaftServer.getConnectionTimeoutInMS()))));
     this.node = node;
     this.pool = pool;
     getInputProtocol().getTransport().open();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
index dddc66f..ab0b2b4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
@@ -36,15 +36,18 @@ public class SyncMetaHeartbeatClient extends SyncMetaClient {
   private SyncMetaHeartbeatClient(TProtocolFactory protocolFactory, Node node, SyncClientPool pool)
       throws TTransportException {
     // the difference of the two clients lies in the port
-    super(protocolFactory.getProtocol(RpcTransportFactory.INSTANCE.getTransport(
-        new TSocket(node.getIp(), node.getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET,
-            RaftServer.getConnectionTimeoutInMS()))));
+    super(
+        protocolFactory.getProtocol(
+            RpcTransportFactory.INSTANCE.getTransport(
+                new TSocket(
+                    node.getIp(),
+                    node.getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET,
+                    RaftServer.getConnectionTimeoutInMS()))));
     this.node = node;
     this.pool = pool;
     getInputProtocol().getTransport().open();
   }
 
-
   public static class FactorySync implements SyncClientFactory {
 
     private TProtocolFactory protocolFactory;
@@ -62,10 +65,12 @@ public class SyncMetaHeartbeatClient extends SyncMetaClient {
 
   @Override
   public String toString() {
-    return "SyncMetaHeartbeatClient{" +
-        "node=" + super.getNode() + "," +
-        "metaHeartbeatPort=" + (super.getNode().getMetaPort()
-        + ClusterUtils.META_HEARTBEAT_PORT_OFFSET) +
-        '}';
+    return "SyncMetaHeartbeatClient{"
+        + "node="
+        + super.getNode()
+        + ","
+        + "metaHeartbeatPort="
+        + (super.getNode().getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET)
+        + '}';
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index 6d043a6..7d83f23 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -21,7 +21,6 @@ package org.apache.iotdb.cluster.config;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.iotdb.cluster.utils.ClusterConsistent;
 
 public class ClusterConfig {
@@ -33,25 +32,19 @@ public class ClusterConfig {
   private int internalDataPort = 40010;
   private int clusterRpcPort = 55560;
 
-  /**
-   * each one is a "<IP | domain name>:<meta port>:<data port>:<client port></>" string tuple
-   */
-  private List<String> seedNodeUrls = Arrays
-      .asList("127.0.0.1:9003:40010:55560", "127.0.0.1:9005:40012:55561",
-          "127.0.0.1:9007:40014:55562");
+  /** each one is a "<IP | domain name>:<meta port>:<data port>:<client port>" string tuple */
+  private List<String> seedNodeUrls =
+      Arrays.asList(
+          "127.0.0.1:9003:40010:55560", "127.0.0.1:9005:40012:55561", "127.0.0.1:9007:40014:55562");
 
-  @ClusterConsistent
-  private boolean isRpcThriftCompressionEnabled = false;
+  @ClusterConsistent private boolean isRpcThriftCompressionEnabled = false;
   private int maxConcurrentClientNum = 10000;
 
-  @ClusterConsistent
-  private int replicationNum = 2;
+  @ClusterConsistent private int replicationNum = 2;
 
-  @ClusterConsistent
-  private String clusterName = "default";
+  @ClusterConsistent private String clusterName = "default";
 
-  @ClusterConsistent
-  private boolean useAsyncServer = true;
+  @ClusterConsistent private boolean useAsyncServer = true;
 
   private boolean useAsyncApplier = true;
 
@@ -65,32 +58,26 @@ public class ClusterConfig {
 
   private boolean useBatchInLogCatchUp = true;
 
-  /**
-   * max number of committed logs to be saved
-   */
+  /** max number of committed logs to be saved */
   private int minNumOfLogsInMem = 100;
 
-  /**
-   * max number of committed logs in memory
-   */
+  /** max number of committed logs in memory */
   private int maxNumOfLogsInMem = 1000;
 
-  /**
-   * deletion check period of the submitted log
-   */
+  /** deletion check period of the submitted log */
   private int logDeleteCheckIntervalSecond = -1;
 
-  /**
-   * max number of clients in a ClientPool of a member for one node.
-   */
+  /** max number of clients in a ClientPool of a member for one node. */
   private int maxClientPerNodePerMember = 1000;
 
   /**
    * ClientPool will have so many selector threads (TAsyncClientManager) to distribute to its
    * clients.
    */
-  private int selectorNumOfClientPool = Runtime.getRuntime().availableProcessors() / 3 > 0 ?
-      Runtime.getRuntime().availableProcessors() / 3 : 1;
+  private int selectorNumOfClientPool =
+      Runtime.getRuntime().availableProcessors() / 3 > 0
+          ? Runtime.getRuntime().availableProcessors() / 3
+          : 1;
 
   /**
    * Whether creating schema automatically is enabled, this will replace the one in
@@ -100,7 +87,6 @@ public class ClusterConfig {
 
   private boolean enableRaftLogPersistence = true;
 
-
   private int flushRaftLogThreshold = 10000;
 
   /**
@@ -142,12 +128,9 @@ public class ClusterConfig {
    */
   private int maxNumberOfPersistRaftLogFiles = 5;
 
-  /**
-   * The maximum number of logs saved on the disk
-   */
+  /** The maximum number of logs saved on the disk */
   private int maxPersistRaftLogNumberOnDisk = 1_000_000;
 
-
   private boolean enableUsePersistLogOnDiskToCatchUp = false;
 
   /**
@@ -158,9 +141,8 @@ public class ClusterConfig {
   private int maxNumberOfLogsPerFetchOnDisk = 1000;
 
   /**
-   * When set to true, if the log queue of a follower fills up, LogDispatcher will wait for a
-   * while until the queue becomes available, otherwise LogDispatcher will just ignore that slow
-   * node.
+   * When set to true, if the log queue of a follower fills up, LogDispatcher will wait for a while
+   * until the queue becomes available, otherwise LogDispatcher will just ignore that slow node.
    */
   private boolean waitForSlowNode = true;
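Aside: each seedNodeUrls entry above follows the documented
"<IP | domain name>:<meta port>:<data port>:<client port>" form. An illustrative split
of one entry (the real parsing lives elsewhere in the cluster module; this is only a
sketch):

    String url = "127.0.0.1:9003:40010:55560";
    String[] parts = url.split(":");
    String host = parts[0];                       // "127.0.0.1"
    int metaPort = Integer.parseInt(parts[1]);    // 9003
    int dataPort = Integer.parseInt(parts[2]);    // 40010
    int clientPort = Integer.parseInt(parts[3]);  // 55560
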
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
index 8d3a4da..401e90e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
@@ -27,6 +27,7 @@ public class ClusterConstant {
    * We only change the two values in tests to reduce test time, so they are essentially constant.
    */
   private static long electionLeastTimeOutMs = 2 * 1000L;
+
   private static long electionRandomTimeOutMs = 3 * 1000L;
 
   public static final int SLOT_NUM = 10000;
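Aside: a base-plus-random pair such as electionLeastTimeOutMs/electionRandomTimeOutMs is
the usual Raft device for avoiding split votes. How such constants are typically
combined (illustrative only; the actual use site is not part of this diff):

    import java.util.concurrent.ThreadLocalRandom;

    long electionTimeoutMs =
        electionLeastTimeOutMs + ThreadLocalRandom.current().nextLong(electionRandomTimeOutMs);
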
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
index 3c9e8ec..e4dadfa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
@@ -52,7 +52,6 @@ public class ClusterDescriptor {
   private static final String OPTION_CLUSTER_RPC_PORT = "cluster_rpc_port";
   private static final String OPTION_SEED_NODES = "seed_nodes";
 
-
   private ClusterConfig config = new ClusterConfig();
   private static CommandLine commandLine;
 
@@ -91,23 +90,33 @@ public class ClusterDescriptor {
   public void replaceProps(String[] params) {
     Options options = new Options();
 
-    Option metaPort = new Option(OPTION_INTERVAL_META_PORT, OPTION_INTERVAL_META_PORT, true,
-        "port for metadata service");
+    Option metaPort =
+        new Option(
+            OPTION_INTERVAL_META_PORT,
+            OPTION_INTERVAL_META_PORT,
+            true,
+            "port for metadata service");
     metaPort.setRequired(false);
     options.addOption(metaPort);
 
-    Option dataPort = new Option(OPTION_INTERVAL_DATA_PORT, OPTION_INTERVAL_DATA_PORT, true,
-        "port for data service");
+    Option dataPort =
+        new Option(
+            OPTION_INTERVAL_DATA_PORT, OPTION_INTERVAL_DATA_PORT, true, "port for data service");
     dataPort.setRequired(false);
     options.addOption(dataPort);
 
-    Option clusterRpcPort = new Option(OPTION_CLUSTER_RPC_PORT, OPTION_CLUSTER_RPC_PORT, true,
-        "port for client service");
+    Option clusterRpcPort =
+        new Option(
+            OPTION_CLUSTER_RPC_PORT, OPTION_CLUSTER_RPC_PORT, true, "port for client service");
     clusterRpcPort.setRequired(false);
     options.addOption(clusterRpcPort);
 
-    Option seedNodes = new Option(OPTION_SEED_NODES, OPTION_SEED_NODES, true,
-        "comma-separated {IP/DOMAIN}:meta_port:data_port:client_port pairs");
+    Option seedNodes =
+        new Option(
+            OPTION_SEED_NODES,
+            OPTION_SEED_NODES,
+            true,
+            "comma-separated {IP/DOMAIN}:meta_port:data_port:client_port pairs");
     seedNodes.setRequired(false);
     options.addOption(seedNodes);
 
@@ -116,20 +125,20 @@ public class ClusterDescriptor {
       logger.error("replaces properties failed, use default conf params");
     } else {
       if (commandLine.hasOption(OPTION_INTERVAL_META_PORT)) {
-        config.setInternalMetaPort(Integer.parseInt(commandLine.getOptionValue(
-            OPTION_INTERVAL_META_PORT)));
+        config.setInternalMetaPort(
+            Integer.parseInt(commandLine.getOptionValue(OPTION_INTERVAL_META_PORT)));
         logger.debug("replace local meta port with={}", config.getInternalMetaPort());
       }
 
       if (commandLine.hasOption(OPTION_INTERVAL_DATA_PORT)) {
-        config.setInternalDataPort(Integer.parseInt(commandLine.getOptionValue(
-            OPTION_INTERVAL_DATA_PORT)));
+        config.setInternalDataPort(
+            Integer.parseInt(commandLine.getOptionValue(OPTION_INTERVAL_DATA_PORT)));
         logger.debug("replace local data port with={}", config.getInternalDataPort());
       }
 
       if (commandLine.hasOption(OPTION_CLUSTER_RPC_PORT)) {
-        config.setClusterRpcPort(Integer.parseInt(commandLine.getOptionValue(
-            OPTION_CLUSTER_RPC_PORT)));
+        config.setClusterRpcPort(
+            Integer.parseInt(commandLine.getOptionValue(OPTION_CLUSTER_RPC_PORT)));
         logger.debug("replace local cluster rpc port with={}", config.getClusterRpcPort());
       }
 
@@ -164,7 +173,9 @@ public class ClusterDescriptor {
       }
     }
     config.setSeedNodeUrls(newSeedUrls);
-    logger.debug("after replace, the clusterRpcIP={}, seedUrls={}", config.getClusterRpcIp(),
+    logger.debug(
+        "after replace, the clusterRpcIP={}, seedUrls={}",
+        config.getClusterRpcIp(),
         config.getSeedNodeUrls());
   }
 
@@ -179,9 +190,7 @@ public class ClusterDescriptor {
     return true;
   }
 
-  /**
-   * load an property file and set TsfileDBConfig variables.
-   */
+  /** load a property file and set TsfileDBConfig variables. */
   private void loadProps() {
 
     String url = getPropsUrl();
@@ -196,101 +205,147 @@ public class ClusterDescriptor {
     }
     config.setClusterRpcIp(properties.getProperty("cluster_rpc_ip", config.getClusterRpcIp()));
 
-    config.setInternalMetaPort(Integer.parseInt(properties.getProperty(OPTION_INTERVAL_META_PORT,
-        String.valueOf(config.getInternalMetaPort()))));
+    config.setInternalMetaPort(
+        Integer.parseInt(
+            properties.getProperty(
+                OPTION_INTERVAL_META_PORT, String.valueOf(config.getInternalMetaPort()))));
 
-    config.setInternalDataPort(Integer.parseInt(properties.getProperty(OPTION_INTERVAL_DATA_PORT,
-        Integer.toString(config.getInternalDataPort()))));
+    config.setInternalDataPort(
+        Integer.parseInt(
+            properties.getProperty(
+                OPTION_INTERVAL_DATA_PORT, Integer.toString(config.getInternalDataPort()))));
 
-    config.setClusterRpcPort(Integer.parseInt(properties.getProperty(OPTION_CLUSTER_RPC_PORT,
-        Integer.toString(config.getClusterRpcPort()))));
+    config.setClusterRpcPort(
+        Integer.parseInt(
+            properties.getProperty(
+                OPTION_CLUSTER_RPC_PORT, Integer.toString(config.getClusterRpcPort()))));
 
-    config.setMaxConcurrentClientNum(Integer.parseInt(properties.getProperty(
-        "max_concurrent_client_num", String.valueOf(config.getMaxConcurrentClientNum()))));
+    config.setMaxConcurrentClientNum(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_concurrent_client_num", String.valueOf(config.getMaxConcurrentClientNum()))));
 
-    config.setReplicationNum(Integer.parseInt(properties.getProperty(
-        "default_replica_num", String.valueOf(config.getReplicationNum()))));
+    config.setReplicationNum(
+        Integer.parseInt(
+            properties.getProperty(
+                "default_replica_num", String.valueOf(config.getReplicationNum()))));
 
     config.setClusterName(properties.getProperty("cluster_name", config.getClusterName()));
 
-    config.setRpcThriftCompressionEnabled(Boolean.parseBoolean(properties.getProperty(
-        "rpc_thrift_compression_enable", String.valueOf(config.isRpcThriftCompressionEnabled()))));
-
-    config.setConnectionTimeoutInMS(Integer.parseInt(properties
-        .getProperty("connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS()))));
-
-    config.setReadOperationTimeoutMS(Integer.parseInt(properties
-        .getProperty("read_operation_timeout_ms",
-            String.valueOf(config.getReadOperationTimeoutMS()))));
-
-    config.setCatchUpTimeoutMS(Integer.parseInt(properties
-        .getProperty("catch_up_timeout_ms",
-            String.valueOf(config.getCatchUpTimeoutMS()))));
-
-    config.setWriteOperationTimeoutMS(Integer.parseInt(properties
-        .getProperty("write_operation_timeout_ms",
-            String.valueOf(config.getWriteOperationTimeoutMS()))));
-
-    config.setUseBatchInLogCatchUp(Boolean.parseBoolean(properties.getProperty(
-        "use_batch_in_catch_up", String.valueOf(config.isUseBatchInLogCatchUp()))));
-
-    config.setMinNumOfLogsInMem(Integer.parseInt(properties
-        .getProperty("min_num_of_logs_in_mem", String.valueOf(config.getMinNumOfLogsInMem()))));
-
-    config.setMaxNumOfLogsInMem(Integer.parseInt(properties
-        .getProperty("max_num_of_logs_in_mem", String.valueOf(config.getMaxNumOfLogsInMem()))));
-
-    config.setLogDeleteCheckIntervalSecond(Integer.parseInt(properties
-        .getProperty("log_deletion_check_interval_second",
-            String.valueOf(config.getLogDeleteCheckIntervalSecond()))));
-
-    config.setEnableAutoCreateSchema(Boolean.parseBoolean(properties
-        .getProperty("enable_auto_create_schema",
-            String.valueOf(config.isEnableAutoCreateSchema()))));
+    config.setRpcThriftCompressionEnabled(
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "rpc_thrift_compression_enable",
+                String.valueOf(config.isRpcThriftCompressionEnabled()))));
+
+    config.setConnectionTimeoutInMS(
+        Integer.parseInt(
+            properties.getProperty(
+                "connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS()))));
+
+    config.setReadOperationTimeoutMS(
+        Integer.parseInt(
+            properties.getProperty(
+                "read_operation_timeout_ms", String.valueOf(config.getReadOperationTimeoutMS()))));
+
+    config.setCatchUpTimeoutMS(
+        Integer.parseInt(
+            properties.getProperty(
+                "catch_up_timeout_ms", String.valueOf(config.getCatchUpTimeoutMS()))));
+
+    config.setWriteOperationTimeoutMS(
+        Integer.parseInt(
+            properties.getProperty(
+                "write_operation_timeout_ms",
+                String.valueOf(config.getWriteOperationTimeoutMS()))));
+
+    config.setUseBatchInLogCatchUp(
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "use_batch_in_catch_up", String.valueOf(config.isUseBatchInLogCatchUp()))));
+
+    config.setMinNumOfLogsInMem(
+        Integer.parseInt(
+            properties.getProperty(
+                "min_num_of_logs_in_mem", String.valueOf(config.getMinNumOfLogsInMem()))));
+
+    config.setMaxNumOfLogsInMem(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_num_of_logs_in_mem", String.valueOf(config.getMaxNumOfLogsInMem()))));
+
+    config.setLogDeleteCheckIntervalSecond(
+        Integer.parseInt(
+            properties.getProperty(
+                "log_deletion_check_interval_second",
+                String.valueOf(config.getLogDeleteCheckIntervalSecond()))));
+
+    config.setEnableAutoCreateSchema(
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "enable_auto_create_schema", String.valueOf(config.isEnableAutoCreateSchema()))));
 
     config.setUseAsyncServer(
-        Boolean.parseBoolean(properties.getProperty("is_use_async_server",
-            String.valueOf(config.isUseAsyncServer()))));
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "is_use_async_server", String.valueOf(config.isUseAsyncServer()))));
 
     config.setUseAsyncApplier(
-        Boolean.parseBoolean(properties.getProperty("is_use_async_applier",
-            String.valueOf(config.isUseAsyncApplier()))));
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "is_use_async_applier", String.valueOf(config.isUseAsyncApplier()))));
 
     config.setEnableRaftLogPersistence(
-        Boolean.parseBoolean(properties.getProperty("is_enable_raft_log_persistence",
-            String.valueOf(config.isEnableRaftLogPersistence()))));
-
-    config.setFlushRaftLogThreshold(Integer.parseInt(properties
-        .getProperty("flush_raft_log_threshold", String.valueOf(config.getFlushRaftLogThreshold())))
-    );
-
-    config.setRaftLogBufferSize(Integer.parseInt(properties
-        .getProperty("raft_log_buffer_size", String.valueOf(config.getRaftLogBufferSize())))
-    );
-
-    config.setMaxRaftLogIndexSizeInMemory(Integer
-        .parseInt(properties.getProperty("max_raft_log_index_size_in_memory",
-            String.valueOf(config.getMaxRaftLogIndexSizeInMemory()))));
-
-    config.setMaxRaftLogPersistDataSizePerFile(Integer
-        .parseInt(properties.getProperty("max_raft_log_persist_data_size_per_file",
-            String.valueOf(config.getMaxRaftLogPersistDataSizePerFile()))));
-
-    config.setMaxNumberOfPersistRaftLogFiles(Integer
-        .parseInt(properties.getProperty("max_number_of_persist_raft_log_files",
-            String.valueOf(config.getMaxNumberOfPersistRaftLogFiles()))));
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "is_enable_raft_log_persistence",
+                String.valueOf(config.isEnableRaftLogPersistence()))));
+
+    config.setFlushRaftLogThreshold(
+        Integer.parseInt(
+            properties.getProperty(
+                "flush_raft_log_threshold", String.valueOf(config.getFlushRaftLogThreshold()))));
+
+    config.setRaftLogBufferSize(
+        Integer.parseInt(
+            properties.getProperty(
+                "raft_log_buffer_size", String.valueOf(config.getRaftLogBufferSize()))));
+
+    config.setMaxRaftLogIndexSizeInMemory(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_raft_log_index_size_in_memory",
+                String.valueOf(config.getMaxRaftLogIndexSizeInMemory()))));
+
+    config.setMaxRaftLogPersistDataSizePerFile(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_raft_log_persist_data_size_per_file",
+                String.valueOf(config.getMaxRaftLogPersistDataSizePerFile()))));
+
+    config.setMaxNumberOfPersistRaftLogFiles(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_number_of_persist_raft_log_files",
+                String.valueOf(config.getMaxNumberOfPersistRaftLogFiles()))));
 
     config.setMaxPersistRaftLogNumberOnDisk(
-        Integer.parseInt(properties.getProperty("max_persist_raft_log_number_on_disk",
-            String.valueOf(config.getMaxPersistRaftLogNumberOnDisk()))));
+        Integer.parseInt(
+            properties.getProperty(
+                "max_persist_raft_log_number_on_disk",
+                String.valueOf(config.getMaxPersistRaftLogNumberOnDisk()))));
 
     config.setMaxNumberOfLogsPerFetchOnDisk(
-        Integer.parseInt(properties.getProperty("max_number_of_logs_per_fetch_on_disk",
-            String.valueOf(config.getMaxNumberOfLogsPerFetchOnDisk()))));
+        Integer.parseInt(
+            properties.getProperty(
+                "max_number_of_logs_per_fetch_on_disk",
+                String.valueOf(config.getMaxNumberOfLogsPerFetchOnDisk()))));
 
     config.setEnableUsePersistLogOnDiskToCatchUp(
-        Boolean.parseBoolean(properties.getProperty("enable_use_persist_log_on_disk_to_catch_up",
-            String.valueOf(config.isEnableUsePersistLogOnDiskToCatchUp()))));
+        Boolean.parseBoolean(
+            properties.getProperty(
+                "enable_use_persist_log_on_disk_to_catch_up",
+                String.valueOf(config.isEnableUsePersistLogOnDiskToCatchUp()))));
 
     String consistencyLevel = properties.getProperty("consistency_level");
     if (consistencyLevel != null) {
@@ -353,12 +408,15 @@ public class ClusterDescriptor {
    */
   public void loadHotModifiedProps(Properties properties) {
 
-    config.setMaxConcurrentClientNum(Integer.parseInt(properties
-        .getProperty("max_concurrent_client_num",
-            String.valueOf(config.getMaxConcurrentClientNum()))));
+    config.setMaxConcurrentClientNum(
+        Integer.parseInt(
+            properties.getProperty(
+                "max_concurrent_client_num", String.valueOf(config.getMaxConcurrentClientNum()))));
 
-    config.setConnectionTimeoutInMS(Integer.parseInt(properties
-        .getProperty("connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS()))));
+    config.setConnectionTimeoutInMS(
+        Integer.parseInt(
+            properties.getProperty(
+                "connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS()))));
 
     logger.info("Set cluster configuration {}", properties);
   }
@@ -367,5 +425,4 @@ public class ClusterDescriptor {
     InetAddress address = InetAddress.getByName(hostname);
     return address.getHostAddress();
   }
-
-}
\ No newline at end of file
+}
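Aside: every setter call in loadProps() above repeats one idiom: read the key, fall back
to the value already held by the config, then parse. Factored out as a sketch (the
helper name intProp is made up for illustration):

    import java.util.Properties;

    static int intProp(Properties props, String key, int current) {
      // a missing key falls back to the current config value, so partial files still work
      return Integer.parseInt(props.getProperty(key, String.valueOf(current)));
    }

    // e.g.: config.setClusterRpcPort(intProp(properties, "cluster_rpc_port", config.getClusterRpcPort()));
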
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java
index b7d0303..cd95bde 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java
@@ -18,7 +18,6 @@
  */
 package org.apache.iotdb.cluster.config;
 
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,9 +34,7 @@ public enum ConsistencyLevel {
    */
   MID_CONSISTENCY("mid"),
 
-  /**
-   * weak consistency do not synchronize with the leader and simply use the local data
-   */
+  /** weak consistency does not synchronize with the leader and simply uses the local data */
   WEAK_CONSISTENCY("weak"),
   ;
 
@@ -60,10 +57,11 @@ public enum ConsistencyLevel {
       case "weak":
         return ConsistencyLevel.WEAK_CONSISTENCY;
       default:
-        logger.warn("Unsupported consistency level={}, use default consistency level={}",
-            consistencyLevel, ConsistencyLevel.MID_CONSISTENCY.consistencyLevelName);
+        logger.warn(
+            "Unsupported consistency level={}, use default consistency level={}",
+            consistencyLevel,
+            ConsistencyLevel.MID_CONSISTENCY.consistencyLevelName);
         return ConsistencyLevel.MID_CONSISTENCY;
     }
   }
-
 }
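Aside: the switch above deliberately never throws; an unrecognized level name logs a
warning and falls back to MID_CONSISTENCY. A hypothetical caller (the enclosing method
name is cut off by the hunk, so getConsistencyLevel here is an assumption):

    ConsistencyLevel a = ConsistencyLevel.getConsistencyLevel("weak");  // WEAK_CONSISTENCY
    ConsistencyLevel b = ConsistencyLevel.getConsistencyLevel("oops");  // warns, MID_CONSISTENCY
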
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java b/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
index de36ac4..11f431b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
@@ -19,6 +19,11 @@
 
 package org.apache.iotdb.cluster.coordinator;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 import org.apache.iotdb.cluster.client.async.AsyncDataClient;
 import org.apache.iotdb.cluster.client.sync.SyncDataClient;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -30,8 +35,8 @@ import org.apache.iotdb.cluster.query.ClusterPlanRouter;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.server.RaftServer;
-import org.apache.iotdb.cluster.server.monitor.Timer;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
+import org.apache.iotdb.cluster.server.monitor.Timer;
 import org.apache.iotdb.cluster.utils.PartitionUtils;
 import org.apache.iotdb.cluster.utils.StatusUtils;
 import org.apache.iotdb.db.conf.IoTDBConstant;
@@ -57,15 +62,7 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Coordinator of client non-query request
- */
+/** Coordinator of client non-query requests */
 public class Coordinator {
 
   private static final Logger logger = LoggerFactory.getLogger(Coordinator.class);
@@ -74,13 +71,12 @@ public class Coordinator {
 
   private String name;
   private Node thisNode;
-  /**
-   * router calculates the partition groups that a partitioned plan should be sent to
-   */
+  /** router calculates the partition groups that a partitioned plan should be sent to */
   private ClusterPlanRouter router;
 
-  private static final String MSG_MULTIPLE_ERROR = "The following errors occurred when executing "
-      + "the query, please retry or contact the DBA: ";
+  private static final String MSG_MULTIPLE_ERROR =
+      "The following errors occurred when executing "
+          + "the query, please retry or contact the DBA: ";
 
   public Coordinator(MetaGroupMember metaGroupMember) {
     this.metaGroupMember = metaGroupMember;
@@ -88,9 +84,7 @@ public class Coordinator {
     this.thisNode = metaGroupMember.getThisNode();
   }
 
-  public Coordinator() {
-
-  }
+  public Coordinator() {}
 
   public void setMetaGroupMember(MetaGroupMember metaGroupMember) {
     this.metaGroupMember = metaGroupMember;
@@ -116,13 +110,13 @@ public class Coordinator {
       // run locally
       result = executeNonQueryLocally(plan);
     } else if (PartitionUtils.isGlobalMetaPlan(plan)) {
-      //forward the plan to all meta group nodes
+      // forward the plan to all meta group nodes
       result = metaGroupMember.processNonPartitionedMetaPlan(plan);
     } else if (PartitionUtils.isGlobalDataPlan(plan)) {
-      //forward the plan to all data group nodes
+      // forward the plan to all data group nodes
       result = processNonPartitionedDataPlan(plan);
     } else {
-      //split the plan and forward them to some PartitionGroups
+      // split the plan and forward them to some PartitionGroups
       try {
         result = processPartitionedPlan(plan);
       } catch (UnsupportedPlanException e) {
@@ -133,9 +127,7 @@ public class Coordinator {
     return result;
   }
 
-  /**
-   * execute a non-query plan that is not necessary to be executed on other nodes.
-   */
+  /** execute a non-query plan that does not need to be executed on other nodes. */
   private TSStatus executeNonQueryLocally(PhysicalPlan plan) {
     boolean execRet;
     try {
@@ -184,14 +176,13 @@ public class Coordinator {
       logger.debug("Forwarding global data plan {} to {} groups", plan, globalGroups.size());
       return forwardPlan(globalGroups, plan);
     } catch (CheckConsistencyException e) {
-      logger.debug("Forwarding global data plan {} to meta leader {}", plan,
-          metaGroupMember.getLeader());
+      logger.debug(
+          "Forwarding global data plan {} to meta leader {}", plan, metaGroupMember.getLeader());
       metaGroupMember.waitLeader();
       return metaGroupMember.forwardPlan(plan, metaGroupMember.getLeader(), null);
     }
   }
 
-
   /**
    * A partitioned plan (like batch insertion) will be split into several sub-plans, each belongs to
    * a data group. And these sub-plans will be sent to and executed on the corresponding groups
@@ -209,15 +200,15 @@ public class Coordinator {
     try {
       planGroupMap = splitPlan(plan);
     } catch (CheckConsistencyException checkConsistencyException) {
-      return StatusUtils
-          .getStatus(StatusUtils.CONSISTENCY_FAILURE, checkConsistencyException.getMessage());
+      return StatusUtils.getStatus(
+          StatusUtils.CONSISTENCY_FAILURE, checkConsistencyException.getMessage());
     }
 
     // the storage group is not found locally
     if (planGroupMap == null || planGroupMap.isEmpty()) {
       if ((plan instanceof InsertPlan
-          || plan instanceof CreateTimeSeriesPlan
-          || plan instanceof CreateMultiTimeSeriesPlan)
+              || plan instanceof CreateTimeSeriesPlan
+              || plan instanceof CreateMultiTimeSeriesPlan)
           && ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema()) {
         logger.debug("{}: No associated storage group found for {}, auto-creating", name, plan);
         try {
@@ -250,36 +241,37 @@ public class Coordinator {
       if (partitionGroup.contains(thisNode)) {
         // the query should be handled by a group the local node is in, handle it with in the group
         logger.debug("Execute {} in a local group of {}", plan, partitionGroup.getHeader());
-        status = metaGroupMember.getLocalDataMember(partitionGroup.getHeader())
-            .executeNonQueryPlan(plan);
+        status =
+            metaGroupMember
+                .getLocalDataMember(partitionGroup.getHeader())
+                .executeNonQueryPlan(plan);
       } else {
         // forward the query to the group that should handle it
-        logger.debug("Forward {} to a remote group of {}", plan,
-            partitionGroup.getHeader());
+        logger.debug("Forward {} to a remote group of {}", plan, partitionGroup.getHeader());
         status = forwardPlan(plan, partitionGroup);
       }
-      if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() && (
-          !(plan instanceof DeleteTimeSeriesPlan) ||
-              status.getCode() != TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode())) {
+      if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+          && (!(plan instanceof DeleteTimeSeriesPlan)
+              || status.getCode() != TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode())) {
         // execution failed, record the error message
-        errorCodePartitionGroups.add(String.format("[%s@%s:%s]",
-            status.getCode(), partitionGroup.getHeader(),
-            status.getMessage()));
+        errorCodePartitionGroups.add(
+            String.format(
+                "[%s@%s:%s]", status.getCode(), partitionGroup.getHeader(), status.getMessage()));
       }
     }
     if (errorCodePartitionGroups.isEmpty()) {
       status = StatusUtils.OK;
     } else {
-      status = StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR,
-          MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
+      status =
+          StatusUtils.getStatus(
+              StatusUtils.EXECUTE_STATEMENT_ERROR,
+              MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
     }
     logger.debug("{}: executed {} with answer {}", name, plan, status);
     return status;
   }
 
-  /**
-   * split a plan into several sub-plans, each belongs to only one data group.
-   */
+  /** split a plan into several sub-plans, each belonging to only one data group. */
   private Map<PhysicalPlan, PartitionGroup> splitPlan(PhysicalPlan plan)
       throws UnsupportedPlanException, CheckConsistencyException {
     Map<PhysicalPlan, PartitionGroup> planGroupMap = null;
@@ -311,9 +303,9 @@ public class Coordinator {
     if (planGroupMap.size() == 1) {
       status = forwardToSingleGroup(planGroupMap.entrySet().iterator().next());
     } else {
-      if (plan instanceof InsertTabletPlan ||
-          plan instanceof InsertMultiTabletPlan ||
-          plan instanceof CreateMultiTimeSeriesPlan) {
+      if (plan instanceof InsertTabletPlan
+          || plan instanceof InsertMultiTabletPlan
+          || plan instanceof CreateMultiTimeSeriesPlan) {
         // InsertTabletPlan, InsertMultiTabletPlan and CreateMultiTimeSeriesPlan contains many rows,
         // each will correspond to a TSStatus as its execution result,
         // as the plan is split and the sub-plans may have interleaving ranges,
@@ -334,8 +326,8 @@ public class Coordinator {
         status = tmpStatus;
       }
     }
-    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() && status
-        .isSetRedirectNode()) {
+    if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
+        && status.isSetRedirectNode()) {
       status.setCode(TSStatusCode.NEED_REDIRECTION.getStatusCode());
     }
     logger.debug("{}: executed {} with answer {}", name, plan, status);
@@ -362,25 +354,28 @@ public class Coordinator {
     return null;
   }
 
-
   private TSStatus forwardToSingleGroup(Map.Entry<PhysicalPlan, PartitionGroup> entry) {
     TSStatus result;
     if (entry.getValue().contains(thisNode)) {
       // the query should be handled by a group the local node is in, handle it with in the group
-      long startTime = Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP
-          .getOperationStartTime();
-      logger.debug("Execute {} in a local group of {}", entry.getKey(),
-          entry.getValue().getHeader());
-      result = metaGroupMember.getLocalDataMember(entry.getValue().getHeader())
-          .executeNonQueryPlan(entry.getKey());
+      long startTime =
+          Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP
+              .getOperationStartTime();
+      logger.debug(
+          "Execute {} in a local group of {}", entry.getKey(), entry.getValue().getHeader());
+      result =
+          metaGroupMember
+              .getLocalDataMember(entry.getValue().getHeader())
+              .executeNonQueryPlan(entry.getKey());
       Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP
           .calOperationCostTimeFromStart(startTime);
     } else {
       // forward the query to the group that should handle it
-      long startTime = Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP
-          .getOperationStartTime();
-      logger.debug("Forward {} to a remote group of {}", entry.getKey(),
-          entry.getValue().getHeader());
+      long startTime =
+          Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP
+              .getOperationStartTime();
+      logger.debug(
+          "Forward {} to a remote group of {}", entry.getKey(), entry.getValue().getHeader());
       result = forwardPlan(entry.getKey(), entry.getValue());
       Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP
           .calOperationCostTimeFromStart(startTime);
@@ -408,9 +403,10 @@ public class Coordinator {
       }
       if (tmpStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
         // execution failed, record the error message
-        errorCodePartitionGroups.add(String.format("[%s@%s:%s]",
-            tmpStatus.getCode(), entry.getValue().getHeader(),
-            tmpStatus.getMessage()));
+        errorCodePartitionGroups.add(
+            String.format(
+                "[%s@%s:%s]",
+                tmpStatus.getCode(), entry.getValue().getHeader(), tmpStatus.getMessage()));
       }
     }
     TSStatus status;
@@ -420,21 +416,22 @@ public class Coordinator {
         status = StatusUtils.getStatus(status, endPoint);
       }
     } else {
-      status = StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR,
-          MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
+      status =
+          StatusUtils.getStatus(
+              StatusUtils.EXECUTE_STATEMENT_ERROR,
+              MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
     }
     return status;
   }
 
-
   /**
    * Forward each sub-plan to its belonging data group, and combine responses from the groups.
    *
    * @param planGroupMap sub-plan -> data group pairs
    */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
-  private TSStatus forwardMultiSubPlan(Map<PhysicalPlan, PartitionGroup> planGroupMap,
-      PhysicalPlan parentPlan) {
+  private TSStatus forwardMultiSubPlan(
+      Map<PhysicalPlan, PartitionGroup> planGroupMap, PhysicalPlan parentPlan) {
     List<String> errorCodePartitionGroups = new ArrayList<>();
     TSStatus tmpStatus;
     TSStatus[] subStatus = null;
@@ -446,10 +443,9 @@ public class Coordinator {
     for (Map.Entry<PhysicalPlan, PartitionGroup> entry : planGroupMap.entrySet()) {
       tmpStatus = forwardToSingleGroup(entry);
       logger.debug("{}: from {},{},{}", name, entry.getKey(), entry.getValue(), tmpStatus);
-      noFailure =
-          (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) && noFailure;
-      isBatchFailure = (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode())
-          || isBatchFailure;
+      noFailure = (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) && noFailure;
+      isBatchFailure =
+          (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) || isBatchFailure;
       if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
         if (parentPlan instanceof InsertTabletPlan) {
           totalRowNum = ((InsertTabletPlan) parentPlan).getRowCount();
@@ -477,16 +473,20 @@ public class Coordinator {
               Arrays.fill(tmpSubTsStatus, RpcUtils.SUCCESS_STATUS);
               subStatus[parentIndex].subStatus = Arrays.asList(tmpSubTsStatus);
             }
-            TSStatus[] reorderTsStatus = subStatus[parentIndex].subStatus
-                .toArray(new TSStatus[]{});
+            TSStatus[] reorderTsStatus =
+                subStatus[parentIndex].subStatus.toArray(new TSStatus[] {});
 
-            PartitionUtils.reordering(tmpInsertTabletPlan, reorderTsStatus,
-                tmpStatus.subStatus.toArray(new TSStatus[]{}));
+            PartitionUtils.reordering(
+                tmpInsertTabletPlan,
+                reorderTsStatus,
+                tmpStatus.subStatus.toArray(new TSStatus[] {}));
             subStatus[parentIndex].subStatus = Arrays.asList(reorderTsStatus);
           }
         } else if (parentPlan instanceof InsertTabletPlan) {
-          PartitionUtils.reordering((InsertTabletPlan) entry.getKey(), subStatus,
-              tmpStatus.subStatus.toArray(new TSStatus[]{}));
+          PartitionUtils.reordering(
+              (InsertTabletPlan) entry.getKey(),
+              subStatus,
+              tmpStatus.subStatus.toArray(new TSStatus[] {}));
         } else if (parentPlan instanceof CreateMultiTimeSeriesPlan) {
           CreateMultiTimeSeriesPlan subPlan = (CreateMultiTimeSeriesPlan) entry.getKey();
           for (int i = 0; i < subPlan.getIndexes().size(); i++) {
@@ -496,19 +496,25 @@ public class Coordinator {
       }
       if (tmpStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
         // execution failed, record the error message
-        errorCodePartitionGroups.add(String.format("[%s@%s:%s:%s]",
-            tmpStatus.getCode(), entry.getValue().getHeader(),
-            tmpStatus.getMessage(), tmpStatus.subStatus));
+        errorCodePartitionGroups.add(
+            String.format(
+                "[%s@%s:%s:%s]",
+                tmpStatus.getCode(),
+                entry.getValue().getHeader(),
+                tmpStatus.getMessage(),
+                tmpStatus.subStatus));
       }
 
       if (tmpStatus.isSetRedirectNode()) {
-        boolean isLastInsertTabletPlan = parentPlan instanceof InsertTabletPlan
-            && ((InsertTabletPlan) entry.getKey()).getMaxTime()
-            == ((InsertTabletPlan) parentPlan).getMaxTime();
+        boolean isLastInsertTabletPlan =
+            parentPlan instanceof InsertTabletPlan
+                && ((InsertTabletPlan) entry.getKey()).getMaxTime()
+                    == ((InsertTabletPlan) parentPlan).getMaxTime();
 
-        boolean isLastInsertMultiTabletPlan = parentPlan instanceof InsertMultiTabletPlan
-            && ((InsertMultiTabletPlan) entry.getKey()).getMaxTime()
-            == ((InsertMultiTabletPlan) parentPlan).getMaxTime();
+        boolean isLastInsertMultiTabletPlan =
+            parentPlan instanceof InsertMultiTabletPlan
+                && ((InsertMultiTabletPlan) entry.getKey()).getMaxTime()
+                    == ((InsertMultiTabletPlan) parentPlan).getMaxTime();
 
         if (isLastInsertTabletPlan || isLastInsertMultiTabletPlan) {
           endPoint = tmpStatus.getRedirectNode();
@@ -516,40 +522,42 @@ public class Coordinator {
       }
     }
 
-    if (parentPlan instanceof InsertMultiTabletPlan &&
-        !((InsertMultiTabletPlan) parentPlan).getResults().isEmpty()) {
+    if (parentPlan instanceof InsertMultiTabletPlan
+        && !((InsertMultiTabletPlan) parentPlan).getResults().isEmpty()) {
       if (subStatus == null) {
         subStatus = new TSStatus[totalRowNum];
         Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS);
       }
       noFailure = false;
       isBatchFailure = true;
-      for (Map.Entry<Integer, TSStatus> integerTSStatusEntry : ((InsertMultiTabletPlan) parentPlan)
-          .getResults().entrySet()) {
+      for (Map.Entry<Integer, TSStatus> integerTSStatusEntry :
+          ((InsertMultiTabletPlan) parentPlan).getResults().entrySet()) {
         subStatus[integerTSStatusEntry.getKey()] = integerTSStatusEntry.getValue();
       }
     }
 
-    if (parentPlan instanceof CreateMultiTimeSeriesPlan &&
-        !((CreateMultiTimeSeriesPlan) parentPlan).getResults().isEmpty()) {
+    if (parentPlan instanceof CreateMultiTimeSeriesPlan
+        && !((CreateMultiTimeSeriesPlan) parentPlan).getResults().isEmpty()) {
       if (subStatus == null) {
         subStatus = new TSStatus[totalRowNum];
         Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS);
       }
       noFailure = false;
       isBatchFailure = true;
-      for (Map.Entry<Integer, TSStatus> integerTSStatusEntry : ((CreateMultiTimeSeriesPlan) parentPlan)
-          .getResults().entrySet()) {
+      for (Map.Entry<Integer, TSStatus> integerTSStatusEntry :
+          ((CreateMultiTimeSeriesPlan) parentPlan).getResults().entrySet()) {
         subStatus[integerTSStatusEntry.getKey()] = integerTSStatusEntry.getValue();
       }
     }
-    return concludeFinalStatus(noFailure, endPoint, isBatchFailure, subStatus,
-        errorCodePartitionGroups);
+    return concludeFinalStatus(
+        noFailure, endPoint, isBatchFailure, subStatus, errorCodePartitionGroups);
   }
 
-
-  private TSStatus concludeFinalStatus(boolean noFailure, EndPoint endPoint,
-      boolean isBatchFailure, TSStatus[] subStatus,
+  private TSStatus concludeFinalStatus(
+      boolean noFailure,
+      EndPoint endPoint,
+      boolean isBatchFailure,
+      TSStatus[] subStatus,
       List<String> errorCodePartitionGroups) {
     TSStatus status;
     if (noFailure) {
@@ -560,13 +568,14 @@ public class Coordinator {
     } else if (isBatchFailure) {
       status = RpcUtils.getStatus(Arrays.asList(subStatus));
     } else {
-      status = StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR,
-          MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
+      status =
+          StatusUtils.getStatus(
+              StatusUtils.EXECUTE_STATEMENT_ERROR,
+              MSG_MULTIPLE_ERROR + errorCodePartitionGroups.toString());
     }
     return status;
   }
 
-
   /**
    * Forward a plan to the DataGroupMember of one node in the group. Only when all nodes time out,
    * will a TIME_OUT be returned.
@@ -601,15 +610,16 @@ public class Coordinator {
   /**
    * Forward a non-query plan to the data port of "receiver"
    *
-   * @param plan   a non-query plan
+   * @param plan a non-query plan
    * @param header to determine which DataGroupMember of "receiver" will process the request.
    * @return a TSStatus indicating if the forwarding is successful.
    */
   private TSStatus forwardDataPlanAsync(PhysicalPlan plan, Node receiver, Node header)
       throws IOException {
-    RaftService.AsyncClient client = metaGroupMember.getClientProvider()
-        .getAsyncDataClient(receiver,
-            RaftServer.getWriteOperationTimeoutMS());
+    RaftService.AsyncClient client =
+        metaGroupMember
+            .getClientProvider()
+            .getAsyncDataClient(receiver, RaftServer.getWriteOperationTimeoutMS());
     return this.metaGroupMember.forwardPlanAsync(plan, receiver, header, client);
   }
 
@@ -617,8 +627,10 @@ public class Coordinator {
       throws IOException {
     RaftService.Client client = null;
     try {
-      client = metaGroupMember.getClientProvider().getSyncDataClient(receiver,
-          RaftServer.getWriteOperationTimeoutMS());
+      client =
+          metaGroupMember
+              .getClientProvider()
+              .getSyncDataClient(receiver, RaftServer.getWriteOperationTimeoutMS());
     } catch (TException e) {
       throw new IOException(e);
     }
@@ -628,7 +640,7 @@ public class Coordinator {
   /**
    * Get a thrift client that will connect to "node" using the data port.
    *
-   * @param node    the node to be connected
+   * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
   public AsyncDataClient getAsyncDataClient(Node node, int timeout) throws IOException {
@@ -642,7 +654,7 @@ public class Coordinator {
   /**
    * Get a thrift client that will connect to "node" using the data port.
    *
-   * @param node    the node to be connected
+   * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
   public SyncDataClient getSyncDataClient(Node node, int timeout) throws TException {
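Aside: pulling the routing branches of executeNonQuery() together from the hunks above,
the dispatch order is as sketched below (the first condition sits above the visible
hunk, so it appears here only as a placeholder):

    TSStatus result;
    if (/* plan can run locally; condition not visible in this hunk */ false) {
      result = executeNonQueryLocally(plan);                        // run locally
    } else if (PartitionUtils.isGlobalMetaPlan(plan)) {
      result = metaGroupMember.processNonPartitionedMetaPlan(plan); // all meta group nodes
    } else if (PartitionUtils.isGlobalDataPlan(plan)) {
      result = processNonPartitionedDataPlan(plan);                 // all data group nodes
    } else {
      result = processPartitionedPlan(plan);                        // split across partition groups
    }
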
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java
index f6fe674..3e7ff15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java
@@ -19,9 +19,7 @@
 
 package org.apache.iotdb.cluster.exception;
 
-/**
- * Raised when a node received an AddNodeRequest of adding itself.
- */
+/** Raised when a node receives an AddNodeRequest asking it to add itself. */
 public class AddSelfException extends Exception {
 
   public AddSelfException() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java
index 813b924..1bf2c54 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java
@@ -22,7 +22,10 @@ package org.apache.iotdb.cluster.exception;
 public class BadSeedUrlFormatException extends Exception {
 
   public BadSeedUrlFormatException(String seedUrl) {
-    super(String.format("Seed url %s has bad format, which should be "
-        + "{IP/DomainName}:{metaPort}:{dataPort}:{clientPort}", seedUrl));
+    super(
+        String.format(
+            "Seed url %s has bad format, which should be "
+                + "{IP/DomainName}:{metaPort}:{dataPort}:{clientPort}",
+            seedUrl));
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java
index 12ac407..55b9722 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java
@@ -29,6 +29,5 @@ public class CheckConsistencyException extends Exception {
   }
 
   public static final CheckConsistencyException CHECK_STRONG_CONSISTENCY_EXCEPTION =
-      new CheckConsistencyException(
-      "strong consistency, sync with leader failed");
+      new CheckConsistencyException("strong consistency, sync with leader failed");
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java
index 31c7150..71e2eeb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java
@@ -22,7 +22,8 @@ package org.apache.iotdb.cluster.exception;
 public class ConfigInconsistentException extends Exception {
 
   public ConfigInconsistentException() {
-    super("The configuration of this node is inconsistent with the cluster. See previous logs for "
-        + "explanation");
+    super(
+        "The configuration of this node is inconsistent with the cluster. See previous logs for "
+            + "explanation");
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java
index a5b2888..629252f9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java
@@ -21,9 +21,10 @@ package org.apache.iotdb.cluster.exception;
 
 public class EntryCompactedException extends Exception {
 
-    public EntryCompactedException(long index, long boundary) {
-        super(String
-            .format("Entry index %d is unavailable due to compaction, and the lower bound is %d",
-                index, boundary));
-    }
+  public EntryCompactedException(long index, long boundary) {
+    super(
+        String.format(
+            "Entry index %d is unavailable due to compaction, and the lower bound is %d",
+            index, boundary));
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java
index f4b6984..d5d0303 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java
@@ -21,8 +21,8 @@ package org.apache.iotdb.cluster.exception;
 
 public class EntryUnavailableException extends Exception {
 
-    public EntryUnavailableException(long index, long boundary) {
-        super(String
-            .format("Entry index %d is unavailable, and the upper bound is %d", index, boundary));
-    }
+  public EntryUnavailableException(long index, long boundary) {
+    super(
+        String.format("Entry index %d is unavailable, and the upper bound is %d", index, boundary));
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java
index bf6c3c6..bffd796 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java
@@ -21,9 +21,7 @@ package org.apache.iotdb.cluster.exception;
 import java.util.Collection;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 
-/**
- * Raised when a request should be forwarded to the leader but the leader cannot be found.
- */
+/** Raised when a request should be forwarded to the leader but the leader cannot be found. */
 public class LeaderUnknownException extends Exception {
 
   public LeaderUnknownException(Collection<Node> nodes) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java
index 63d7f55..0139e42 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java
@@ -21,10 +21,13 @@ package org.apache.iotdb.cluster.exception;
 
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 
-public class MemberReadOnlyException extends Exception{
+public class MemberReadOnlyException extends Exception {
 
   public MemberReadOnlyException(Node node) {
-    super(String.format("The node %s has been set readonly for the partitions, please retry to find "
-        + "a new node", node));
+    super(
+        String.format(
+            "The node %s has been set readonly for the partitions, please retry to find "
+                + "a new node",
+            node));
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java
index d634089..b610a61 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java
@@ -20,8 +20,8 @@
 package org.apache.iotdb.cluster.exception;
 
 /**
- * Raised when a DataClusterServer receives a request without a header node indicating which
- * group it belongs to.
+ * Raised when a DataClusterServer receives a request without a header node indicating which group
+ * it belongs to.
  */
 public class NoHeaderNodeException extends Exception {
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java
index 61314b2..671a79d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java
@@ -22,13 +22,10 @@ package org.apache.iotdb.cluster.exception;
 import java.util.List;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 
-/**
- * Raised when a DataClusterServer receives a request but the node is not in the target group.
- */
+/** Raised when a DataClusterServer receives a request but the node is not in the target group. */
 public class NotInSameGroupException extends Exception {
 
   public NotInSameGroupException(List<Node> group, Node thisNode) {
-    super(String.format("This node %s is not in the data group %s", thisNode,
-        group));
+    super(String.format("This node %s is not in the data group %s", thisNode, group));
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java
index ec78451..638f2aa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java
@@ -22,8 +22,8 @@ package org.apache.iotdb.cluster.exception;
 import java.util.List;
 
 /**
- * Raised when a data group leader receives a PullSnapshotRequest but finds it does not manage
- * the requested slot.
+ * Raised when a data group leader receives a PullSnapshotRequest but finds it does not manage the
+ * requested slot.
  */
 public class NotManagedSlotException extends Exception {
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java
index f658dfc..5a9679e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java
@@ -21,9 +21,7 @@ package org.apache.iotdb.cluster.exception;
 
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 
-/**
- * Raised when a node receives requests before the its partition table is set up.
- */
+/** Raised when a node receives requests before its partition table is set up. */
 public class PartitionTableUnavailableException extends Exception {
 
   public PartitionTableUnavailableException(Node node) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java
index c4d25d2..2b7151e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java
@@ -21,13 +21,14 @@ package org.apache.iotdb.cluster.exception;
 
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 
-public class PullFileException extends Exception{
+public class PullFileException extends Exception {
 
   public PullFileException(String fileName, Node node) {
     super(String.format("Cannot pull file %s from %s due to network condition", fileName, node));
   }
 
   public PullFileException(String fileName, Node node, Exception e) {
-    super(String.format("Cannot pull file %s from %s because %s", fileName, node, e.getMessage()), e);
+    super(
+        String.format("Cannot pull file %s from %s because %s", fileName, node, e.getMessage()), e);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java
index 792b343..6fbede1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java
@@ -21,9 +21,7 @@ package org.apache.iotdb.cluster.exception;
 
 import org.apache.iotdb.cluster.log.Log;
 
-/**
- * Raised when a request times out before reaching an agreement in a group.
- */
+/** Raised when a request times out before reaching an agreement in a group. */
 public class RequestTimeOutException extends Exception {
 
   public RequestTimeOutException(Log log) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java
index 2866c3e..750c6bf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java
@@ -19,7 +19,7 @@
 
 package org.apache.iotdb.cluster.exception;
 
-public class SnapshotInstallationException extends Exception{
+public class SnapshotInstallationException extends Exception {
 
   public SnapshotInstallationException(String message) {
     super(message);
@@ -29,8 +29,7 @@ public class SnapshotInstallationException extends Exception{
     super(message, cause);
   }
 
-  public SnapshotInstallationException() {
-  }
+  public SnapshotInstallationException() {}
 
   public SnapshotInstallationException(Throwable cause) {
     super(cause);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java
index 46b4ff5..ac10999 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java
@@ -22,8 +22,9 @@ package org.apache.iotdb.cluster.exception;
 public class TruncateCommittedEntryException extends Exception {
 
   public TruncateCommittedEntryException(long index, long committed) {
-    super(String
-        .format("The committed entries cannot be truncated: parameter: %d, commitIndex : %d", index,
-            committed));
+    super(
+        String.format(
+            "The committed entries cannot be truncated: parameter: %d, commitIndex : %d",
+            index, committed));
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java
index 149f623..d2eaa6d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java
@@ -19,9 +19,7 @@
 
 package org.apache.iotdb.cluster.exception;
 
-/**
- * Raised when the type of a raft log is unknown.
- */
+/** Raised when the type of a raft log is unknown. */
 public class UnknownLogTypeException extends Exception {
 
   public UnknownLogTypeException(int logType) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java
index 4b71abf..839e61f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java
@@ -37,14 +37,10 @@ public class CommitLogTask implements Runnable {
     this.term = term;
   }
 
-  /**
-   * listener field
-   */
+  /** listener field */
   private AsyncMethodCallback<Void> callback;
 
-  /**
-   * @param callback the event listener
-   */
+  /** @param callback the event listener */
   public void registerCallback(AsyncMethodCallback<Void> callback) {
     this.callback = callback;
   }
@@ -65,4 +61,4 @@ public class CommitLogTask implements Runnable {
   public void run() {
     doCommitLog();
   }
-}
\ No newline at end of file
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java
index 46bf2dc..271cc89 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java
@@ -26,7 +26,6 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.db.utils.SerializeUtils;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
-
 public class HardState {
 
   private long currentTerm;
@@ -57,11 +56,17 @@ public class HardState {
   public ByteBuffer serialize() {
     int totalSize = Long.BYTES + Byte.BYTES;
     // currentTerm(long), marker(byte)
-    // (optional) ipLength(int), ipBytes(byte[]), port(int), identifier(int), dataPort(int), clientPort(int)
+    // (optional) ipLength(int), ipBytes(byte[]), port(int), identifier(int), dataPort(int),
+    // clientPort(int)
     if (voteFor != null) {
       byte[] ipBytes = voteFor.getIp().getBytes();
       totalSize +=
-          Integer.BYTES + ipBytes.length + Integer.BYTES + Integer.BYTES + Integer.BYTES + Integer.BYTES;
+          Integer.BYTES
+              + ipBytes.length
+              + Integer.BYTES
+              + Integer.BYTES
+              + Integer.BYTES
+              + Integer.BYTES;
       byte[] buffer = new byte[totalSize];
       ByteBuffer byteBuffer = ByteBuffer.wrap(buffer);
       byteBuffer.putLong(currentTerm);
@@ -117,17 +122,11 @@ public class HardState {
 
   @Override
   public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(currentTerm)
-        .append(voteFor)
-        .toHashCode();
+    return new HashCodeBuilder(17, 37).append(currentTerm).append(voteFor).toHashCode();
   }
 
   @Override
   public String toString() {
-    return "HardState{" +
-        "currentTerm=" + currentTerm +
-        ", voteFor=" + voteFor +
-        '}';
+    return "HardState{" + "currentTerm=" + currentTerm + ", voteFor=" + voteFor + '}';
   }
 }
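
The serialize() comment above fixes the on-wire layout: currentTerm(long), a marker byte, and, when voteFor is present, ipLength(int), ipBytes(byte[]), port(int), identifier(int), dataPort(int), clientPort(int). A self-contained sketch of writing that layout (the marker convention 1 = present / 0 = absent is an assumption here, not taken from this hunk):

    import java.nio.ByteBuffer;

    public class HardStateLayout {

      public static void main(String[] args) {
        byte[] ip = "127.0.0.1".getBytes();
        ByteBuffer buf =
            ByteBuffer.allocate(
                Long.BYTES + Byte.BYTES + Integer.BYTES + ip.length + 4 * Integer.BYTES);
        buf.putLong(7L);       // currentTerm
        buf.put((byte) 1);     // marker: voteFor present (assumed convention)
        buf.putInt(ip.length); // ipLength
        buf.put(ip);           // ipBytes
        buf.putInt(9003);      // port
        buf.putInt(1);         // identifier
        buf.putInt(40010);     // dataPort
        buf.putInt(6667);      // clientPort
        buf.flip();
        System.out.println("currentTerm=" + buf.getLong() + ", marker=" + buf.get());
      }
    }
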
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java
index 0c236b2..6977634 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java
@@ -38,6 +38,7 @@ public abstract class Log implements Comparable<Log> {
 
   // for async application
   private volatile boolean applied;
+
   @SuppressWarnings("java:S3077")
   private volatile Exception exception;
 
@@ -50,7 +51,12 @@ public abstract class Log implements Comparable<Log> {
 
   public enum Types {
     // DO CHECK LogParser when you add a new type of log
-    ADD_NODE, PHYSICAL_PLAN, CLOSE_FILE, REMOVE_NODE, EMPTY_CONTENT, TEST_LARGE_CONTENT
+    ADD_NODE,
+    PHYSICAL_PLAN,
+    CLOSE_FILE,
+    REMOVE_NODE,
+    EMPTY_CONTENT,
+    TEST_LARGE_CONTENT
   }
 
   public long getCurrLogIndex() {
@@ -98,8 +104,7 @@ public abstract class Log implements Comparable<Log> {
       return false;
     }
     Log log = (Log) o;
-    return currLogIndex == log.currLogIndex &&
-        currLogTerm == log.currLogTerm;
+    return currLogIndex == log.currLogIndex && currLogTerm == log.currLogTerm;
   }
 
   @Override
@@ -127,4 +132,4 @@ public abstract class Log implements Comparable<Log> {
   public void setEnqueueTime(long enqueueTime) {
     this.enqueueTime = enqueueTime;
   }
-}
\ No newline at end of file
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java
index 6c768f6..e4fbec7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java
@@ -19,20 +19,17 @@
 
 package org.apache.iotdb.cluster.log;
 
-/**
- * LogApplier applies the log to the local node to make it take effect.
- */
+/** LogApplier applies the log to the local node to make it take effect. */
 public interface LogApplier {
 
   /**
   * Apply the given log; if any exception is thrown during the execution, the exception will be
   * recorded in the log. Whether an exception is thrown or the log is executed successfully, log
   * .setApplied(true) must be called.
+   *
    * @param log
    */
   void apply(Log log);
 
-  default void close() {
-
-  }
+  default void close() {}
 }
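
The Javadoc above states the applier contract: whatever happens inside apply(), the log must end up marked applied, with any failure recorded on the log. A minimal sketch of an implementation honoring that contract, compiling against the Log and LogApplier types in this module (the setException call assumes the usual setter for the exception field shown in Log.java above):

    import org.apache.iotdb.cluster.log.Log;
    import org.apache.iotdb.cluster.log.LogApplier;

    class NoOpApplier implements LogApplier {

      @Override
      public void apply(Log log) {
        try {
          // ... execute the log's payload here ...
        } catch (Exception e) {
          log.setException(e); // record the failure on the log itself
        } finally {
          log.setApplied(true); // required on both the success and failure paths
        }
      }
    }
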
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java
index bd0bd3f..2f7cf64 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java
@@ -19,19 +19,29 @@
 
 package org.apache.iotdb.cluster.log;
 
-
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest;
 import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client;
-import org.apache.iotdb.cluster.server.monitor.Peer;
-import org.apache.iotdb.cluster.server.monitor.Timer;
 import org.apache.iotdb.cluster.server.handlers.caller.AppendNodeEntryHandler;
 import org.apache.iotdb.cluster.server.member.RaftMember;
+import org.apache.iotdb.cluster.server.monitor.Peer;
+import org.apache.iotdb.cluster.server.monitor.Timer;
 import org.apache.iotdb.cluster.utils.ClientUtils;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
@@ -41,18 +51,6 @@ import org.apache.thrift.async.AsyncMethodCallback;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
 /**
  * A LogDispatcher serves a raft leader by queuing logs that the leader wants to send to its
  * followers and sending the logs in an ordered manner so that the followers will not wait for previous
@@ -64,13 +62,13 @@ public class LogDispatcher {
 
   private static final Logger logger = LoggerFactory.getLogger(LogDispatcher.class);
   private RaftMember member;
-  private boolean useBatchInLogCatchUp = ClusterDescriptor.getInstance().getConfig()
-      .isUseBatchInLogCatchUp();
-  private List<BlockingQueue<SendLogRequest>> nodeLogQueues =
-      new ArrayList<>();
+  private boolean useBatchInLogCatchUp =
+      ClusterDescriptor.getInstance().getConfig().isUseBatchInLogCatchUp();
+  private List<BlockingQueue<SendLogRequest>> nodeLogQueues = new ArrayList<>();
   private ExecutorService executorService;
   private static ExecutorService serializationService =
-      Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
+      Executors.newFixedThreadPool(
+          Runtime.getRuntime().availableProcessors(),
           new ThreadFactoryBuilder().setDaemon(true).setNameFormat("DispatcherEncoder-%d").build());
 
   public LogDispatcher(RaftMember member) {
@@ -99,22 +97,24 @@ public class LogDispatcher {
       try {
         boolean addSucceeded;
         if (ClusterDescriptor.getInstance().getConfig().isWaitForSlowNode()) {
-          addSucceeded = nodeLogQueue.offer(log,
-              ClusterDescriptor.getInstance().getConfig().getWriteOperationTimeoutMS(),
-              TimeUnit.MILLISECONDS);
+          addSucceeded =
+              nodeLogQueue.offer(
+                  log,
+                  ClusterDescriptor.getInstance().getConfig().getWriteOperationTimeoutMS(),
+                  TimeUnit.MILLISECONDS);
         } else {
           addSucceeded = nodeLogQueue.add(log);
         }
 
         if (!addSucceeded) {
-          logger.debug("Log queue[{}] of {} is full, ignore the log to this node", i,
-              member.getName());
+          logger.debug(
+              "Log queue[{}] of {} is full, ignore the log to this node", i, member.getName());
         } else {
           log.setEnqueueTime(System.nanoTime());
         }
       } catch (IllegalStateException e) {
-        logger.debug("Log queue[{}] of {} is full, ignore the log to this node", i,
-            member.getName());
+        logger.debug(
+            "Log queue[{}] of {} is full, ignore the log to this node", i, member.getName());
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
       }
@@ -142,8 +142,11 @@ public class LogDispatcher {
     private long enqueueTime;
     private Future<ByteBuffer> serializedLogFuture;
 
-    public SendLogRequest(Log log, AtomicInteger voteCounter,
-        AtomicBoolean leaderShipStale, AtomicLong newLeaderTerm,
+    public SendLogRequest(
+        Log log,
+        AtomicInteger voteCounter,
+        AtomicBoolean leaderShipStale,
+        AtomicLong newLeaderTerm,
         AppendEntryRequest appendEntryRequest) {
       this.setLog(log);
       this.setVoteCounter(voteCounter);
@@ -196,16 +199,13 @@ public class LogDispatcher {
       return appendEntryRequest;
     }
 
-    public void setAppendEntryRequest(
-        AppendEntryRequest appendEntryRequest) {
+    public void setAppendEntryRequest(AppendEntryRequest appendEntryRequest) {
       this.appendEntryRequest = appendEntryRequest;
     }
 
     @Override
     public String toString() {
-      return "SendLogRequest{" +
-          "log=" + log +
-          '}';
+      return "SendLogRequest{" + "log=" + log + '}';
     }
   }
 
@@ -216,12 +216,13 @@ public class LogDispatcher {
     private List<SendLogRequest> currBatch = new ArrayList<>();
     private Peer peer;
 
-    DispatcherThread(Node receiver,
-        BlockingQueue<SendLogRequest> logBlockingDeque) {
+    DispatcherThread(Node receiver, BlockingQueue<SendLogRequest> logBlockingDeque) {
       this.receiver = receiver;
       this.logBlockingDeque = logBlockingDeque;
-      this.peer = member.getPeerMap().computeIfAbsent(receiver,
-          r -> new Peer(member.getLogManager().getLastLogIndex()));
+      this.peer =
+          member
+              .getPeerMap()
+              .computeIfAbsent(receiver, r -> new Peer(member.getLogManager().getLastLogIndex()));
     }
 
     @Override
@@ -249,26 +250,29 @@ public class LogDispatcher {
       logger.info("Dispatcher exits");
     }
 
-    private void appendEntriesAsync(List<ByteBuffer> logList, AppendEntriesRequest request,
-        List<SendLogRequest> currBatch)
+    private void appendEntriesAsync(
+        List<ByteBuffer> logList, AppendEntriesRequest request, List<SendLogRequest> currBatch)
         throws TException {
       AsyncMethodCallback<Long> handler = new AppendEntriesHandler(currBatch);
       AsyncClient client = member.getSendLogAsyncClient(receiver);
       if (logger.isDebugEnabled()) {
-        logger.debug("{}: append entries {} with {} logs", member.getName(), receiver,
-            logList.size());
+        logger.debug(
+            "{}: append entries {} with {} logs", member.getName(), receiver, logList.size());
       }
       if (client != null) {
         client.appendEntries(request, handler);
       }
     }
 
-    private void appendEntriesSync(List<ByteBuffer> logList, AppendEntriesRequest request,
-        List<SendLogRequest> currBatch) {
+    private void appendEntriesSync(
+        List<ByteBuffer> logList, AppendEntriesRequest request, List<SendLogRequest> currBatch) {
 
       long startTime = Timer.Statistic.RAFT_SENDER_WAIT_FOR_PREV_LOG.getOperationStartTime();
       if (!member.waitForPrevLog(peer, currBatch.get(0).getLog())) {
-        logger.warn("{}: node {} timed out when appending {}", member.getName(), receiver,
+        logger.warn(
+            "{}: node {} timed out when appending {}",
+            member.getName(),
+            receiver,
             currBatch.get(0).getLog());
         return;
       }
@@ -285,8 +289,12 @@ public class LogDispatcher {
         long result = client.appendEntries(request);
         Timer.Statistic.RAFT_SENDER_SEND_LOG.calOperationCostTimeFromStart(startTime);
         if (result != -1 && logger.isInfoEnabled()) {
-          logger.info("{}: Append {} logs to {}, resp: {}", member.getName(), logList.size(),
-              receiver, result);
+          logger.info(
+              "{}: Append {} logs to {}, resp: {}",
+              member.getName(),
+              logList.size(),
+              receiver,
+              result);
         }
         handler.onComplete(result);
       } catch (TException e) {
@@ -298,8 +306,8 @@ public class LogDispatcher {
       }
     }
 
-    private AppendEntriesRequest prepareRequest(List<ByteBuffer> logList,
-        List<SendLogRequest> currBatch, int firstIndex) {
+    private AppendEntriesRequest prepareRequest(
+        List<ByteBuffer> logList, List<SendLogRequest> currBatch, int firstIndex) {
       AppendEntriesRequest request = new AppendEntriesRequest();
 
       if (member.getHeader() != null) {
@@ -325,7 +333,9 @@ public class LogDispatcher {
 
     private void sendLogs(List<SendLogRequest> currBatch) throws TException {
       int logIndex = 0;
-      logger.debug("send logs from index {} to {}", currBatch.get(0).getLog().getCurrLogIndex(),
+      logger.debug(
+          "send logs from index {} to {}",
+          currBatch.get(0).getLog().getCurrLogIndex(),
           currBatch.get(currBatch.size() - 1).getLog().getCurrLogIndex());
       while (logIndex < currBatch.size()) {
         long logSize = IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize();
@@ -338,8 +348,8 @@ public class LogDispatcher {
             break;
           }
           logSize -= curSize;
-          Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE
-              .calOperationCostTimeFromStart(currBatch.get(logIndex).getLog().getCreateTime());
+          Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE.calOperationCostTimeFromStart(
+              currBatch.get(logIndex).getLog().getCreateTime());
           logList.add(currBatch.get(logIndex).getAppendEntryRequest().entry);
         }
 
@@ -350,8 +360,8 @@ public class LogDispatcher {
           appendEntriesSync(logList, appendEntriesRequest, currBatch.subList(prevIndex, logIndex));
         }
         for (; prevIndex < logIndex; prevIndex++) {
-          Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END
-              .calOperationCostTimeFromStart(currBatch.get(prevIndex).getLog().getCreateTime());
+          Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END.calOperationCostTimeFromStart(
+              currBatch.get(prevIndex).getLog().getCreateTime());
         }
       }
     }
@@ -371,13 +381,17 @@ public class LogDispatcher {
     }
 
     private void sendLog(SendLogRequest logRequest) {
-      Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE
-          .calOperationCostTimeFromStart(logRequest.getLog().getCreateTime());
-      member.sendLogToFollower(logRequest.getLog(), logRequest.getVoteCounter(), receiver,
-          logRequest.getLeaderShipStale(), logRequest.getNewLeaderTerm(),
+      Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE.calOperationCostTimeFromStart(
+          logRequest.getLog().getCreateTime());
+      member.sendLogToFollower(
+          logRequest.getLog(),
+          logRequest.getVoteCounter(),
+          receiver,
+          logRequest.getLeaderShipStale(),
+          logRequest.getNewLeaderTerm(),
           logRequest.getAppendEntryRequest());
-      Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END
-          .calOperationCostTimeFromStart(logRequest.getLog().getCreateTime());
+      Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END.calOperationCostTimeFromStart(
+          logRequest.getLog().getCreateTime());
     }
 
     class AppendEntriesHandler implements AsyncMethodCallback<Long> {
@@ -387,10 +401,14 @@ public class LogDispatcher {
       private AppendEntriesHandler(List<SendLogRequest> batch) {
         singleEntryHandlers = new ArrayList<>(batch.size());
         for (SendLogRequest sendLogRequest : batch) {
-          AppendNodeEntryHandler handler = getAppendNodeEntryHandler(sendLogRequest.getLog(),
-              sendLogRequest.getVoteCounter()
-              , receiver,
-              sendLogRequest.getLeaderShipStale(), sendLogRequest.getNewLeaderTerm(), peer);
+          AppendNodeEntryHandler handler =
+              getAppendNodeEntryHandler(
+                  sendLogRequest.getLog(),
+                  sendLogRequest.getVoteCounter(),
+                  receiver,
+                  sendLogRequest.getLeaderShipStale(),
+                  sendLogRequest.getNewLeaderTerm(),
+                  peer);
           singleEntryHandlers.add(handler);
         }
       }
@@ -409,8 +427,13 @@ public class LogDispatcher {
         }
       }
 
-      private AppendNodeEntryHandler getAppendNodeEntryHandler(Log log, AtomicInteger voteCounter,
-          Node node, AtomicBoolean leaderShipStale, AtomicLong newLeaderTerm, Peer peer) {
+      private AppendNodeEntryHandler getAppendNodeEntryHandler(
+          Log log,
+          AtomicInteger voteCounter,
+          Node node,
+          AtomicBoolean leaderShipStale,
+          AtomicLong newLeaderTerm,
+          Peer peer) {
         AppendNodeEntryHandler handler = new AppendNodeEntryHandler();
         handler.setReceiver(node);
         handler.setVoteCounter(voteCounter);
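
Behind the reshuffled code above, the dispatcher's structure is: one BlockingQueue per follower, filled by offer()/add() on the leader side and drained in order by one DispatcherThread per follower, so a slow follower delays only its own queue. A stripped-down, runnable sketch of that pattern (names and sizes are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class PerFollowerQueues {

      public static void main(String[] args) throws InterruptedException {
        int followers = 2;
        List<BlockingQueue<String>> queues = new ArrayList<>();
        ExecutorService pool = Executors.newFixedThreadPool(followers);
        for (int i = 0; i < followers; i++) {
          BlockingQueue<String> q = new ArrayBlockingQueue<>(4096);
          queues.add(q);
          final int id = i;
          pool.submit(
              () -> {
                try {
                  while (true) {
                    String log = q.take(); // FIFO per follower keeps the send order
                    System.out.println("follower " + id + " <- " + log);
                  }
                } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
                }
              });
        }
        for (BlockingQueue<String> q : queues) {
          q.offer("log-1"); // a full queue rejects the log, as in offer()/add() above
        }
        Thread.sleep(100);
        pool.shutdownNow();
      }
    }
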
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java
index 4a7afc4..3108836 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java
@@ -31,9 +31,7 @@ import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * LogParser transform a ByteBuffer into a Log.
- */
+/** LogParser transforms a ByteBuffer into a Log. */
 public class LogParser {
 
   private static final Logger logger = LoggerFactory.getLogger(LogParser.class);
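
The "DO CHECK LogParser" warning in Log.Types exists because the parser dispatches on a serialized type marker; an out-of-range marker is what UnknownLogTypeException (reformatted earlier in this diff) reports. A rough sketch of that dispatch shape (the byte-for-ordinal encoding is an assumption, not taken from this hunk):

    import java.nio.ByteBuffer;

    public class TypeDispatch {

      enum Types { ADD_NODE, PHYSICAL_PLAN, CLOSE_FILE, REMOVE_NODE, EMPTY_CONTENT, TEST_LARGE_CONTENT }

      static Types parseType(ByteBuffer buffer) throws Exception {
        int logType = buffer.get();
        if (logType < 0 || logType >= Types.values().length) {
          throw new Exception("Unknown log type: " + logType); // cf. UnknownLogTypeException
        }
        return Types.values()[logType];
      }

      public static void main(String[] args) throws Exception {
        System.out.println(parseType(ByteBuffer.wrap(new byte[] {2}))); // CLOSE_FILE
      }
    }
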
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java
index 2d15dd1..40cdb5b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java
@@ -19,7 +19,6 @@
 
 package org.apache.iotdb.cluster.log;
 
-
 import java.nio.ByteBuffer;
 import org.apache.iotdb.cluster.log.snapshot.SnapshotInstaller;
 import org.apache.iotdb.cluster.server.member.RaftMember;
@@ -60,12 +59,11 @@ public abstract class Snapshot {
   public abstract SnapshotInstaller<? extends Snapshot> getDefaultInstaller(RaftMember member);
 
   /**
-   * Discard contents which is generated by logs whose index <= 'minIndex' if possible.
-   * This method is a best-effort one without guarantee that the result will absolutely not
-   * contain contents before 'minIndex'.
+   * Discard contents which are generated by logs whose index <= 'minIndex' if possible. This method
+   * is a best-effort one without guarantee that the result will absolutely not contain contents
+   * before 'minIndex'.
+   *
    * @param minIndex
    */
-  public void truncateBefore(long minIndex) {
-
-  }
+  public void truncateBefore(long minIndex) {}
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
index 1b406d9..62d34f3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
@@ -40,7 +40,7 @@ public interface StableEntryManager {
 
   /**
    * @param startIndex (inclusive) the log start index
-   * @param endIndex   (inclusive) the log end index
+   * @param endIndex (inclusive) the log end index
    * @return the raft log which index between [startIndex, endIndex] or empty if not found
    */
   List<Log> getLogs(long startIndex, long endIndex);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java
index 63b5d11..b80a7aa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java
@@ -65,8 +65,13 @@ public class AsyncDataLogApplier implements LogApplier {
   public AsyncDataLogApplier(LogApplier embeddedApplier, String name) {
     this.embeddedApplier = embeddedApplier;
     consumerMap = new HashMap<>();
-    consumerPool = new ThreadPoolExecutor(CONCURRENT_CONSUMER_NUM,
-        Integer.MAX_VALUE, 0, TimeUnit.SECONDS, new SynchronousQueue<>());
+    consumerPool =
+        new ThreadPoolExecutor(
+            CONCURRENT_CONSUMER_NUM,
+            Integer.MAX_VALUE,
+            0,
+            TimeUnit.SECONDS,
+            new SynchronousQueue<>());
     this.name = name;
   }
 
@@ -131,8 +136,9 @@ public class AsyncDataLogApplier implements LogApplier {
   }
 
   /**
-   * We can sure that the storage group of all InsertTabletPlans in InsertMultiTabletPlan are the same. this is
-   * done in {@link org.apache.iotdb.cluster.query.ClusterPlanRouter#splitAndRoutePlan(InsertMultiTabletPlan)}
+   * We can be sure that the storage group of all InsertTabletPlans in an InsertMultiTabletPlan is
+   * the same. This is done in {@link
+   * org.apache.iotdb.cluster.query.ClusterPlanRouter#splitAndRoutePlan(InsertMultiTabletPlan)}
    *
    * @return the sg that the plan belongs to
    * @throws StorageGroupNotSetException if no sg found
@@ -270,12 +276,17 @@ public class AsyncDataLogApplier implements LogApplier {
 
     @Override
     public String toString() {
-      return "DataLogConsumer{" +
-          "logQueue=" + logQueue.size() +
-          ", lastLogIndex=" + lastLogIndex +
-          ", lastAppliedLogIndex=" + lastAppliedLogIndex +
-          ", name='" + name + '\'' +
-          '}';
+      return "DataLogConsumer{"
+          + "logQueue="
+          + logQueue.size()
+          + ", lastLogIndex="
+          + lastLogIndex
+          + ", lastAppliedLogIndex="
+          + lastAppliedLogIndex
+          + ", name='"
+          + name
+          + '\''
+          + '}';
     }
   }
 }
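
A note on the pool built above: pairing an unbounded maximumPoolSize with a SynchronousQueue means the executor never queues a task behind a busy consumer; each submission is handed to an idle thread or a freshly spawned one. A minimal demonstration of that handoff behavior (pool sizes here are illustrative):

    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class HandoffPool {

      public static void main(String[] args) {
        ThreadPoolExecutor pool =
            new ThreadPoolExecutor(
                4, Integer.MAX_VALUE, 0, TimeUnit.SECONDS, new SynchronousQueue<>());
        for (int i = 0; i < 8; i++) {
          // never rejected and never queued: a new thread is created when none is idle
          pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        }
        pool.shutdown();
      }
    }
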
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
index 43476e2..c1e4165 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
@@ -41,9 +41,7 @@ import org.apache.iotdb.db.utils.SchemaUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * BaseApplier use PlanExecutor to execute PhysicalPlans.
- */
+/** BaseApplier use PlanExecutor to execute PhysicalPlans. */
 abstract class BaseApplier implements LogApplier {
 
   private static final Logger logger = LoggerFactory.getLogger(BaseApplier.class);
@@ -58,7 +56,7 @@ abstract class BaseApplier implements LogApplier {
   /**
    * @param plan
    * @param dataGroupMember the data group member that is applying the log, null if the log is
-   *                        applied by a meta group member
+   *     applied by a meta group member
    * @throws QueryProcessException
    * @throws StorageGroupNotSetException
    * @throws StorageEngineException
@@ -97,7 +95,7 @@ abstract class BaseApplier implements LogApplier {
   /**
    * @param plan
    * @param dataGroupMember the data group member that is applying the log, null if the log is
-   *                        applied by a meta group member
+   *     applied by a meta group member
    * @throws QueryProcessException
    * @throws StorageGroupNotSetException
    * @throws StorageEngineException
@@ -113,8 +111,10 @@ abstract class BaseApplier implements LogApplier {
 
       if (causedByPathNotExist) {
         if (logger.isDebugEnabled()) {
-          logger.debug("Timeseries is not found locally[{}], try pulling it from another group: {}",
-              metaGroupMember.getName(), e.getCause().getMessage());
+          logger.debug(
+              "Timeseries is not found locally[{}], try pulling it from another group: {}",
+              metaGroupMember.getName(),
+              e.getCause().getMessage());
         }
         pullTimeseriesSchema(plan, dataGroupMember.getHeader());
         plan.recoverFromFailure();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
index e590489..abaf4a3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
@@ -43,8 +43,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * DataLogApplier applies logs like data insertion/deletion/update and timeseries creation to
- * IoTDB.
+ * DataLogApplier applies logs like data insertion/deletion/update and timeseries creation to IoTDB.
  */
 public class DataLogApplier extends BaseApplier {
 
@@ -75,9 +74,11 @@ public class DataLogApplier extends BaseApplier {
       } else if (log instanceof CloseFileLog) {
         CloseFileLog closeFileLog = ((CloseFileLog) log);
         StorageEngine.getInstance()
-            .closeStorageGroupProcessor(new PartialPath(closeFileLog.getStorageGroupName()),
+            .closeStorageGroupProcessor(
+                new PartialPath(closeFileLog.getStorageGroupName()),
                 closeFileLog.getPartitionId(),
-                closeFileLog.isSeq(), false);
+                closeFileLog.isSeq(),
+                false);
       } else {
         logger.error("Unsupported log: {}", log);
       }
@@ -116,8 +117,9 @@ public class DataLogApplier extends BaseApplier {
       }
       sg = IoTDB.metaManager.getStorageGroupPath(plan.getDeviceId());
     }
-    int slotId = SlotPartitionTable.getSlotStrategy().calculateSlotByTime(sg.getFullPath(), time,
-        ClusterConstant.SLOT_NUM);
+    int slotId =
+        SlotPartitionTable.getSlotStrategy()
+            .calculateSlotByTime(sg.getFullPath(), time, ClusterConstant.SLOT_NUM);
     // the slot may not be writable because it is pulling file versions, wait until it is done
     dataGroupMember.getSlotManager().waitSlotForWrite(slotId);
     applyPhysicalPlan(plan, dataGroupMember);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java
index d7dd5f9..5e93705 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java
@@ -31,9 +31,7 @@ import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * MetaLogApplier applies logs like node addition and storage group creation to IoTDB.
- */
+/** MetaLogApplier applies logs like node addition and storage group creation to IoTDB. */
 public class MetaLogApplier extends BaseApplier {
 
   private static final Logger logger = LoggerFactory.getLogger(MetaLogApplier.class);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java
index 8a266ec..d75fca9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java
@@ -35,8 +35,8 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client;
 import org.apache.iotdb.cluster.server.NodeCharacter;
-import org.apache.iotdb.cluster.server.monitor.Peer;
 import org.apache.iotdb.cluster.server.member.RaftMember;
+import org.apache.iotdb.cluster.server.monitor.Peer;
 import org.apache.iotdb.cluster.utils.ClientUtils;
 import org.apache.iotdb.db.utils.TestOnly;
 import org.apache.thrift.TException;
@@ -68,7 +68,7 @@ public class CatchUpTask implements Runnable {
 
   /**
    * @return true if a matched index is found so that we can use logs only to catch up, or false if
-   * the catch up must be done with a snapshot.
+   *     the catch up must be done with a snapshot.
    * @throws TException
    * @throws InterruptedException
    */
@@ -93,7 +93,12 @@ public class CatchUpTask implements Runnable {
         logger.info(
             "{}: use {} logs of [{}, {}] to fix log inconsistency with node [{}], "
                 + "local first index: {}",
-            raftMember.getName(), logs.size(), lo, hi, node, localFirstIndex);
+            raftMember.getName(),
+            logs.size(),
+            lo,
+            hi,
+            node,
+            localFirstIndex);
       }
     } catch (ConcurrentModificationException e) {
       // ignore
@@ -115,7 +120,12 @@ public class CatchUpTask implements Runnable {
         if (!logsInDisk.isEmpty()) {
           logger.info(
               "{}, found {} logs in disk to catch up {} , startIndex={}, endIndex={}, memoryFirstIndex={}, getFirstLogIndex={}",
-              name, logsInDisk.size(), node, startIndex, endIndex, localFirstIndex,
+              name,
+              logsInDisk.size(),
+              node,
+              startIndex,
+              endIndex,
+              localFirstIndex,
               logsInDisk.get(0).getCurrLogIndex());
           logs = logsInDisk;
           return true;
@@ -125,24 +135,30 @@ public class CatchUpTask implements Runnable {
     }
     long newMatchedIndex = logs.get(index).getCurrLogIndex() - 1;
     if (newMatchedIndex > lastLogIndex) {
-      logger.info("{}: matched index of {} has moved beyond last log index, node is "
-          + "self-catching-up, abort this catch up to avoid duplicates", name,
+      logger.info(
+          "{}: matched index of {} has moved beyond last log index, node is "
+              + "self-catching-up, abort this catch up to avoid duplicates",
+          name,
           node);
       abort = true;
       return true;
     }
-    logger.info("{}: {} matches at {}", name, node,
-        newMatchedIndex);
+    logger.info("{}: {} matches at {}", name, node, newMatchedIndex);
 
     peer.setMatchIndex(newMatchedIndex);
-    // if follower return RESPONSE.AGREE with this empty log, then start sending real logs from index.
+    // if the follower returns RESPONSE.AGREE with this empty log, then start sending real logs
+    // from index.
     logs.subList(0, index).clear();
     if (logger.isInfoEnabled()) {
       if (logs.isEmpty()) {
         logger.info("{}: {} has caught up by previous catch up", name, node);
       } else {
-        logger.info("{}: makes {} catch up with {} and other {} logs", name,
-            node, logs.get(0), logs.size());
+        logger.info(
+            "{}: makes {} catch up with {} and other {} logs",
+            name,
+            node,
+            logs.get(0),
+            logs.size());
       }
     }
     return true;
@@ -150,7 +166,7 @@ public class CatchUpTask implements Runnable {
 
   @SuppressWarnings("squid:S1135")
   private boolean judgeUseLogsInDiskToCatchUp() {
-    //TODO use log in disk to snapshot first, if the log not found on disk, then use snapshot.
+    // TODO use log in disk to snapshot first, if the log not found on disk, then use snapshot.
     if (!ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) {
       return false;
     }
@@ -159,10 +175,15 @@ public class CatchUpTask implements Runnable {
   }
 
   private List<Log> getLogsInStableEntryManager(long startIndex, long endIndex) {
-    List<Log> logsInDisk = raftMember.getLogManager().getStableEntryManager()
-        .getLogs(startIndex, endIndex);
-    logger.debug("{}, found {} logs in disk to catchup {}, startIndex={}, endIndex={}",
-        raftMember.getName(), logsInDisk.size(), node, startIndex, endIndex);
+    List<Log> logsInDisk =
+        raftMember.getLogManager().getStableEntryManager().getLogs(startIndex, endIndex);
+    logger.debug(
+        "{}, found {} logs in disk to catchup {}, startIndex={}, endIndex={}",
+        raftMember.getName(),
+        logsInDisk.size(),
+        node,
+        startIndex,
+        endIndex);
     return logsInDisk;
   }
 
@@ -195,7 +216,7 @@ public class CatchUpTask implements Runnable {
   /**
    * @param index the index of a log in logs
    * @return true if the previous log at logs[index] matches a log in the remote node, false if the
-   * corresponding log cannot be found
+   *     corresponding log cannot be found
    * @throws LeaderUnknownException
    * @throws TException
    * @throws InterruptedException
@@ -220,16 +241,20 @@ public class CatchUpTask implements Runnable {
 
     boolean matched = checkLogIsMatch(prevLogIndex, prevLogTerm);
     raftMember.getLastCatchUpResponseTime().put(node, System.currentTimeMillis());
-    logger.info("{} check {}'s matchIndex {} with log [{}]", raftMember.getName(), node,
-        matched ? "succeed" : "failed", log);
+    logger.info(
+        "{} check {}'s matchIndex {} with log [{}]",
+        raftMember.getName(),
+        node,
+        matched ? "succeed" : "failed",
+        log);
     return matched;
   }
 
   /**
    * @param logIndex the log index needs to check
-   * @param logTerm  the log term need to check
+   * @param logTerm the log term need to check
    * @return true if the log's index and term matches a log in the remote node, false if the
-   * corresponding log cannot be found
+   *     corresponding log cannot be found
    * @throws TException
    * @throws InterruptedException
    */
@@ -282,18 +307,15 @@ public class CatchUpTask implements Runnable {
     }
     snapshot = raftMember.getLogManager().getSnapshot(peer.getMatchIndex());
     if (logger.isInfoEnabled()) {
-      logger
-          .info("{}: Logs in {} are too old, catch up with snapshot", raftMember.getName(), node);
+      logger.info("{}: Logs in {} are too old, catch up with snapshot", raftMember.getName(), node);
     }
   }
 
-  /**
-   * Remove logs that are contained in the snapshot.
-   */
+  /** Remove logs that are contained in the snapshot. */
   private void removeSnapshotLogs() {
     Log logToSearch = new EmptyContentLog(snapshot.getLastLogIndex(), snapshot.getLastLogTerm());
-    int pos = Collections
-        .binarySearch(logs, logToSearch, Comparator.comparingLong(Log::getCurrLogIndex));
+    int pos =
+        Collections.binarySearch(logs, logToSearch, Comparator.comparingLong(Log::getCurrLogIndex));
     int prevSize = logs.size();
     if (pos >= 0) {
       logs.subList(0, pos + 1).clear();
@@ -332,13 +354,18 @@ public class CatchUpTask implements Runnable {
         // the catch up may be triggered by an old heartbeat, and the node may have already
         // caught up, so logs can be empty
         if (!logs.isEmpty() || snapshot != null) {
-          long lastIndex = !logs.isEmpty() ? logs.get(logs.size() - 1).getCurrLogIndex() :
-              snapshot.getLastLogIndex();
+          long lastIndex =
+              !logs.isEmpty()
+                  ? logs.get(logs.size() - 1).getCurrLogIndex()
+                  : snapshot.getLastLogIndex();
           peer.setMatchIndex(lastIndex);
         }
         if (logger.isInfoEnabled()) {
-          logger.info("{}: Catch up {} finished, update it's matchIndex to {}",
-              raftMember.getName(), node, peer.getMatchIndex());
+          logger.info(
+              "{}: Catch up {} finished, update it's matchIndex to {}",
+              raftMember.getName(),
+              node,
+              peer.getMatchIndex());
         }
         peer.resetInconsistentHeartbeatNum();
       }
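
The removeSnapshotLogs() change above keeps the same logic: the in-memory logs are sorted by index, so a binary search on the snapshot's last log index finds the cut point, and the prefix already covered by the snapshot is dropped. A toy version with Long standing in for Log:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public class TrimBySnapshot {

      public static void main(String[] args) {
        List<Long> logs = new ArrayList<>(List.of(5L, 6L, 7L, 8L, 9L));
        long snapshotLastIndex = 7L;
        int pos =
            Collections.binarySearch(
                logs, snapshotLastIndex, Comparator.comparingLong(Long::longValue));
        if (pos >= 0) {
          logs.subList(0, pos + 1).clear(); // these entries are already in the snapshot
        }
        System.out.println(logs); // [8, 9]
      }
    }
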
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java
index 3520ce4..cbed41f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java
@@ -19,6 +19,11 @@
 
 package org.apache.iotdb.cluster.log.catchup;
 
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.exception.LeaderUnknownException;
@@ -41,15 +46,7 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * LogCatchUpTask sends a list of logs to a node to make the node keep up with the leader.
- */
+/** LogCatchUpTask sends a list of logs to a node to make the node keep up with the leader. */
 @SuppressWarnings("java:S2274") // enable timeout
 public class LogCatchUpTask implements Callable<Boolean> {
 
@@ -211,9 +208,11 @@ public class LogCatchUpTask implements Callable<Boolean> {
 
       ByteBuffer logData = logs.get(i).serialize();
       int logSize = logData.array().length;
-      if (logSize > IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize()
-          - IoTDBConstant.LEFT_SIZE_IN_REQUEST) {
-        logger.warn("the frame size {} of thrift is too small",
+      if (logSize
+          > IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize()
+              - IoTDBConstant.LEFT_SIZE_IN_REQUEST) {
+        logger.warn(
+            "the frame size {} of thrift is too small",
             IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize());
         abort = true;
         return;
@@ -223,8 +222,8 @@ public class LogCatchUpTask implements Callable<Boolean> {
      // we should send logs whose size is smaller than the max frame size of thrift
       // left 200 byte for other fields of AppendEntriesRequest
       // send at most 100 logs a time to avoid long latency
-      if (totalLogSize >
-          IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize()
+      if (totalLogSize
+          > IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize()
               - IoTDBConstant.LEFT_SIZE_IN_REQUEST) {
         // batch oversize, send previous batch and add the log to a new batch
         sendBatchLogs(logList, firstLogPos);
@@ -252,8 +251,12 @@ public class LogCatchUpTask implements Callable<Boolean> {
   private void sendBatchLogs(List<ByteBuffer> logList, int firstLogPos)
       throws TException, InterruptedException {
     if (logger.isInfoEnabled()) {
-      logger.info("{} send logs from {} num {} for {}", raftMember.getThisNode(),
-          logs.get(firstLogPos).getCurrLogIndex(), logList.size(), node);
+      logger.info(
+          "{} send logs from {} num {} for {}",
+          raftMember.getThisNode(),
+          logs.get(firstLogPos).getCurrLogIndex(),
+          logList.size(),
+          node);
     }
     AppendEntriesRequest request = prepareRequest(logList, firstLogPos);
     if (request == null) {
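
The batching logic reformatted here works on a byte budget: logs accumulate into one AppendEntriesRequest until adding the next one would exceed the thrift max frame size minus a fixed headroom (LEFT_SIZE_IN_REQUEST) for the request's other fields; a single log larger than that budget aborts the catch-up, and the real code additionally caps a batch at 100 logs. A small sketch of the budgeting (sizes are made-up stand-ins):

    import java.util.ArrayList;
    import java.util.List;

    public class FrameBatching {

      static final int MAX_FRAME = 1000; // stands in for getThriftMaxFrameSize()
      static final int HEADROOM = 200;   // stands in for LEFT_SIZE_IN_REQUEST

      public static void main(String[] args) {
        int[] logSizes = {300, 300, 300, 300};
        List<Integer> batch = new ArrayList<>();
        int total = 0;
        for (int size : logSizes) {
          if (size > MAX_FRAME - HEADROOM) {
            System.out.println("single log exceeds one frame, abort");
            return;
          }
          if (total + size > MAX_FRAME - HEADROOM) {
            System.out.println("flush batch of " + batch.size() + " logs (" + total + " bytes)");
            batch.clear();
            total = 0;
          }
          batch.add(size);
          total += size;
        }
        System.out.println("flush final batch of " + batch.size() + " logs (" + total + " bytes)");
      }
    }
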
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java
index 1a858b0..7a888fb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java
@@ -47,8 +47,8 @@ public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable<Bool
   private static final Logger logger = LoggerFactory.getLogger(SnapshotCatchUpTask.class);
 
   // sending a snapshot may take longer than normal communications
-  private static final long SEND_SNAPSHOT_WAIT_MS = ClusterDescriptor.getInstance().getConfig()
-      .getCatchUpTimeoutMS();
+  private static final long SEND_SNAPSHOT_WAIT_MS =
+      ClusterDescriptor.getInstance().getConfig().getCatchUpTimeoutMS();
   private Snapshot snapshot;
 
   SnapshotCatchUpTask(List<Log> logs, Snapshot snapshot, Node node, RaftMember raftMember) {
@@ -56,8 +56,7 @@ public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable<Bool
     this.snapshot = snapshot;
   }
 
-  private void doSnapshotCatchUp()
-      throws TException, InterruptedException, LeaderUnknownException {
+  private void doSnapshotCatchUp() throws TException, InterruptedException, LeaderUnknownException {
     SendSnapshotRequest request = new SendSnapshotRequest();
     if (raftMember.getHeader() != null) {
       request.setHeader(raftMember.getHeader());
@@ -95,7 +94,9 @@ public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable<Bool
       return false;
     }
 
-    logger.info("{}: the snapshot request size={}", raftMember.getName(),
+    logger.info(
+        "{}: the snapshot request size={}",
+        raftMember.getName(),
         request.getSnapshotBytes().length);
     synchronized (succeed) {
       client.sendSnapshot(request, handler);
@@ -109,8 +110,11 @@ public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable<Bool
   }
 
   private boolean sendSnapshotSync(SendSnapshotRequest request) throws TException {
-    logger.info("{}: sending a snapshot request size={} to {}", raftMember.getName(),
-        request.getSnapshotBytes().length, node);
+    logger.info(
+        "{}: sending a snapshot request size={} to {}",
+        raftMember.getName(),
+        request.getSnapshotBytes().length,
+        node);
     Client client = raftMember.getSyncClient(node);
     if (client == null) {
       return false;
@@ -137,9 +141,8 @@ public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable<Bool
       raftMember.getLastCatchUpResponseTime().remove(node);
       return false;
     }
-    logger
-        .info("{}: Snapshot catch up {} finished, begin to catch up log", raftMember.getName(),
-            node);
+    logger.info(
+        "{}: Snapshot catch up {} finished, begin to catch up log", raftMember.getName(), node);
     doLogCatchUp();
     if (!abort) {
       logger.info("{}: Catch up {} finished", raftMember.getName(), node);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java
index f54725d..3684283 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java
@@ -28,9 +28,7 @@ import org.apache.iotdb.cluster.log.Log;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.db.utils.SerializeUtils;
 
-/**
- * AddNodeLog records the operation of adding a node into this cluster.
- */
+/** AddNodeLog records the operation of adding a node into this cluster. */
 public class AddNodeLog extends Log {
 
   private Node newNode;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java
index 2948d52..7aa325f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java
@@ -35,8 +35,7 @@ public class CloseFileLog extends Log {
   private boolean isSeq;
   private long partitionId;
 
-  public CloseFileLog() {
-  }
+  public CloseFileLog() {}
 
   public CloseFileLog(String storageGroupName, long partitionId, boolean isSeq) {
     this.storageGroupName = storageGroupName;
@@ -89,11 +88,15 @@ public class CloseFileLog extends Log {
 
   @Override
   public String toString() {
-    return "CloseFileLog{" +
-        "storageGroupName='" + storageGroupName + '\'' +
-        ", isSeq=" + isSeq +
-        ", partitionId=" + partitionId +
-        '}';
+    return "CloseFileLog{"
+        + "storageGroupName='"
+        + storageGroupName
+        + '\''
+        + ", isSeq="
+        + isSeq
+        + ", partitionId="
+        + partitionId
+        + '}';
   }
 
   @Override
@@ -108,8 +111,9 @@ public class CloseFileLog extends Log {
       return false;
     }
     CloseFileLog that = (CloseFileLog) o;
-    return isSeq == that.isSeq &&
-        Objects.equals(storageGroupName, that.storageGroupName) && partitionId == that.partitionId;
+    return isSeq == that.isSeq
+        && Objects.equals(storageGroupName, that.storageGroupName)
+        && partitionId == that.partitionId;
   }
 
   @Override
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java
index 915ab35..bbd4c6d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java
@@ -29,8 +29,7 @@ import org.apache.iotdb.cluster.log.Log;
 
 public class EmptyContentLog extends Log {
 
-  public EmptyContentLog() {
-  }
+  public EmptyContentLog() {}
 
   public EmptyContentLog(long index, long term) {
     this.setCurrLogIndex(index);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java
index 74981a4..82c4b2f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java
@@ -19,18 +19,18 @@
 
 package org.apache.iotdb.cluster.log.logtypes;
 
-import org.apache.iotdb.cluster.log.Log;
+import static org.apache.iotdb.cluster.log.Log.Types.TEST_LARGE_CONTENT;
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Objects;
-
-import static org.apache.iotdb.cluster.log.Log.Types.TEST_LARGE_CONTENT;
+import org.apache.iotdb.cluster.log.Log;
 
 public class LargeTestLog extends Log {
   private ByteBuffer data;
+
   public LargeTestLog() {
     data = ByteBuffer.wrap(new byte[8192]);
   }
@@ -62,8 +62,7 @@ public class LargeTestLog extends Log {
       return false;
     }
     LargeTestLog obj1 = (LargeTestLog) obj;
-    return getCurrLogIndex() == obj1.getCurrLogIndex() &&
-      getCurrLogTerm() == obj1.getCurrLogTerm();
+    return getCurrLogIndex() == obj1.getCurrLogIndex() && getCurrLogTerm() == obj1.getCurrLogTerm();
   }
 
   @Override
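
The import shuffle above is what google-java-format produces: static imports form the
first block and regular imports the second, each block ASCII-sorted, with one blank line
between the blocks. A hypothetical file header illustrating the convention:

    import static java.util.Arrays.asList;          // static imports: first block, ASCII order
    import static java.util.Objects.requireNonNull;

    import java.nio.ByteBuffer;                     // regular imports: second block, ASCII order
    import java.util.Objects;
    // org.apache.iotdb.cluster.log.Log sorts here, since "org" follows "java"

    public class ImportOrderExample {}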
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java
index e0927c0..e9cd03e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java
@@ -32,16 +32,13 @@ import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * PhysicalPlanLog contains a non-partitioned physical plan like set storage group.
- */
+/** PhysicalPlanLog contains a non-partitioned physical plan like set storage group. */
 public class PhysicalPlanLog extends Log {
 
   private static final Logger logger = LoggerFactory.getLogger(PhysicalPlanLog.class);
   private PhysicalPlan plan;
 
-  public PhysicalPlanLog() {
-  }
+  public PhysicalPlanLog() {}
 
   public PhysicalPlanLog(PhysicalPlan plan) {
     this.plan = plan;
@@ -72,8 +69,12 @@ public class PhysicalPlanLog extends Log {
     try {
       plan = PhysicalPlan.Factory.create(buffer);
     } catch (IOException | IllegalPathException e) {
-      logger.error("Cannot parse a physical {}:{} plan {}", getCurrLogIndex(), getCurrLogTerm(),
-          buffer.array().length, e);
+      logger.error(
+          "Cannot parse a physical {}:{} plan {}",
+          getCurrLogIndex(),
+          getCurrLogTerm(),
+          buffer.array().length,
+          e);
     }
   }
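
Note that the reformatted logger.error above passes four arguments for three {}
placeholders; that is intentional. SLF4J treats a trailing Throwable specially and prints
its stack trace instead of binding it to a placeholder. A minimal sketch (assuming an
SLF4J binding on the classpath):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
      private static final Logger logger = LoggerFactory.getLogger(LoggingSketch.class);

      public static void main(String[] args) {
        Exception cause = new IllegalStateException("boom");
        // Two placeholders consume 7L and 3L; the trailing cause is logged as a stack trace.
        logger.error("cannot parse plan {}:{}", 7L, 3L, cause);
      }
    }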
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java
index 02d89d0..3768275 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java
@@ -19,69 +19,68 @@
 
 package org.apache.iotdb.cluster.log.logtypes;
 
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Objects;
 import org.apache.iotdb.cluster.log.Log;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.nio.ByteBuffer;
 import org.apache.iotdb.db.utils.SerializeUtils;
 
 public class RemoveNodeLog extends Log {
 
-    private Node removedNode;
+  private Node removedNode;
 
-    @Override
-    public ByteBuffer serialize() {
-        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
-        try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) {
-            dataOutputStream.writeByte(Types.REMOVE_NODE.ordinal());
-            dataOutputStream.writeLong(getCurrLogIndex());
-            dataOutputStream.writeLong(getCurrLogTerm());
+  @Override
+  public ByteBuffer serialize() {
+    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+    try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) {
+      dataOutputStream.writeByte(Types.REMOVE_NODE.ordinal());
+      dataOutputStream.writeLong(getCurrLogIndex());
+      dataOutputStream.writeLong(getCurrLogTerm());
 
-            SerializeUtils.serialize(removedNode, dataOutputStream);
-        } catch (IOException e) {
-            // ignored
-        }
-        return ByteBuffer.wrap(byteArrayOutputStream.toByteArray());
+      SerializeUtils.serialize(removedNode, dataOutputStream);
+    } catch (IOException e) {
+      // ignored
     }
+    return ByteBuffer.wrap(byteArrayOutputStream.toByteArray());
+  }
 
-    @Override
-    public void deserialize(ByteBuffer buffer) {
-        setCurrLogIndex(buffer.getLong());
-        setCurrLogTerm(buffer.getLong());
+  @Override
+  public void deserialize(ByteBuffer buffer) {
+    setCurrLogIndex(buffer.getLong());
+    setCurrLogTerm(buffer.getLong());
 
-        removedNode = new Node();
-        SerializeUtils.deserialize(removedNode, buffer);
-    }
+    removedNode = new Node();
+    SerializeUtils.deserialize(removedNode, buffer);
+  }
 
-    public Node getRemovedNode() {
-        return removedNode;
-    }
+  public Node getRemovedNode() {
+    return removedNode;
+  }
 
-    public void setRemovedNode(Node removedNode) {
-        this.removedNode = removedNode;
-    }
+  public void setRemovedNode(Node removedNode) {
+    this.removedNode = removedNode;
+  }
 
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-        if (!super.equals(o)) {
-            return false;
-        }
-        RemoveNodeLog that = (RemoveNodeLog) o;
-        return Objects.equals(removedNode, that.removedNode);
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
     }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(super.hashCode(), removedNode);
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
     }
+    RemoveNodeLog that = (RemoveNodeLog) o;
+    return Objects.equals(removedNode, that.removedNode);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(super.hashCode(), removedNode);
+  }
 }
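
The serialize/deserialize pair above follows the usual round-trip contract: fields are
read back in exactly the order they were written, with the leading type byte evidently
consumed by the dispatching parser before deserialize() runs. A simplified sketch of that
contract (plain longs only; constructing the thrift Node is outside this diff):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    public class RoundTripSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputStream data = new DataOutputStream(out)) {
          data.writeByte(1);    // type tag, like Types.REMOVE_NODE.ordinal()
          data.writeLong(42L);  // currLogIndex
          data.writeLong(7L);   // currLogTerm
        }
        ByteBuffer buffer = ByteBuffer.wrap(out.toByteArray());
        byte type = buffer.get();      // reads must mirror the write order
        long index = buffer.getLong();
        long term = buffer.getLong();
        System.out.println(type + " " + index + " " + term); // prints: 1 42 7
      }
    }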
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
index e65be0c..faad07e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
@@ -109,7 +109,7 @@ public class CommittedEntryManager {
    *
    * @param index request entry index
    * @return -1 if index > entries[entries.size()-1].index, throw EntryCompactedException if index <
-   * dummyIndex, or return the entry's term for given index
+   *     dummyIndex, or return the entry's term for given index
    * @throws EntryCompactedException
    */
   public long maybeTerm(long index) throws EntryCompactedException {
@@ -124,7 +124,7 @@ public class CommittedEntryManager {
    * Pack entries from low through high - 1, just like slice (entries[low:high]). dummyIndex < low
    * <= high. Note that caller must ensure low <= high.
    *
-   * @param low  request index low bound
+   * @param low request index low bound
    * @param high request index upper bound
    */
   public List<Log> getEntries(long low, long high) {
@@ -136,14 +136,18 @@ public class CommittedEntryManager {
     if (low <= dummyIndex) {
       logger.debug(
           "entries low ({}) is out of bound dummyIndex ({}), adjust parameter 'low' to {}",
-          low, dummyIndex, dummyIndex);
+          low,
+          dummyIndex,
+          dummyIndex);
       low = dummyIndex + 1;
     }
     long lastIndex = getLastIndex();
     if (high > lastIndex + 1) {
       logger.debug(
           "entries high ({}) is out of bound lastIndex ({}), adjust parameter 'high' to {}",
-          high, lastIndex, lastIndex);
+          high,
+          lastIndex,
+          lastIndex);
       high = lastIndex + 1;
     }
     return entries.subList((int) (low - dummyIndex), (int) (high - dummyIndex));
@@ -155,7 +159,7 @@ public class CommittedEntryManager {
    *
    * @param index request entry index
    * @return null if index > entries[entries.size()-1].index, throw EntryCompactedException if index
-   * < dummyIndex, or return the entry's log for given index
+   *     < dummyIndex, or return the entry's log for given index
    * @throws EntryCompactedException
    */
   Log getEntry(long index) throws EntryCompactedException {
@@ -163,14 +167,16 @@ public class CommittedEntryManager {
     if (index < dummyIndex) {
       logger.debug(
           "invalid committedEntryManager getEntry: parameter: index({}) < compactIndex({})",
-          index, dummyIndex);
+          index,
+          dummyIndex);
       throw new EntryCompactedException(index, dummyIndex);
     }
     if ((int) (index - dummyIndex) >= entries.size()) {
       if (logger.isDebugEnabled()) {
         logger.debug(
             "invalid committedEntryManager getEntry : parameter: index({}) > lastIndex({})",
-            index, getLastIndex());
+            index,
+            getLastIndex());
       }
       return null;
     }
@@ -188,17 +194,19 @@ public class CommittedEntryManager {
     if (compactIndex < dummyIndex) {
       logger.info(
           "entries before request index ({}) have been compacted, and the compactIndex is ({})",
-          compactIndex, dummyIndex);
+          compactIndex,
+          dummyIndex);
       return;
     }
     if (compactIndex > getLastIndex()) {
-      logger
-          .info("compact ({}) is out of bound lastIndex ({})", compactIndex, getLastIndex());
+      logger.info("compact ({}) is out of bound lastIndex ({})", compactIndex, getLastIndex());
       throw new EntryUnavailableException(compactIndex, getLastIndex());
     }
     int index = (int) (compactIndex - dummyIndex);
-    entries.set(0, new EmptyContentLog(entries.get(index).getCurrLogIndex(),
-        entries.get(index).getCurrLogTerm()));
+    entries.set(
+        0,
+        new EmptyContentLog(
+            entries.get(index).getCurrLogIndex(), entries.get(index).getCurrLogTerm()));
     entries.subList(1, index + 1).clear();
   }
 
@@ -217,10 +225,12 @@ public class CommittedEntryManager {
     if (entries.size() - offset == 0) {
       entries.addAll(appendingEntries);
     } else if (entries.size() - offset > 0) {
-      throw new TruncateCommittedEntryException(appendingEntries.get(0).getCurrLogIndex(),
-          getLastIndex());
+      throw new TruncateCommittedEntryException(
+          appendingEntries.get(0).getCurrLogIndex(), getLastIndex());
     } else {
-      logger.error("missing log entry [last: {}, append at: {}]", getLastIndex(),
+      logger.error(
+          "missing log entry [last: {}, append at: {}]",
+          getLastIndex(),
           appendingEntries.get(0).getCurrLogIndex());
     }
   }
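
Every bounds check above reduces to one piece of arithmetic: a committed entry with raft
index i lives at list position i - dummyIndex, where entries.get(0) is a dummy marking
the compaction point. A small self-contained sketch of that mapping (names assumed from
the surrounding diff):

    import java.util.ArrayList;
    import java.util.List;

    public class DummyIndexSketch {
      public static void main(String[] args) {
        long dummyIndex = 100;           // raft index of the dummy entry at position 0
        List<String> entries = new ArrayList<>();
        entries.add("dummy");            // position 0 <-> raft index 100
        entries.add("log@101");          // position 1 <-> raft index 101
        entries.add("log@102");          // position 2 <-> raft index 102

        long requested = 102;
        if (requested <= dummyIndex) {
          System.out.println("compacted: the real class throws EntryCompactedException");
        } else {
          System.out.println(entries.get((int) (requested - dummyIndex))); // log@102
        }
      }
    }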
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java
index a3b0153..5eda202 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java
@@ -26,7 +26,6 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-
 import org.apache.iotdb.cluster.exception.EntryCompactedException;
 import org.apache.iotdb.cluster.log.LogApplier;
 import org.apache.iotdb.cluster.log.snapshot.FileSnapshot;
@@ -50,21 +49,23 @@ import org.slf4j.LoggerFactory;
  */
 public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogManager<FileSnapshot> {
 
-  private static final Logger logger = LoggerFactory
-      .getLogger(FilePartitionedSnapshotLogManager.class);
+  private static final Logger logger =
+      LoggerFactory.getLogger(FilePartitionedSnapshotLogManager.class);
 
-  public FilePartitionedSnapshotLogManager(LogApplier logApplier, PartitionTable partitionTable,
-      Node header, Node thisNode, DataGroupMember dataGroupMember) {
+  public FilePartitionedSnapshotLogManager(
+      LogApplier logApplier,
+      PartitionTable partitionTable,
+      Node header,
+      Node thisNode,
+      DataGroupMember dataGroupMember) {
     super(logApplier, partitionTable, header, thisNode, Factory.INSTANCE, dataGroupMember);
   }
 
-  /**
-   * send FlushPlan to all nodes in one dataGroup
-   */
+  /** send FlushPlan to all nodes in one dataGroup */
   private void syncFlushAllProcessor() {
     logger.info("{}: Start flush all storage group processor in one data group", getName());
-    Map<String, List<Pair<Long, Boolean>>> storageGroupPartitions = StorageEngine.getInstance()
-        .getWorkingStorageGroupPartitions();
+    Map<String, List<Pair<Long, Boolean>>> storageGroupPartitions =
+        StorageEngine.getInstance().getWorkingStorageGroupPartitions();
     if (storageGroupPartitions.size() == 0) {
       logger.info("{}: no need to flush processor", getName());
       return;
@@ -113,11 +114,10 @@ public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogMan
     // 1.collect tsfile
     collectTsFiles();
 
-    //2.register the measurement
+    // 2.register the measurement
     for (Map.Entry<Integer, Collection<TimeseriesSchema>> entry : slotTimeseries.entrySet()) {
       int slotNum = entry.getKey();
-      FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum,
-          s -> new FileSnapshot());
+      FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum, s -> new FileSnapshot());
       if (snapshot.getTimeseriesSchemas().isEmpty()) {
         snapshot.setTimeseriesSchemas(entry.getValue());
       }
@@ -126,8 +126,8 @@ public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogMan
 
   private void collectTsFiles() throws IOException {
     slotSnapshots.clear();
-    Map<PartialPath, Map<Long, List<TsFileResource>>> allClosedStorageGroupTsFile = StorageEngine
-        .getInstance().getAllClosedStorageGroupTsFile();
+    Map<PartialPath, Map<Long, List<TsFileResource>>> allClosedStorageGroupTsFile =
+        StorageEngine.getInstance().getAllClosedStorageGroupTsFile();
     List<TsFileResource> createdHardlinks = new ArrayList<>();
     // group the TsFiles by their slots
     for (Entry<PartialPath, Map<Long, List<TsFileResource>>> entry :
@@ -158,15 +158,22 @@ public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogMan
    * @param storageGroupName
    * @param createdHardlinks
    * @return true if all hardlinks are created successfully or false if some of them failed to
-   * create
+   *     create
    * @throws IOException
    */
-  private boolean collectTsFiles(Long partitionNum, List<TsFileResource> resourceList,
-      PartialPath storageGroupName, List<TsFileResource> createdHardlinks) throws IOException {
-    int slotNum = SlotPartitionTable.getSlotStrategy().calculateSlotByPartitionNum(storageGroupName.getFullPath(),
-        partitionNum, ((SlotPartitionTable) partitionTable).getTotalSlotNumbers());
-    FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum,
-        s -> new FileSnapshot());
+  private boolean collectTsFiles(
+      Long partitionNum,
+      List<TsFileResource> resourceList,
+      PartialPath storageGroupName,
+      List<TsFileResource> createdHardlinks)
+      throws IOException {
+    int slotNum =
+        SlotPartitionTable.getSlotStrategy()
+            .calculateSlotByPartitionNum(
+                storageGroupName.getFullPath(),
+                partitionNum,
+                ((SlotPartitionTable) partitionTable).getTotalSlotNumbers());
+    FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum, s -> new FileSnapshot());
     for (TsFileResource tsFileResource : resourceList) {
       TsFileResource hardlink = tsFileResource.createHardlink();
       if (hardlink == null) {
@@ -182,14 +189,14 @@ public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogMan
 
   /**
    * Check if the plan index of 'resource' overlaps any one in 'others' from the same time
-   * partition. For example, we have plan {1,2,3,4,5,6}, plan 1 and 6 are written into an
-   * unsequnce file Unseq1, and {2,3} and {4,5} are written to sequence files Seq1 and Seq2
-   * respectively (notice the numbers are just indexes, not timestamps, so they can be written
-   * anywhere if properly constructed). So Unseq1 both overlaps Seq1 and Seq2. If Unseq1 merges
-   * with Seq1 and generated Seq1' (ranges [1, 6]), it will also overlap with Seq2. But if Seq1'
-   * further merge with Seq2, its range remains to be [1,6], and we cannot find any other files
-   * that overlap with it, so we can conclude with confidence that the file contains all plans
-   * within [1,6].
+   * partition. For example, we have plans {1,2,3,4,5,6}; plans 1 and 6 are written into an
+   * unsequence file Unseq1, and {2,3} and {4,5} are written to sequence files Seq1 and Seq2
+   * respectively (notice the numbers are just indexes, not timestamps, so they can be written
+   * anywhere if properly constructed). So Unseq1 overlaps both Seq1 and Seq2. If Unseq1 merges
+   * with Seq1 and generates Seq1' (ranging over [1, 6]), it will also overlap with Seq2. But if
+   * Seq1' further merges with Seq2, its range remains [1,6], and we cannot find any other files
+   * that overlap with it, so we can conclude with confidence that the file contains all plans
+   * within [1,6].
+   *
    * @param resource
    * @param others
    * @return
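
collectTsFiles routes each (storage group, time partition) pair to a slot through
SlotPartitionTable.getSlotStrategy().calculateSlotByPartitionNum(...). The strategy
itself is not part of this diff; a hypothetical hash-mod sketch just to show the shape of
the mapping onto [0, totalSlots):

    public class SlotSketch {
      // Hypothetical strategy, NOT the project's implementation.
      static int calculateSlot(String storageGroup, long partitionNum, int totalSlots) {
        int hash = storageGroup.hashCode() * 31 + Long.hashCode(partitionNum);
        return Math.floorMod(hash, totalSlots); // floorMod keeps the slot non-negative
      }

      public static void main(String[] args) {
        System.out.println(calculateSlot("root.sg1", 0L, 10000));
      }
    }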
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java
index ff650e3..af1e4b5 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java
@@ -36,9 +36,7 @@ import org.apache.iotdb.db.service.IoTDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * MetaSingleSnapshotLogManager provides a MetaSimpleSnapshot as snapshot.
- */
+/** MetaSingleSnapshotLogManager provides a MetaSimpleSnapshot as snapshot. */
 public class MetaSingleSnapshotLogManager extends RaftLogManager {
 
   private static final Logger logger = LoggerFactory.getLogger(MetaSingleSnapshotLogManager.class);
@@ -75,8 +73,9 @@ public class MetaSingleSnapshotLogManager extends RaftLogManager {
 
   @Override
   public Snapshot getSnapshot(long minIndex) {
-    MetaSimpleSnapshot snapshot = new MetaSimpleSnapshot(storageGroupTTLMap, userMap, roleMap,
-        metaGroupMember.getPartitionTable().serialize());
+    MetaSimpleSnapshot snapshot =
+        new MetaSimpleSnapshot(
+            storageGroupTTLMap, userMap, roleMap, metaGroupMember.getPartitionTable().serialize());
     snapshot.setLastLogIndex(commitIndex);
     snapshot.setLastLogTerm(term);
     return snapshot;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
index 1ca26a2..7206645 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java
@@ -58,9 +58,13 @@ public abstract class PartitionedSnapshotLogManager<T extends Snapshot> extends
   Node thisNode;
   DataGroupMember dataGroupMember;
 
-
-  protected PartitionedSnapshotLogManager(LogApplier logApplier, PartitionTable partitionTable,
-      Node header, Node thisNode, SnapshotFactory<T> factory, DataGroupMember dataGroupMember) {
+  protected PartitionedSnapshotLogManager(
+      LogApplier logApplier,
+      PartitionTable partitionTable,
+      Node header,
+      Node thisNode,
+      SnapshotFactory<T> factory,
+      DataGroupMember dataGroupMember) {
     super(new SyncLogDequeSerializer(header.nodeIdentifier), logApplier, header.toString());
     this.partitionTable = partitionTable;
     this.factory = factory;
@@ -88,11 +92,13 @@ public abstract class PartitionedSnapshotLogManager<T extends Snapshot> extends
     List<StorageGroupMNode> allSgNodes = IoTDB.metaManager.getAllStorageGroupNodes();
     for (MNode sgNode : allSgNodes) {
       String storageGroupName = sgNode.getFullPath();
-      int slot = SlotPartitionTable.getSlotStrategy().calculateSlotByTime(storageGroupName, 0,
-          ((SlotPartitionTable) partitionTable).getTotalSlotNumbers());
+      int slot =
+          SlotPartitionTable.getSlotStrategy()
+              .calculateSlotByTime(
+                  storageGroupName, 0, ((SlotPartitionTable) partitionTable).getTotalSlotNumbers());
 
-      Collection<TimeseriesSchema> schemas = slotTimeseries.computeIfAbsent(slot,
-          s -> new HashSet<>());
+      Collection<TimeseriesSchema> schemas =
+          slotTimeseries.computeIfAbsent(slot, s -> new HashSet<>());
       IoTDB.metaManager.collectTimeseriesSchema(sgNode, schemas);
       logger.debug("{}: {} timeseries are snapshot in slot {}", getName(), schemas.size(), slot);
     }
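
The computeIfAbsent calls above are a standard group-by idiom: fetch the bucket for a
slot, creating it on first use, so callers never branch on containsKey. A minimal sketch
mirroring slotTimeseries.computeIfAbsent(slot, s -> new HashSet<>()):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class GroupBySlotSketch {
      public static void main(String[] args) {
        Map<Integer, Set<String>> slotTimeseries = new HashMap<>();
        int slot = 42;
        slotTimeseries.computeIfAbsent(slot, s -> new HashSet<>()).add("root.sg1.d1.s1");
        slotTimeseries.computeIfAbsent(slot, s -> new HashSet<>()).add("root.sg1.d1.s2");
        System.out.println(slotTimeseries.get(slot).size()); // 2: both series share one set
      }
    }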
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
index c27cf9e..1b1aa90 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
@@ -52,19 +52,13 @@ public abstract class RaftLogManager {
 
   private static final Logger logger = LoggerFactory.getLogger(RaftLogManager.class);
 
-  /**
-   * manage uncommitted entries
-   */
+  /** manage uncommitted entries */
   private UnCommittedEntryManager unCommittedEntryManager;
 
-  /**
-   * manage committed entries in memory as a cache
-   */
+  /** manage committed entries in memory as a cache */
   private CommittedEntryManager committedEntryManager;
 
-  /**
-   * manage committed entries in disk for safety
-   */
+  /** manage committed entries in disk for safety */
   private StableEntryManager stableEntryManager;
 
   private long commitIndex;
@@ -76,6 +70,7 @@ public abstract class RaftLogManager {
    * used for asyncLogApplier
    */
   private volatile long maxHaveAppliedCommitIndex;
+
   private final Object changeApplyCommitIndexCond = new Object();
 
   /**
@@ -84,12 +79,9 @@ public abstract class RaftLogManager {
    */
   private volatile long blockAppliedCommitIndex;
 
-
   private LogApplier logApplier;
 
-  /**
-   * to distinguish managers of different members
-   */
+  /** to distinguish managers of different members */
   private String name;
 
   private ScheduledExecutorService deleteLogExecutorService;
@@ -98,17 +90,13 @@ public abstract class RaftLogManager {
   private ExecutorService checkLogApplierExecutorService;
   private Future<?> checkLogApplierFuture;
 
-  /**
-   * minimum number of committed logs in memory
-   */
-  private int minNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig()
-      .getMinNumOfLogsInMem();
+  /** minimum number of committed logs in memory */
+  private int minNumOfLogsInMem =
+      ClusterDescriptor.getInstance().getConfig().getMinNumOfLogsInMem();
 
-  /**
-   * maximum number of committed logs in memory
-   */
-  private int maxNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig()
-      .getMaxNumOfLogsInMem();
+  /** maximum number of committed logs in memory */
+  private int maxNumOfLogsInMem =
+      ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem();
 
   /**
    * Each time new logs are appended, this condition will be notified so logs that have larger
@@ -132,14 +120,12 @@ public abstract class RaftLogManager {
     long last = getCommittedEntryManager().getLastIndex();
     this.setUnCommittedEntryManager(new UnCommittedEntryManager(last + 1));
 
-    /**
-     * must have applied entry [compactIndex,last] to state machine
-     */
+    /** must have applied entry [compactIndex,last] to state machine */
     this.commitIndex = last;
 
     /**
-     * due to the log operation is idempotent, so we can just reapply the log from the
-     * first index of committed logs
+     * because the log operation is idempotent, we can just reapply the log from the first index
+     * of committed logs
      */
     this.maxHaveAppliedCommitIndex = first;
 
@@ -147,32 +133,37 @@ public abstract class RaftLogManager {
 
     this.blockedUnappliedLogList = new CopyOnWriteArrayList<>();
 
-    this.deleteLogExecutorService = new ScheduledThreadPoolExecutor(1,
-        new BasicThreadFactory.Builder().namingPattern("raft-log-delete-" + name).daemon(true)
-            .build());
-
-    this.checkLogApplierExecutorService = Executors.newSingleThreadExecutor(
-        new BasicThreadFactory.Builder().namingPattern("check-log-applier-" + name).daemon(true)
-            .build());
-
-    /**
-     * deletion check period of the submitted log
-     */
-    int logDeleteCheckIntervalSecond = ClusterDescriptor.getInstance().getConfig()
-        .getLogDeleteCheckIntervalSecond();
+    this.deleteLogExecutorService =
+        new ScheduledThreadPoolExecutor(
+            1,
+            new BasicThreadFactory.Builder()
+                .namingPattern("raft-log-delete-" + name)
+                .daemon(true)
+                .build());
+
+    this.checkLogApplierExecutorService =
+        Executors.newSingleThreadExecutor(
+            new BasicThreadFactory.Builder()
+                .namingPattern("check-log-applier-" + name)
+                .daemon(true)
+                .build());
+
+    /** deletion check period of the submitted log */
+    int logDeleteCheckIntervalSecond =
+        ClusterDescriptor.getInstance().getConfig().getLogDeleteCheckIntervalSecond();
 
     if (logDeleteCheckIntervalSecond > 0) {
-      this.deleteLogFuture = deleteLogExecutorService
-          .scheduleAtFixedRate(this::checkDeleteLog, logDeleteCheckIntervalSecond,
+      this.deleteLogFuture =
+          deleteLogExecutorService.scheduleAtFixedRate(
+              this::checkDeleteLog,
+              logDeleteCheckIntervalSecond,
               logDeleteCheckIntervalSecond,
               TimeUnit.SECONDS);
     }
 
     this.checkLogApplierFuture = checkLogApplierExecutorService.submit(this::checkAppliedLogIndex);
 
-    /**
-     * flush log to file periodically
-     */
+    /** flush log to file periodically */
     if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) {
       this.applyAllCommittedLogWhenStartUp();
     }
@@ -190,9 +181,10 @@ public abstract class RaftLogManager {
 
   /**
    * IMPORTANT!!!
-   * <p>
-   * The subclass's takeSnapshot() must call this method to insure that all logs have been applied
-   * before take snapshot
+   *
+   * <p>The subclass's takeSnapshot() must call this method to ensure that all logs have been
+   * applied before taking a snapshot
+   *
    * <p>
    *
    * @throws IOException timeout exception
@@ -207,16 +199,21 @@ public abstract class RaftLogManager {
     }
     logger.info(
         "{}: before take snapshot, blockAppliedCommitIndex={}, maxHaveAppliedCommitIndex={}, commitIndex={}",
-        name, blockAppliedCommitIndex, maxHaveAppliedCommitIndex, commitIndex);
+        name,
+        blockAppliedCommitIndex,
+        maxHaveAppliedCommitIndex,
+        commitIndex);
     while (blockAppliedCommitIndex > maxHaveAppliedCommitIndex) {
       long waitTime = System.currentTimeMillis() - startTime;
-      if (waitTime > ClusterDescriptor.getInstance().getConfig()
-          .getCatchUpTimeoutMS()) {
+      if (waitTime > ClusterDescriptor.getInstance().getConfig().getCatchUpTimeoutMS()) {
         logger.error(
             "{}: wait all log applied time out, time cost={}, blockAppliedCommitIndex={}, maxHaveAppliedCommitIndex={},commitIndex={}",
-            name, waitTime, blockAppliedCommitIndex, maxHaveAppliedCommitIndex, commitIndex);
-        throw new IOException(
-            "wait all log applied time out");
+            name,
+            waitTime,
+            blockAppliedCommitIndex,
+            maxHaveAppliedCommitIndex,
+            commitIndex);
+        throw new IOException("wait all log applied time out");
       }
     }
   }
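
The loop above spins until the appliers catch up with blockAppliedCommitIndex or the
catch-up timeout elapses. A compact sketch of the same bounded-wait shape, with the
timeout constant assumed as a stand-in for the cluster config value:

    import java.io.IOException;
    import java.util.function.BooleanSupplier;

    public class WaitAppliedSketch {
      private static final long CATCH_UP_TIMEOUT_MS = 60_000; // assumed stand-in

      static void waitUntil(BooleanSupplier applied) throws IOException {
        long start = System.currentTimeMillis();
        while (!applied.getAsBoolean()) {
          if (System.currentTimeMillis() - start > CATCH_UP_TIMEOUT_MS) {
            throw new IOException("wait all log applied time out");
          }
        }
      }
    }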
@@ -275,7 +272,7 @@ public abstract class RaftLogManager {
    *
    * @param index request entry index
    * @return throw EntryCompactedException if index < dummyIndex, -1 if index > lastIndex or the
-   * entry is compacted, otherwise return the entry's term for given index
+   *     entry is compacted, otherwise return the entry's term for given index
    * @throws EntryCompactedException
    */
   public long getTerm(long index) throws EntryCompactedException {
@@ -319,8 +316,7 @@ public abstract class RaftLogManager {
     try {
       term = getTerm(getLastLogIndex());
     } catch (Exception e) {
-      logger
-          .error("{}: unexpected error when getting the last term : {}", name, e.getMessage());
+      logger.error("{}: unexpected error when getting the last term : {}", name, e.getMessage());
     }
     return term;
   }
@@ -335,8 +331,7 @@ public abstract class RaftLogManager {
     try {
       term = getTerm(getCommitLogIndex());
     } catch (Exception e) {
-      logger
-          .error("{}: unexpected error when getting the last term : {}", name, e.getMessage());
+      logger.error("{}: unexpected error when getting the last term : {}", name, e.getMessage());
     }
     return term;
   }
@@ -345,11 +340,11 @@ public abstract class RaftLogManager {
    * Used by follower node to support leader's complicated log replication rpc parameters and try to
    * commit entries.
    *
-   * @param lastIndex    leader's matchIndex for this follower node
-   * @param lastTerm     the entry's term which index is leader's matchIndex for this follower node
+   * @param lastIndex leader's matchIndex for this follower node
+   * @param lastTerm the entry's term which index is leader's matchIndex for this follower node
    * @param leaderCommit leader's commitIndex
-   * @param entries      entries sent from the leader node Note that the leader must ensure
-   *                     entries[0].index = lastIndex + 1
+   * @param entries entries sent from the leader node Note that the leader must ensure
+   *     entries[0].index = lastIndex + 1
    * @return -1 if the entries cannot be appended, otherwise the last index of new entries
    */
   public long maybeAppend(long lastIndex, long lastTerm, long leaderCommit, List<Log> entries) {
@@ -358,13 +353,18 @@ public abstract class RaftLogManager {
       long ci = findConflict(entries);
       if (ci <= commitIndex) {
         if (ci != -1) {
-          logger
-              .error("{}: entry {} conflict with committed entry [commitIndex({})]", name, ci,
-                  commitIndex);
+          logger.error(
+              "{}: entry {} conflict with committed entry [commitIndex({})]",
+              name,
+              ci,
+              commitIndex);
         } else {
           if (logger.isDebugEnabled() && !entries.isEmpty()) {
-            logger.debug("{}: Appending entries [{} and other {} logs] all exist locally",
-                name, entries.get(0), entries.size() - 1);
+            logger.debug(
+                "{}: Appending entries [{} and other {} logs] all exist locally",
+                name,
+                entries.get(0),
+                entries.size() - 1);
           }
         }
 
@@ -386,20 +386,21 @@ public abstract class RaftLogManager {
    * Used by follower node to support leader's complicated log replication rpc parameters and try to
    * commit entry.
    *
-   * @param lastIndex    leader's matchIndex for this follower node
-   * @param lastTerm     the entry's term which index is leader's matchIndex for this follower node
+   * @param lastIndex leader's matchIndex for this follower node
+   * @param lastTerm the entry's term which index is leader's matchIndex for this follower node
    * @param leaderCommit leader's commitIndex
-   * @param entry        entry sent from the leader node
+   * @param entry entry sent from the leader node
    * @return -1 if the entries cannot be appended, otherwise the last index of new entries
    */
   public long maybeAppend(long lastIndex, long lastTerm, long leaderCommit, Log entry) {
     if (matchTerm(lastTerm, lastIndex)) {
       long newLastIndex = lastIndex + 1;
       if (entry.getCurrLogIndex() <= commitIndex) {
-        logger
-            .debug("{}: entry {} conflict with committed entry [commitIndex({})]",
-                name, entry.getCurrLogIndex(),
-                commitIndex);
+        logger.debug(
+            "{}: entry {} conflict with committed entry [commitIndex({})]",
+            name,
+            entry.getCurrLogIndex(),
+            commitIndex);
       } else {
         append(entry);
       }
@@ -430,8 +431,8 @@ public abstract class RaftLogManager {
       return -1;
     }
     getUnCommittedEntryManager().truncateAndAppend(entries);
-    Object logUpdateCondition = getLogUpdateCondition(
-        entries.get(entries.size() - 1).getCurrLogIndex());
+    Object logUpdateCondition =
+        getLogUpdateCondition(entries.get(entries.size() - 1).getCurrLogIndex());
     synchronized (logUpdateCondition) {
       logUpdateCondition.notifyAll();
     }
@@ -463,7 +464,7 @@ public abstract class RaftLogManager {
    * Used by leader node to try to commit entries.
    *
    * @param leaderCommit leader's commitIndex
-   * @param term         the entry's term which index is leaderCommit in leader's log module
+   * @param term the entry's term which index is leaderCommit in leader's log module
    * @return true or false
    */
   public synchronized boolean maybeCommit(long leaderCommit, long term) {
@@ -484,8 +485,11 @@ public abstract class RaftLogManager {
    * @param snapshot leader's snapshot
    */
   public void applySnapshot(Snapshot snapshot) {
-    logger.info("{}: log module starts to restore snapshot [index: {}, term: {}]",
-        name, snapshot.getLastLogIndex(), snapshot.getLastLogTerm());
+    logger.info(
+        "{}: log module starts to restore snapshot [index: {}, term: {}]",
+        name,
+        snapshot.getLastLogIndex(),
+        snapshot.getLastLogTerm());
     try {
       getCommittedEntryManager().compactEntries(snapshot.getLastLogIndex());
       getStableEntryManager().removeCompactedEntries(snapshot.getLastLogIndex());
@@ -514,20 +518,20 @@ public abstract class RaftLogManager {
    * then whichever log has the larger lastIndex is more up-to-date. If the logs are the same, the
    * given log is up-to-date.
    *
-   * @param lastTerm  candidate's lastTerm
+   * @param lastTerm candidate's lastTerm
    * @param lastIndex candidate's lastIndex
    * @return true or false
    */
   public boolean isLogUpToDate(long lastTerm, long lastIndex) {
-    return lastTerm > getLastLogTerm() || (lastTerm == getLastLogTerm()
-        && lastIndex >= getLastLogIndex());
+    return lastTerm > getLastLogTerm()
+        || (lastTerm == getLastLogTerm() && lastIndex >= getLastLogIndex());
   }
 
   /**
    * Pack entries from low through high - 1, just like slice (entries[low:high]). firstIndex <= low
    * <= high <= lastIndex.
    *
-   * @param low  request index low bound
+   * @param low request index low bound
    * @param high request index upper bound
    */
   public List<Log> getEntries(long low, long high) {
@@ -557,8 +561,7 @@ public abstract class RaftLogManager {
     long startTime = Statistic.RAFT_SENDER_COMMIT_GET_LOGS.getOperationStartTime();
     long lo = getUnCommittedEntryManager().getFirstUnCommittedIndex();
     long hi = newCommitIndex + 1;
-    List<Log> entries = new ArrayList<>(getUnCommittedEntryManager()
-        .getEntries(lo, hi));
+    List<Log> entries = new ArrayList<>(getUnCommittedEntryManager().getEntries(lo, hi));
     Statistic.RAFT_SENDER_COMMIT_GET_LOGS.calOperationCostTimeFromStart(startTime);
 
     if (entries.isEmpty()) {
@@ -568,10 +571,12 @@ public abstract class RaftLogManager {
     long commitLogIndex = getCommitLogIndex();
     long firstLogIndex = entries.get(0).getCurrLogIndex();
     if (commitLogIndex >= firstLogIndex) {
-      logger.warn("Committing logs that has already been committed: {} >= {}", commitLogIndex,
+      logger.warn(
+          "Committing logs that has already been committed: {} >= {}",
+          commitLogIndex,
           firstLogIndex);
-      entries.subList(0,
-          (int) (getCommitLogIndex() - entries.get(0).getCurrLogIndex() + 1))
+      entries
+          .subList(0, (int) (getCommitLogIndex() - entries.get(0).getCurrLogIndex() + 1))
           .clear();
     }
     try {
@@ -603,8 +608,10 @@ public abstract class RaftLogManager {
 
       long unappliedLogSize = commitLogIndex - maxHaveAppliedCommitIndex;
       if (unappliedLogSize > ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()) {
-        logger.debug("There are too many unapplied logs [{}], wait for a while to avoid memory "
-            + "overflow", unappliedLogSize);
+        logger.debug(
+            "There are too many unapplied logs [{}], wait for a while to avoid memory "
+                + "overflow",
+            unappliedLogSize);
         Thread.sleep(
             unappliedLogSize - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem());
       }
@@ -620,7 +627,7 @@ public abstract class RaftLogManager {
   /**
    * Returns whether the index and term passed in match.
    *
-   * @param term  request entry term
+   * @param term request entry term
    * @param index request entry index
    * @return true or false
    */
@@ -658,7 +665,7 @@ public abstract class RaftLogManager {
    * Check whether the parameters passed in satisfy the following properties. firstIndex <= low <=
    * high.
    *
-   * @param low  request index low bound
+   * @param low request index low bound
    * @param high request index upper bound
    * @throws EntryCompactedException
    * @throws GetEntriesWrongParametersException
@@ -671,8 +678,8 @@ public abstract class RaftLogManager {
     }
     long first = getFirstIndex();
     if (low < first) {
-      logger.error("{}: CheckBound out of index: parameter: {} , lower bound: {} ", name, low,
-          high);
+      logger.error(
+          "{}: CheckBound out of index: parameter: {}, lower bound: {}", name, low, first);
       throw new EntryCompactedException(low, first);
     }
   }
@@ -693,8 +700,7 @@ public abstract class RaftLogManager {
     for (Log entry : entries) {
       if (!matchTerm(entry.getCurrLogTerm(), entry.getCurrLogIndex())) {
         if (entry.getCurrLogIndex() <= getLastLogIndex()) {
-          logger.info("found conflict at index {}",
-              entry.getCurrLogIndex());
+          logger.info("found conflict at index {}", entry.getCurrLogIndex());
         }
         return entry.getCurrLogIndex();
       }
@@ -703,7 +709,8 @@ public abstract class RaftLogManager {
   }
 
   @TestOnly
-  protected RaftLogManager(CommittedEntryManager committedEntryManager,
+  protected RaftLogManager(
+      CommittedEntryManager committedEntryManager,
       StableEntryManager stableEntryManager,
       LogApplier applier) {
     this.setCommittedEntryManager(committedEntryManager);
@@ -716,9 +723,12 @@ public abstract class RaftLogManager {
     this.maxHaveAppliedCommitIndex = first;
     this.blockAppliedCommitIndex = -1;
     this.blockedUnappliedLogList = new CopyOnWriteArrayList<>();
-    this.checkLogApplierExecutorService = Executors.newSingleThreadExecutor(
-        new BasicThreadFactory.Builder().namingPattern("check-log-applier-" + name).daemon(true)
-            .build());
+    this.checkLogApplierExecutorService =
+        Executors.newSingleThreadExecutor(
+            new BasicThreadFactory.Builder()
+                .namingPattern("check-log-applier-" + name)
+                .daemon(true)
+                .build());
     this.checkLogApplierFuture = checkLogApplierExecutorService.submit(this::checkAppliedLogIndex);
     for (int i = 0; i < logUpdateConditions.length; i++) {
       logUpdateConditions[i] = new Object();
@@ -779,8 +789,7 @@ public abstract class RaftLogManager {
     return unCommittedEntryManager;
   }
 
-  private void setUnCommittedEntryManager(
-      UnCommittedEntryManager unCommittedEntryManager) {
+  private void setUnCommittedEntryManager(UnCommittedEntryManager unCommittedEntryManager) {
     this.unCommittedEntryManager = unCommittedEntryManager;
   }
 
@@ -788,8 +797,7 @@ public abstract class RaftLogManager {
     return committedEntryManager;
   }
 
-  private void setCommittedEntryManager(
-      CommittedEntryManager committedEntryManager) {
+  private void setCommittedEntryManager(CommittedEntryManager committedEntryManager) {
     this.committedEntryManager = committedEntryManager;
   }
 
@@ -805,9 +813,7 @@ public abstract class RaftLogManager {
     return maxHaveAppliedCommitIndex;
   }
 
-  /**
-   * check whether delete the committed log
-   */
+  /** check whether delete the committed log */
   void checkDeleteLog() {
     try {
       synchronized (this) {
@@ -827,26 +833,34 @@ public abstract class RaftLogManager {
       return;
     }
 
-    long compactIndex = Math
-        .min(committedEntryManager.getDummyIndex() + removeSize, maxHaveAppliedCommitIndex - 1);
+    long compactIndex =
+        Math.min(committedEntryManager.getDummyIndex() + removeSize, maxHaveAppliedCommitIndex - 1);
     try {
       logger.debug(
           "{}: Before compaction index {}-{}, compactIndex {}, removeSize {}, committedLogSize "
               + "{}, maxAppliedLog {}",
-          name, getFirstIndex(), getLastLogIndex(), compactIndex, removeSize,
-          committedEntryManager.getTotalSize(), maxHaveAppliedCommitIndex);
+          name,
+          getFirstIndex(),
+          getLastLogIndex(),
+          compactIndex,
+          removeSize,
+          committedEntryManager.getTotalSize(),
+          maxHaveAppliedCommitIndex);
       getCommittedEntryManager().compactEntries(compactIndex);
       if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) {
         getStableEntryManager().removeCompactedEntries(compactIndex);
       }
-      logger.debug("{}: After compaction index {}-{}, committedLogSize {}", name,
-          getFirstIndex(), getLastLogIndex(), committedEntryManager.getTotalSize());
+      logger.debug(
+          "{}: After compaction index {}-{}, committedLogSize {}",
+          name,
+          getFirstIndex(),
+          getLastLogIndex(),
+          committedEntryManager.getTotalSize());
     } catch (EntryUnavailableException e) {
       logger.error("{}: regular compact log entries failed, error={}", name, e.getMessage());
     }
   }
 
-
   public Object getLogUpdateCondition(long logIndex) {
     return logUpdateConditions[(int) (logIndex % logUpdateConditions.length)];
   }
@@ -855,8 +869,11 @@ public abstract class RaftLogManager {
     long lo = maxHaveAppliedCommitIndex;
     long hi = getCommittedEntryManager().getLastIndex() + 1;
     if (lo >= hi) {
-      logger.info("{}: the maxHaveAppliedCommitIndex={}, lastIndex={}, no need to reapply",
-          name, maxHaveAppliedCommitIndex, hi);
+      logger.info(
+          "{}: the maxHaveAppliedCommitIndex={}, lastIndex={}, no need to reapply",
+          name,
+          maxHaveAppliedCommitIndex,
+          hi);
       return;
     }
 
@@ -872,16 +889,18 @@ public abstract class RaftLogManager {
         logger.error("{}, an exception occurred when checking the applied log index", name, e);
       }
     }
-    logger.info("{}, the check-log-applier thread {} is interrupted", name,
+    logger.info(
+        "{}, the check-log-applier thread {} is interrupted",
+        name,
         Thread.currentThread().getName());
   }
 
   void doCheckAppliedLogIndex() {
     long nextToCheckIndex = maxHaveAppliedCommitIndex + 1;
     try {
-      if (nextToCheckIndex > commitIndex || nextToCheckIndex > getCommittedEntryManager()
-          .getLastIndex() || (blockAppliedCommitIndex > 0
-          && blockAppliedCommitIndex < nextToCheckIndex)) {
+      if (nextToCheckIndex > commitIndex
+          || nextToCheckIndex > getCommittedEntryManager().getLastIndex()
+          || (blockAppliedCommitIndex > 0 && blockAppliedCommitIndex < nextToCheckIndex)) {
         // avoid spinning
         Thread.sleep(5);
         return;
@@ -890,7 +909,9 @@ public abstract class RaftLogManager {
       if (log == null || log.getCurrLogIndex() != nextToCheckIndex) {
         logger.warn(
             "{}, get log error when checking the applied log index, log={}, nextToCheckIndex={}",
-            name, log, nextToCheckIndex);
+            name,
+            log,
+            nextToCheckIndex);
         return;
       }
       synchronized (log) {
@@ -905,7 +926,11 @@ public abstract class RaftLogManager {
       }
       logger.debug(
           "{}: log={} is applied, nextToCheckIndex={}, commitIndex={}, maxHaveAppliedCommitIndex={}",
-          name, log, nextToCheckIndex, commitIndex, maxHaveAppliedCommitIndex);
+          name,
+          log,
+          nextToCheckIndex,
+          commitIndex,
+          maxHaveAppliedCommitIndex);
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       logger.info("{}: do check applied log index is interrupt", name);
@@ -914,9 +939,13 @@ public abstract class RaftLogManager {
         // maxHaveAppliedCommitIndex may change if a snapshot is applied concurrently
         maxHaveAppliedCommitIndex = Math.max(maxHaveAppliedCommitIndex, nextToCheckIndex);
       }
-      logger.debug("{}: compacted log is assumed applied, nextToCheckIndex={}, commitIndex={}, "
+      logger.debug(
+          "{}: compacted log is assumed applied, nextToCheckIndex={}, commitIndex={}, "
               + "maxHaveAppliedCommitIndex={}",
-          name, nextToCheckIndex, commitIndex, maxHaveAppliedCommitIndex);
+          name,
+          nextToCheckIndex,
+          commitIndex,
+          maxHaveAppliedCommitIndex);
     }
   }
 
@@ -940,9 +969,7 @@ public abstract class RaftLogManager {
     this.blockAppliedCommitIndex = blockAppliedCommitIndex;
   }
 
-  /**
-   * Apply the committed logs that were previously blocked by `blockAppliedCommitIndex` if any.
-   */
+  /** Apply the committed logs that were previously blocked by `blockAppliedCommitIndex` if any. */
   private void reapplyBlockedLogs() {
     if (!blockedUnappliedLogList.isEmpty()) {
       applyEntries(blockedUnappliedLogList);
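
Taken together, the follower-side hunks above implement the standard Raft acceptance
rule: accept entries only when (lastTerm, lastIndex) matches the local log, truncate any
conflicting suffix, then advance the commit index to min(leaderCommit, newLastIndex). A
condensed sketch of that control flow with the helpers stubbed out; the real class also
resolves conflicts via findConflict and persists through the entry managers:

    import java.util.List;

    public abstract class MaybeAppendSketch<LogT> {
      abstract boolean matchTerm(long term, long index); // does the local log agree?
      abstract long append(List<LogT> entries);          // truncate-and-append
      abstract boolean maybeCommit(long commitIndex, long term);

      long maybeAppend(long lastIndex, long lastTerm, long leaderCommit, List<LogT> entries) {
        if (!matchTerm(lastTerm, lastIndex)) {
          return -1; // the leader must retry with an earlier (index, term) pair
        }
        long newLastIndex = lastIndex + entries.size();
        append(entries);
        maybeCommit(Math.min(leaderCommit, newLastIndex), lastTerm);
        return newLastIndex;
      }
    }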
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java
index d83aed7..262f7a2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java
@@ -51,7 +51,6 @@ public class UnCommittedEntryManager {
     return offset;
   }
 
-
   /**
    * Return last entry's index if this instance has at least one uncommitted entry.
    *
@@ -76,7 +75,7 @@ public class UnCommittedEntryManager {
    *
    * @param index request entry index
    * @return -1 if index < offset, or index > last or entries is empty, or return the entry's term
-   * for given index
+   *     for given index
    * @throws EntryUnavailableException
    */
   @SuppressWarnings("java:S1135") // ignore todos
@@ -86,7 +85,8 @@ public class UnCommittedEntryManager {
       if (entryPos < 0) {
         logger.debug(
             "invalid unCommittedEntryManager maybeTerm : parameter: index({}) < offset({})",
-            index, index - entryPos);
+            index,
+            index - entryPos);
         return -1;
       }
       long last = maybeLastIndex();
@@ -148,8 +148,7 @@ public class UnCommittedEntryManager {
     Log firstAppendingEntry = appendingEntries.get(0);
     Log lastAppendingEntry = appendingEntries.get(appendingEntries.size() - 1);
     if (maybeTerm(firstAppendingEntry.getCurrLogIndex()) == firstAppendingEntry.getCurrLogTerm()
-        &&
-        maybeTerm(lastAppendingEntry.getCurrLogIndex()) == lastAppendingEntry.getCurrLogTerm()) {
+        && maybeTerm(lastAppendingEntry.getCurrLogIndex()) == lastAppendingEntry.getCurrLogTerm()) {
       // skip existing entry
       return;
     }
@@ -157,9 +156,9 @@ public class UnCommittedEntryManager {
     long after = appendingEntries.get(0).getCurrLogIndex();
     long len = after - offset;
     if (len < 0) {
-      // the logs are being truncated to before our current offset portion, which is committed entries
-      logger.error("The logs which first index is {} are going to truncate committed logs",
-          after);
+      // the incoming logs would truncate entries before our current offset, which are already
+      // committed
+      logger.error("The logs whose first index is {} are going to truncate committed logs", after);
     } else if (len == entries.size()) {
       // after is the next index in the entries
       // directly append
@@ -192,9 +191,9 @@ public class UnCommittedEntryManager {
     long after = appendingEntry.getCurrLogIndex();
     long len = after - offset;
     if (len < 0) {
-      // the logs are being truncated to before our current offset portion, which is committed entries
-      logger.error("The logs which first index is {} are going to truncate committed logs",
-          after);
+      // the incoming logs would truncate entries before our current offset, which are already
+      // committed
+      logger.error("The logs whose first index is {} are going to truncate committed logs", after);
     } else if (len == entries.size()) {
       // after is the next index in the entries
       // directly append
@@ -202,8 +201,8 @@ public class UnCommittedEntryManager {
     } else {
       // clear conflict entries
       // then append
-      logger.info("truncate the entries after index {}, append a new entry {}", after,
-          appendingEntry);
+      logger.info(
+          "truncate the entries after index {}, append a new entry {}", after, appendingEntry);
       int truncateIndex = (int) (after - offset);
       if (truncateIndex < entries.size()) {
         entries.subList(truncateIndex, entries.size()).clear();
@@ -216,15 +215,14 @@ public class UnCommittedEntryManager {
    * Pack entries from low through high - 1, just like slice (entries[low:high]). offset <= low <=
    * high. Note that caller must ensure low <= high.
    *
-   * @param low  request index low bound
+   * @param low request index low bound
    * @param high request index upper bound
    */
   public List<Log> getEntries(long low, long high) {
     if (low > high) {
       if (logger.isDebugEnabled()) {
-        logger
-            .debug("invalid unCommittedEntryManager getEntries: parameter: low({}) > high({})",
-                low, high);
+        logger.debug(
+            "invalid unCommittedEntryManager getEntries: parameter: low({}) > high({})", low, high);
       }
       return Collections.emptyList();
     }
@@ -234,18 +232,29 @@ public class UnCommittedEntryManager {
       // getEntries(low, Integer.MAX_VALUE) if low is larger than lastIndex.
       logger.info(
           "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}] , return empty ArrayList",
-          low, high, offset, upper);
+          low,
+          high,
+          offset,
+          upper);
       return Collections.emptyList();
     }
     if (low < offset) {
-      logger.debug("unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}]", low,
-          high, offset, upper);
+      logger.debug(
+          "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}]",
+          low,
+          high,
+          offset,
+          upper);
       low = offset;
     }
     if (high > upper) {
       logger.info(
           "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}] , adjust parameter 'high' to {}",
-          low, high, offset, upper, upper);
+          low,
+          high,
+          offset,
+          upper,
+          upper);
       // don't throw an exception to support getEntries(low, Integer.MAX_VALUE).
       high = upper;
     }
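
truncateAndAppend above branches on len = after - offset: negative means the incoming
batch would overwrite already-committed entries (an error), len equal to entries.size()
is a clean tail append, and anything in between truncates the conflicting suffix first.
A sketch of the three cases over a list of indexes:

    import java.util.ArrayList;
    import java.util.List;

    public class TruncateAppendSketch {
      public static void main(String[] args) {
        long offset = 10;                  // index of entries.get(0)
        List<Long> entries = new ArrayList<>(List.of(10L, 11L, 12L));
        long after = 11;                   // first index of the incoming batch
        long len = after - offset;
        if (len < 0) {
          System.out.println("would truncate committed logs: reject");
        } else if (len == entries.size()) {
          entries.add(after);              // clean append at the tail
        } else {
          entries.subList((int) len, entries.size()).clear(); // drop the conflicting suffix
          entries.add(after);
        }
        System.out.println(entries);       // [10, 11]
      }
    }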
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java
index 936c198..d536df0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java
@@ -66,11 +66,16 @@ public class LogManagerMeta {
   @Override
   public String toString() {
     return "LogManagerMeta{"
-        + " commitLogTerm=" + commitLogTerm
-        + ", commitLogIndex=" + commitLogIndex
-        + ", lastLogIndex=" + lastLogIndex
-        + ", lastLogTerm=" + lastLogTerm
-        + ", maxHaveAppliedCommitIndex=" + maxHaveAppliedCommitIndex
+        + " commitLogTerm="
+        + commitLogTerm
+        + ", commitLogIndex="
+        + commitLogIndex
+        + ", lastLogIndex="
+        + lastLogIndex
+        + ", lastLogTerm="
+        + lastLogTerm
+        + ", maxHaveAppliedCommitIndex="
+        + maxHaveAppliedCommitIndex
         + "}";
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
index 9a50d2d..66fc3a1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
@@ -68,14 +68,10 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   private static final String LOG_DATA_FILE_SUFFIX = "data";
   private static final String LOG_INDEX_FILE_SUFFIX = "idx";
 
-  /**
-   * the log data files
-   */
+  /** the log data files */
   private List<File> logDataFileList;
 
-  /**
-   * the log index files
-   */
+  /** the log index files */
   private List<File> logIndexFileList;
 
   private LogParser parser = LogParser.getINSTANCE();
@@ -85,54 +81,49 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   private LogManagerMeta meta;
   private HardState state;
 
-  /**
-   * min version of available log
-   */
+  /** min version of available log */
   private long minAvailableVersion = 0;
 
-  /**
-   * max version of available log
-   */
+  /** max version of available log */
   private long maxAvailableVersion = Long.MAX_VALUE;
 
   private String logDir;
 
   private VersionController versionController;
 
-  private ByteBuffer logDataBuffer = ByteBuffer
-      .allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize());
-  private ByteBuffer logIndexBuffer = ByteBuffer
-      .allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize());
+  private ByteBuffer logDataBuffer =
+      ByteBuffer.allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize());
+  private ByteBuffer logIndexBuffer =
+      ByteBuffer.allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize());
 
   private long offsetOfTheCurrentLogDataOutputStream = 0;
 
-  private static final int MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK = ClusterDescriptor.getInstance()
-      .getConfig().getMaxNumberOfLogsPerFetchOnDisk();
+  private static final int MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK =
+      ClusterDescriptor.getInstance().getConfig().getMaxNumberOfLogsPerFetchOnDisk();
 
   private static final String LOG_META = "logMeta";
   private static final String LOG_META_TMP = "logMeta.tmp";
 
-
   /**
    * file name pattern:
-   * <p>
-   * for log data file: ${startLogIndex}-${endLogIndex}-{version}-data
-   * <p>
-   * for log index file: ${startLogIndex}-${endLogIndex}-{version}-idx
+   *
+   * <p>for log data file: ${startLogIndex}-${endLogIndex}-{version}-data
+   *
+   * <p>for log index file: ${startLogIndex}-${endLogIndex}-{version}-idx
    */
   private static final int FILE_NAME_PART_LENGTH = 4;
 
-  private int maxRaftLogIndexSizeInMemory = ClusterDescriptor.getInstance().getConfig()
-      .getMaxRaftLogIndexSizeInMemory();
+  private int maxRaftLogIndexSizeInMemory =
+      ClusterDescriptor.getInstance().getConfig().getMaxRaftLogIndexSizeInMemory();
 
-  private int maxRaftLogPersistDataSizePerFile = ClusterDescriptor.getInstance().getConfig()
-      .getMaxRaftLogPersistDataSizePerFile();
+  private int maxRaftLogPersistDataSizePerFile =
+      ClusterDescriptor.getInstance().getConfig().getMaxRaftLogPersistDataSizePerFile();
 
-  private int maxNumberOfPersistRaftLogFiles = ClusterDescriptor.getInstance().getConfig()
-      .getMaxNumberOfPersistRaftLogFiles();
+  private int maxNumberOfPersistRaftLogFiles =
+      ClusterDescriptor.getInstance().getConfig().getMaxNumberOfPersistRaftLogFiles();
 
-  private int maxPersistRaftLogNumberOnDisk = ClusterDescriptor.getInstance().getConfig()
-      .getMaxPersistRaftLogNumberOnDisk();
+  private int maxPersistRaftLogNumberOnDisk =
+      ClusterDescriptor.getInstance().getConfig().getMaxPersistRaftLogNumberOnDisk();
 
   private ScheduledExecutorService persistLogDeleteExecutorService;
   private ScheduledFuture<?> persistLogDeleteLogFuture;
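
As a worked example of the four-part naming scheme documented above, the sketch below splits a file name into its FILE_NAME_PART_LENGTH parts; the separator is assumed to be "-", matching the pattern ${startLogIndex}-${endLogIndex}-${version}-data:

    public class LogFileNameDemo {
      private static final String FILE_NAME_SEPARATOR = "-"; // assumed value
      private static final int FILE_NAME_PART_LENGTH = 4;

      public static void main(String[] args) {
        String name = "100-199-3-data"; // logs [100, 199], version 3
        String[] parts = name.split(FILE_NAME_SEPARATOR);
        if (parts.length != FILE_NAME_PART_LENGTH) {
          throw new IllegalArgumentException("unexpected file name: " + name);
        }
        long startLogIndex = Long.parseLong(parts[0]);
        long endLogIndex = Long.parseLong(parts[1]);
        long version = Long.parseLong(parts[2]);
        String suffix = parts[3]; // "data" or "idx"
        System.out.printf(
            "start=%d end=%d version=%d type=%s%n", startLogIndex, endLogIndex, version, suffix);
      }
    }
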
@@ -153,9 +144,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
   private static final int LOG_DELETE_CHECK_INTERVAL_SECOND = 5;
 
-  /**
-   * the lock uses when change the log data files or log index files
-   */
+  /** the lock used when changing the log data files or log index files */
   private final Lock lock = new ReentrantLock();
 
   private volatile boolean isClosed = false;
@@ -169,12 +158,18 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     } catch (IOException e) {
       logger.error("log serializer build version controller failed", e);
     }
-    this.persistLogDeleteExecutorService = new ScheduledThreadPoolExecutor(1,
-        new BasicThreadFactory.Builder().namingPattern("persist-log-delete-" + logDir).daemon(true)
-            .build());
-
-    this.persistLogDeleteLogFuture = persistLogDeleteExecutorService
-        .scheduleAtFixedRate(this::checkDeletePersistRaftLog, LOG_DELETE_CHECK_INTERVAL_SECOND,
+    this.persistLogDeleteExecutorService =
+        new ScheduledThreadPoolExecutor(
+            1,
+            new BasicThreadFactory.Builder()
+                .namingPattern("persist-log-delete-" + logDir)
+                .daemon(true)
+                .build());
+
+    this.persistLogDeleteLogFuture =
+        persistLogDeleteExecutorService.scheduleAtFixedRate(
+            this::checkDeletePersistRaftLog,
+            LOG_DELETE_CHECK_INTERVAL_SECOND,
             LOG_DELETE_CHECK_INTERVAL_SECOND,
             TimeUnit.SECONDS);
   }
@@ -192,8 +187,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
   /**
   * log on disk is [size of log1 | log1 buffer] [size of log2 | log2 buffer]
-   * <p>
-   * build serializer with node id
+   *
+   * <p>build serializer with node id
    */
   public SyncLogDequeSerializer(int nodeIdentifier) {
     logDir = getLogDir(nodeIdentifier);
@@ -203,8 +198,12 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
   public static String getLogDir(int nodeIdentifier) {
     String systemDir = IoTDBDescriptor.getInstance().getConfig().getSystemDir();
-    return systemDir + File.separator + "raftLog" + File.separator +
-        nodeIdentifier + File.separator;
+    return systemDir
+        + File.separator
+        + "raftLog"
+        + File.separator
+        + nodeIdentifier
+        + File.separator;
   }
 
   @TestOnly
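
The comment above describes each data file as a sequence of length-prefixed records. A self-contained sketch of that layout using plain java.io streams (the class itself writes through ReadWriteIOUtils and in-memory buffers, so this only illustrates the format):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class LengthPrefixedLogDemo {
      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("raftlog", ".data");
        // write: [size of log1 | log1 buffer] [size of log2 | log2 buffer] ...
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
          for (String log : new String[] {"log-1", "log-22"}) {
            byte[] payload = log.getBytes("UTF-8");
            out.writeInt(payload.length); // 4-byte size prefix
            out.write(payload); // the serialized log itself
          }
        }
        // read the records back in order
        try (DataInputStream in = new DataInputStream(new FileInputStream(f))) {
          while (in.available() > 0) {
            byte[] payload = new byte[in.readInt()];
            in.readFully(payload);
            System.out.println(new String(payload, "UTF-8"));
          }
        }
      }
    }
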
@@ -217,20 +216,18 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     return metaFile;
   }
 
-  /**
-   * for log tools
-   */
+  /** for log tools */
   public LogManagerMeta getMeta() {
     return meta;
   }
 
-  /**
-   * Recover all the logs in disk. This function will be called once this instance is created.
-   */
+  /** Recover all the logs on disk. This function will be called once this instance is created. */
   @Override
   public List<Log> getAllEntriesAfterAppliedIndex() {
-    logger.debug("getAllEntriesBeforeAppliedIndex, maxHaveAppliedCommitIndex={}, commitLogIndex={}",
-        meta.getMaxHaveAppliedCommitIndex(), meta.getCommitLogIndex());
+    logger.debug(
+        "getAllEntriesBeforeAppliedIndex, maxHaveAppliedCommitIndex={}, commitLogIndex={}",
+        meta.getMaxHaveAppliedCommitIndex(),
+        meta.getCommitLogIndex());
     if (meta.getMaxHaveAppliedCommitIndex() >= meta.getCommitLogIndex()) {
       return Collections.emptyList();
     }
@@ -248,13 +245,16 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       meta.setLastLogIndex(entry.getCurrLogIndex());
       meta.setLastLogTerm(entry.getCurrLogTerm());
       meta.setMaxHaveAppliedCommitIndex(maxHaveAppliedCommitIndex);
-      logger.debug("maxHaveAppliedCommitIndex={}, commitLogIndex={},lastLogIndex={}",
-          maxHaveAppliedCommitIndex, meta.getCommitLogIndex(), meta.getLastLogIndex());
+      logger.debug(
+          "maxHaveAppliedCommitIndex={}, commitLogIndex={},lastLogIndex={}",
+          maxHaveAppliedCommitIndex,
+          meta.getCommitLogIndex(),
+          meta.getLastLogIndex());
     } catch (BufferOverflowException e) {
       throw new IOException(
           "Log cannot fit into buffer, please increase raft_log_buffer_size;"
-              + "otherwise, please increase the JVM memory", e
-      );
+              + "otherwise, please increase the JVM memory",
+          e);
     } finally {
       lock.unlock();
     }
@@ -312,17 +312,24 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       currentLogDataOutputStream = null;
 
       File currentLogDataFile = getCurrentLogDataFile();
-      String newDataFileName = currentLogDataFile.getName()
-          .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex));
-      File newCurrentLogDatFile = SystemFileFactory.INSTANCE
-          .getFile(currentLogDataFile.getParent() + File.separator + newDataFileName);
+      String newDataFileName =
+          currentLogDataFile
+              .getName()
+              .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex));
+      File newCurrentLogDatFile =
+          SystemFileFactory.INSTANCE.getFile(
+              currentLogDataFile.getParent() + File.separator + newDataFileName);
       if (!currentLogDataFile.renameTo(newCurrentLogDatFile)) {
-        logger.error("rename log data file={} to {} failed", currentLogDataFile.getAbsoluteFile(),
+        logger.error(
+            "rename log data file={} to {} failed",
+            currentLogDataFile.getAbsoluteFile(),
             newCurrentLogDatFile);
       }
       logDataFileList.set(logDataFileList.size() - 1, newCurrentLogDatFile);
 
-      logger.debug("rename data file={} to file={}", currentLogDataFile.getAbsoluteFile(),
+      logger.debug(
+          "rename data file={} to file={}",
+          currentLogDataFile.getAbsoluteFile(),
           newCurrentLogDatFile.getAbsoluteFile());
     }
 
@@ -332,14 +339,19 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       currentLogIndexOutputStream = null;
 
       File currentLogIndexFile = getCurrentLogIndexFile();
-      String newIndexFileName = currentLogIndexFile.getName()
-          .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex));
-      File newCurrentLogIndexFile = SystemFileFactory.INSTANCE
-          .getFile(currentLogIndexFile.getParent() + File.separator + newIndexFileName);
+      String newIndexFileName =
+          currentLogIndexFile
+              .getName()
+              .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex));
+      File newCurrentLogIndexFile =
+          SystemFileFactory.INSTANCE.getFile(
+              currentLogIndexFile.getParent() + File.separator + newIndexFileName);
       if (!currentLogIndexFile.renameTo(newCurrentLogIndexFile)) {
         logger.error("rename log index file={} failed", currentLogIndexFile.getAbsoluteFile());
       }
-      logger.debug("rename index file={} to file={}", currentLogIndexFile.getAbsoluteFile(),
+      logger.debug(
+          "rename index file={} to file={}",
+          currentLogIndexFile.getAbsoluteFile(),
           newCurrentLogIndexFile.getAbsoluteFile());
 
       logIndexFileList.set(logIndexFileList.size() - 1, newCurrentLogIndexFile);
@@ -359,12 +371,10 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       try {
         checkStream();
         // 1. write to the log data file
-        ReadWriteIOUtils
-            .writeWithoutSize(logDataBuffer, 0, logDataBuffer.position(),
-                currentLogDataOutputStream);
-        ReadWriteIOUtils
-            .writeWithoutSize(logIndexBuffer, 0, logIndexBuffer.position(),
-                currentLogIndexOutputStream);
+        ReadWriteIOUtils.writeWithoutSize(
+            logDataBuffer, 0, logDataBuffer.position(), currentLogDataOutputStream);
+        ReadWriteIOUtils.writeWithoutSize(
+            logIndexBuffer, 0, logIndexBuffer.position(), currentLogIndexOutputStream);
         if (ClusterDescriptor.getInstance().getConfig().getFlushRaftLogThreshold() == 0) {
           currentLogDataOutputStream.getChannel().force(true);
           currentLogIndexOutputStream.getChannel().force(true);
@@ -404,9 +414,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     }
   }
 
-  /**
-   * flush the log buffer and check if the file needs to be closed
-   */
+  /** flush the log buffer and check if the file needs to be closed */
   @Override
   public void forceFlushLogBuffer() {
     lock.lock();
@@ -416,7 +424,6 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     } finally {
       lock.unlock();
     }
-
   }
 
   @Override
@@ -442,11 +449,9 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     try {
       recoverLogFiles();
 
-      logDataFileList.sort(
-          this::comparePersistLogFileName);
+      logDataFileList.sort(this::comparePersistLogFileName);
 
-      logIndexFileList.sort(
-          this::comparePersistLogFileName);
+      logIndexFileList.sort(this::comparePersistLogFileName);
 
       // add init log file
       if (logDataFileList.isEmpty()) {
@@ -458,9 +463,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     }
   }
 
-  /**
-   * The file name rules are as follows: ${startLogIndex}-${endLogIndex}-${version}.data
-   */
+  /** The file name rules are as follows: ${startLogIndex}-${endLogIndex}-${version}-data */
   private void recoverLogFiles() {
     // 1. first we should recover the log index file
     recoverLogFiles(LOG_INDEX_FILE_SUFFIX);
@@ -473,10 +476,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   }
 
   private void recoverLogFiles(String logFileType) {
-    FileFilter logFilter = pathname -> {
-      String s = pathname.getName();
-      return s.endsWith(logFileType);
-    };
+    FileFilter logFilter =
+        pathname -> {
+          String s = pathname.getName();
+          return s.endsWith(logFileType);
+        };
 
     List<File> logFiles = Arrays.asList(metaFile.getParentFile().listFiles(logFilter));
     logger.info("Find log type ={} log files {}", logFileType, logFiles);
@@ -500,9 +504,9 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   /**
   * Check whether the file is legal or not
    *
-   * @param file     file needs to be check
-   * @param fileType {@link SyncLogDequeSerializer#LOG_DATA_FILE_SUFFIX} or  {@link
-   *                 SyncLogDequeSerializer#LOG_INDEX_FILE_SUFFIX}
+   * @param file the file to be checked
+   * @param fileType {@link SyncLogDequeSerializer#LOG_DATA_FILE_SUFFIX} or {@link
+   *     SyncLogDequeSerializer#LOG_INDEX_FILE_SUFFIX}
   * @return true if the file is legal, otherwise false
    */
   private boolean checkLogFile(File file, String fileType) {
@@ -557,7 +561,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     }
 
     if (!success) {
-      logger.error("recover log index file failed, clear all logs in disk, {}",
+      logger.error(
+          "recover log index file failed, clear all logs in disk, {}",
           lastIndexFile.getAbsoluteFile());
       forceDeleteAllLogFiles();
       clearFirstLogIndex();
@@ -573,7 +578,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
     success = recoverTheLastLogDataFile(logDataFileList.get(logDataFileList.size() - 1));
     if (!success) {
-      logger.error("recover log data file failed, clear all logs in disk,{}",
+      logger.error(
+          "recover log data file failed, clear all logs in disk,{}",
           lastDataFile.getAbsoluteFile());
       forceDeleteAllLogFiles();
       clearFirstLogIndex();
@@ -586,10 +592,10 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     Pair<File, Pair<Long, Long>> fileStartAndEndIndex = getLogIndexFile(startIndex);
     if (fileStartAndEndIndex.right.left == startIndex) {
       long endIndex = fileStartAndEndIndex.right.right;
-      String newDataFileName = file.getName()
-          .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex));
-      File newLogDataFile = SystemFileFactory.INSTANCE
-          .getFile(file.getParent() + File.separator + newDataFileName);
+      String newDataFileName =
+          file.getName().replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex));
+      File newLogDataFile =
+          SystemFileFactory.INSTANCE.getFile(file.getParent() + File.separator + newDataFileName);
       if (!file.renameTo(newLogDataFile)) {
         logger.error("rename log data file={} failed when recover", file.getAbsoluteFile());
       }
@@ -621,21 +627,26 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       logger.error("recover log index file failed,", e);
     }
     long endIndex = startIndex + totalCount - 1;
-    logger.debug("recover log index file={}, startIndex={}, endIndex={}", file.getAbsoluteFile(),
-        startIndex, endIndex);
+    logger.debug(
+        "recover log index file={}, startIndex={}, endIndex={}",
+        file.getAbsoluteFile(),
+        startIndex,
+        endIndex);
 
     if (endIndex < meta.getCommitLogIndex()) {
       logger.error(
           "due to the last abnormal exit, part of the raft logs are lost. "
               + "The commit index saved by the meta shall prevail, and all logs will be deleted"
-              + "meta commitLogIndex={}, endIndex={}", meta.getCommitLogIndex(), endIndex);
+              + "meta commitLogIndex={}, endIndex={}",
+          meta.getCommitLogIndex(),
+          endIndex);
       return false;
     }
     if (endIndex >= startIndex) {
-      String newIndexFileName = file.getName()
-          .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex));
-      File newLogIndexFile = SystemFileFactory.INSTANCE
-          .getFile(file.getParent() + File.separator + newIndexFileName);
+      String newIndexFileName =
+          file.getName().replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex));
+      File newLogIndexFile =
+          SystemFileFactory.INSTANCE.getFile(file.getParent() + File.separator + newIndexFileName);
       if (!file.renameTo(newLogIndexFile)) {
         logger.error("rename log index file={} failed when recover", file.getAbsoluteFile());
       }
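
Since each index entry is a fixed 8-byte long, the end index recovered above is simply startIndex plus the number of entries minus one. A back-of-the-envelope sketch of that arithmetic (the real code counts entries while reading, which also tolerates a truncated tail):

    public class RecoveredEndIndexDemo {
      public static void main(String[] args) {
        long startIndex = 100;
        long fileLength = 800; // bytes in an intact index file
        long totalCount = fileLength / Long.BYTES; // 100 eight-byte entries
        long endIndex = startIndex + totalCount - 1;
        System.out.println(endIndex); // 199, so the file covers [100, 199]
      }
    }
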
@@ -713,9 +724,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     }
   }
 
-  /**
-   * for unclosed file, the file name is ${startIndex}-${Long.MAX_VALUE}-{version}
-   */
+  /** for unclosed file, the file name is ${startIndex}-${Long.MAX_VALUE}-{version} */
   private void createNewLogFile(String dirName, long startLogIndex) throws IOException {
     lock.lock();
     try {
@@ -723,12 +732,17 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       long endLogIndex = Long.MAX_VALUE;
 
       String fileNamePrefix =
-          dirName + File.separator + startLogIndex + FILE_NAME_SEPARATOR + endLogIndex
-              + FILE_NAME_SEPARATOR + nextVersion + FILE_NAME_SEPARATOR;
-      File logDataFile = SystemFileFactory.INSTANCE
-          .getFile(fileNamePrefix + LOG_DATA_FILE_SUFFIX);
-      File logIndexFile = SystemFileFactory.INSTANCE
-          .getFile(fileNamePrefix + LOG_INDEX_FILE_SUFFIX);
+          dirName
+              + File.separator
+              + startLogIndex
+              + FILE_NAME_SEPARATOR
+              + endLogIndex
+              + FILE_NAME_SEPARATOR
+              + nextVersion
+              + FILE_NAME_SEPARATOR;
+      File logDataFile = SystemFileFactory.INSTANCE.getFile(fileNamePrefix + LOG_DATA_FILE_SUFFIX);
+      File logIndexFile =
+          SystemFileFactory.INSTANCE.getFile(fileNamePrefix + LOG_INDEX_FILE_SUFFIX);
 
       if (!logDataFile.createNewFile()) {
         logger.warn("Cannot create new log data file {}", logDataFile);
@@ -760,19 +774,23 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     if (metaFile.exists() && metaFile.length() > 0) {
       if (logger.isInfoEnabled()) {
         SimpleDateFormat format = new SimpleDateFormat();
-        logger.info("MetaFile {} exists, last modified: {}", metaFile.getPath(),
+        logger.info(
+            "MetaFile {} exists, last modified: {}",
+            metaFile.getPath(),
             format.format(new Date(metaFile.lastModified())));
       }
       try (FileInputStream fileInputStream = new FileInputStream(metaFile);
           BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) {
         minAvailableVersion = ReadWriteIOUtils.readLong(bufferedInputStream);
         maxAvailableVersion = ReadWriteIOUtils.readLong(bufferedInputStream);
-        meta = LogManagerMeta.deserialize(
-            ByteBuffer
-                .wrap(ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream)));
-        state = HardState.deserialize(
-            ByteBuffer
-                .wrap(ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream)));
+        meta =
+            LogManagerMeta.deserialize(
+                ByteBuffer.wrap(
+                    ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream)));
+        state =
+            HardState.deserialize(
+                ByteBuffer.wrap(
+                    ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream)));
       } catch (IOException e) {
         logger.error("Cannot recover log meta: ", e);
         meta = new LogManagerMeta();
@@ -782,9 +800,12 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       meta = new LogManagerMeta();
       state = new HardState();
     }
-    logger
-        .info("Recovered log meta: {}, availableVersion: [{},{}], state: {}",
-            meta, minAvailableVersion, maxAvailableVersion, state);
+    logger.info(
+        "Recovered log meta: {}, availableVersion: [{},{}], state: {}",
+        meta,
+        minAvailableVersion,
+        maxAvailableVersion,
+        state);
   }
 
   private void serializeMeta(LogManagerMeta meta) {
@@ -855,8 +876,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
       // 2. init
       if (!logIndexOffsetList.isEmpty()) {
-        this.firstLogIndex = Math
-            .max(commitIndex + 1, firstLogIndex + logIndexOffsetList.size());
+        this.firstLogIndex = Math.max(commitIndex + 1, firstLogIndex + logIndexOffsetList.size());
       } else {
         this.firstLogIndex = commitIndex + 1;
       }
@@ -938,10 +958,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   }
 
   private void forceDeleteAllLogDataFiles() {
-    FileFilter logFilter = pathname -> {
-      String s = pathname.getName();
-      return s.endsWith(LOG_DATA_FILE_SUFFIX);
-    };
+    FileFilter logFilter =
+        pathname -> {
+          String s = pathname.getName();
+          return s.endsWith(LOG_DATA_FILE_SUFFIX);
+        };
     List<File> logFiles = Arrays.asList(metaFile.getParentFile().listFiles(logFilter));
     logger.info("get log data files {} when forcing delete all logs", logFiles);
     for (File logFile : logFiles) {
@@ -955,10 +976,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   }
 
   private void forceDeleteAllLogIndexFiles() {
-    FileFilter logIndexFilter = pathname -> {
-      String s = pathname.getName();
-      return s.endsWith(LOG_INDEX_FILE_SUFFIX);
-    };
+    FileFilter logIndexFilter =
+        pathname -> {
+          String s = pathname.getName();
+          return s.endsWith(LOG_INDEX_FILE_SUFFIX);
+        };
 
     List<File> logIndexFiles = Arrays.asList(metaFile.getParentFile().listFiles(logIndexFilter));
     logger.info("get log index files {} when forcing delete all logs", logIndexFiles);
@@ -1003,10 +1025,13 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       Files.delete(logIndexFile.toPath());
       logDataFileList.remove(0);
       logIndexFileList.remove(0);
-      logger.debug("delete date file={}, index file={}", logDataFile.getAbsoluteFile(),
+      logger.debug(
+          "delete date file={}, index file={}",
+          logDataFile.getAbsoluteFile(),
           logIndexFile.getAbsoluteFile());
     } catch (IOException e) {
-      logger.error("delete file failed, data file={}, index file={}",
+      logger.error(
+          "delete file failed, data file={}, index file={}",
           logDataFile.getAbsoluteFile(),
           logIndexFile.getAbsoluteFile());
       return false;
@@ -1028,7 +1053,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     if (items1.length != FILE_NAME_PART_LENGTH || items2.length != FILE_NAME_PART_LENGTH) {
       logger.error(
           "file1={}, file2={} name should be in the following format: startLogIndex-endLogIndex-version-data",
-          file1.getAbsoluteFile(), file2.getAbsoluteFile());
+          file1.getAbsoluteFile(),
+          file2.getAbsoluteFile());
     }
     long startLogIndex1 = Long.parseLong(items1[0]);
     long startLogIndex2 = Long.parseLong(items2[0]);
@@ -1041,21 +1067,19 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
   /**
    * @param startIndex the log start index
-   * @param endIndex   the log end index
+   * @param endIndex the log end index
    * @return the raft log which index between [startIndex, endIndex] or empty if not found
    */
   @Override
   public List<Log> getLogs(long startIndex, long endIndex) {
     if (startIndex > endIndex) {
-      logger
-          .error("startIndex={} should be less than or equal to endIndex={}", startIndex,
-              endIndex);
+      logger.error(
+          "startIndex={} should be less than or equal to endIndex={}", startIndex, endIndex);
       return Collections.emptyList();
     }
     if (startIndex < 0 || endIndex < 0) {
-      logger
-          .error("startIndex={} and endIndex={} should be larger than zero", startIndex,
-              endIndex);
+      logger.error(
+          "startIndex={} and endIndex={} should be larger than zero", startIndex, endIndex);
       return Collections.emptyList();
     }
 
@@ -1063,9 +1087,12 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     if (endIndex - startIndex > MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK) {
       newEndIndex = startIndex + MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK;
     }
-    logger
-        .debug("intend to get logs between[{}, {}], actually get logs between[{},{}]", startIndex,
-            endIndex, startIndex, newEndIndex);
+    logger.debug(
+        "intend to get logs between[{}, {}], actually get logs between[{},{}]",
+        startIndex,
+        endIndex,
+        startIndex,
+        newEndIndex);
 
     // maybe the logs will be deleted during checkDeletePersistRaftLog or clearAllLogs,
     // use lock for two reasons:
@@ -1073,8 +1100,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     // 2.prevent these log files from being deleted
     lock.lock();
     try {
-      List<Pair<File, Pair<Long, Long>>> logDataFileAndOffsetList = getLogDataFileAndOffset(
-          startIndex, newEndIndex);
+      List<Pair<File, Pair<Long, Long>>> logDataFileAndOffsetList =
+          getLogDataFileAndOffset(startIndex, newEndIndex);
       if (logDataFileAndOffsetList.isEmpty()) {
         return Collections.emptyList();
       }
@@ -1090,7 +1117,6 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     }
   }
 
-
   /**
    * @param logIndex the log's index
    * @return The offset of the data file corresponding to the log index, -1 if not found
@@ -1100,8 +1126,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
     long maxLogIndex = firstLogIndex + logIndexOffsetList.size();
     if (logIndex >= maxLogIndex) {
-      logger.error("given log index={} exceed the max log index={}, firstLogIndex={}", logIndex,
-          maxLogIndex, firstLogIndex);
+      logger.error(
+          "given log index={} exceed the max log index={}, firstLogIndex={}",
+          logIndex,
+          maxLogIndex,
+          firstLogIndex);
       return -1;
     }
     // 1. first find in memory
@@ -1111,14 +1140,19 @@ public class SyncLogDequeSerializer implements StableEntryManager {
         offset = logIndexOffsetList.get(arrayIndex);
         logger.debug(
             "found the offset in memory, logIndex={}, firstLogIndex={}, logIndexOffsetList size={}, offset={}",
-            logIndex, firstLogIndex, logIndexOffsetList.size(), offset);
+            logIndex,
+            firstLogIndex,
+            logIndexOffsetList.size(),
+            offset);
         return offset;
       }
     }
 
     logger.debug(
         "can not found the offset in memory, logIndex={}, firstLogIndex={}, logIndexOffsetList size={}",
-        logIndex, firstLogIndex, logIndexOffsetList.size());
+        logIndex,
+        firstLogIndex,
+        logIndexOffsetList.size());
 
     // 2. second read the log index file
     Pair<File, Pair<Long, Long>> fileWithStartAndEndIndex = getLogIndexFile(logIndex);
@@ -1129,16 +1163,20 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     Pair<Long, Long> startAndEndIndex = fileWithStartAndEndIndex.right;
     logger.debug(
         "start to read the log index file={} for log index={}, file size={}",
-        file.getAbsoluteFile(), logIndex, file.length());
+        file.getAbsoluteFile(),
+        logIndex,
+        file.length());
     try (FileInputStream fileInputStream = new FileInputStream(file);
         BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) {
       long bytesNeedToSkip = (logIndex - startAndEndIndex.left) * (Long.BYTES);
       long bytesActuallySkip = bufferedInputStream.skip(bytesNeedToSkip);
-      logger.debug("skip {} bytes when read file={}", bytesActuallySkip,
-          file.getAbsoluteFile());
+      logger.debug("skip {} bytes when read file={}", bytesActuallySkip, file.getAbsoluteFile());
       if (bytesNeedToSkip != bytesActuallySkip) {
-        logger.error("read file={} failed, should skip={}, actually skip={}",
-            file.getAbsoluteFile(), bytesNeedToSkip, bytesActuallySkip);
+        logger.error(
+            "read file={} failed, should skip={}, actually skip={}",
+            file.getAbsoluteFile(),
+            bytesNeedToSkip,
+            bytesActuallySkip);
         return -1;
       }
       offset = ReadWriteIOUtils.readLong(bufferedInputStream);
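
The skip arithmetic above works because an index file is a dense array of 8-byte offsets: the entry for logIndex sits (logIndex - fileStartIndex) * Long.BYTES bytes into the file. A compact sketch of the same lookup, using a temp file and made-up offsets rather than the class's real I/O path:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class IndexLookupDemo {
      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("raftlog", ".idx");
        long fileStartIndex = 100;
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
          for (long i = 0; i < 10; i++) {
            out.writeLong(i * 123); // fake data-file offsets for indexes 100..109
          }
        }
        long logIndex = 104;
        try (DataInputStream in = new DataInputStream(new FileInputStream(f))) {
          long toSkip = (logIndex - fileStartIndex) * Long.BYTES;
          if (in.skip(toSkip) != toSkip) {
            throw new IOException("truncated index file");
          }
          System.out.println(in.readLong()); // 492, the offset stored for log 104
        }
      }
    }
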
@@ -1151,12 +1189,12 @@ public class SyncLogDequeSerializer implements StableEntryManager {
 
   /**
    * @param startIndex the log start index
-   * @param endIndex   the log end index
+   * @param endIndex the log end index
    * @return first value-> the log data file, second value-> the left value is the start offset of
-   * the file, the right is the end offset of the file
+   *     the file, the right is the end offset of the file
    */
-  private List<Pair<File, Pair<Long, Long>>> getLogDataFileAndOffset(long startIndex,
-      long endIndex) {
+  private List<Pair<File, Pair<Long, Long>>> getLogDataFileAndOffset(
+      long startIndex, long endIndex) {
     long startIndexInOneFile = startIndex;
     long endIndexInOneFile = 0;
     List<Pair<File, Pair<Long, Long>>> fileNameWithStartAndEndOffset = new ArrayList<>();
@@ -1165,8 +1203,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     if (startOffset == -1) {
       return Collections.emptyList();
     }
-    Pair<File, Pair<Long, Long>> logDataFileWithStartAndEndLogIndex = getLogDataFile(
-        startIndexInOneFile);
+    Pair<File, Pair<Long, Long>> logDataFileWithStartAndEndLogIndex =
+        getLogDataFile(startIndexInOneFile);
     if (logDataFileWithStartAndEndLogIndex == null) {
       return Collections.emptyList();
     }
@@ -1175,18 +1213,19 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     while (endIndex > endIndexInOneFile) {
       //  this means the endIndex's offset can not be found in the file
       //  logDataFileWithStartAndEndLogIndex.left; and should be find in the next log data file.
-      //3. get the file's end offset
+      // 3. get the file's end offset
       long endOffset = getOffsetAccordingToLogIndex(endIndexInOneFile);
       fileNameWithStartAndEndOffset.add(
-          new Pair<>(logDataFileWithStartAndEndLogIndex.left,
-              new Pair<>(startOffset, endOffset)));
-
-      logger
-          .debug("get log data offset=[{},{}] according to log index=[{},{}], file={}",
-              startOffset,
-              endOffset, startIndexInOneFile, endIndexInOneFile,
-              logDataFileWithStartAndEndLogIndex.left);
-      //4. search the next file to get the log index of fileEndLogIndex + 1
+          new Pair<>(logDataFileWithStartAndEndLogIndex.left, new Pair<>(startOffset, endOffset)));
+
+      logger.debug(
+          "get log data offset=[{},{}] according to log index=[{},{}], file={}",
+          startOffset,
+          endOffset,
+          startIndexInOneFile,
+          endIndexInOneFile,
+          logDataFileWithStartAndEndLogIndex.left);
+      // 4. search the next file to get the log index of fileEndLogIndex + 1
       startIndexInOneFile = endIndexInOneFile + 1;
       startOffset = getOffsetAccordingToLogIndex(startIndexInOneFile);
       if (startOffset == -1) {
@@ -1198,21 +1237,26 @@ public class SyncLogDequeSerializer implements StableEntryManager {
       }
       endIndexInOneFile = logDataFileWithStartAndEndLogIndex.right.right;
     }
-    // this means the endIndex's offset can not be found in the file logDataFileWithStartAndEndLogIndex.left
+    // this means the endIndex's offset can not be found in the file
+    // logDataFileWithStartAndEndLogIndex.left
     long endOffset = getOffsetAccordingToLogIndex(endIndex);
     fileNameWithStartAndEndOffset.add(
         new Pair<>(logDataFileWithStartAndEndLogIndex.left, new Pair<>(startOffset, endOffset)));
-    logger
-        .debug("get log data offset=[{},{}] according to log index=[{},{}], file={}", startOffset,
-            endOffset, startIndexInOneFile, endIndex, logDataFileWithStartAndEndLogIndex.left);
+    logger.debug(
+        "get log data offset=[{},{}] according to log index=[{},{}], file={}",
+        startOffset,
+        endOffset,
+        startIndexInOneFile,
+        endIndex,
+        logDataFileWithStartAndEndLogIndex.left);
     return fileNameWithStartAndEndOffset;
   }
 
   /**
    * @param startIndex the start log index
    * @return the first value of the pair is the log index file which contains the start index; the
-   * second pair's first value is the file's start log index. the second pair's second value is the
-   * file's end log index. null if not found
+   *     second pair's first value is the file's start log index. the second pair's second value is
+   *     the file's end log index. null if not found
    */
   public Pair<File, Pair<Long, Long>> getLogIndexFile(long startIndex) {
     for (File file : logIndexFileList) {
@@ -1223,8 +1267,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
             file.getAbsoluteFile());
       }
       if (Long.parseLong(splits[0]) <= startIndex && startIndex <= Long.parseLong(splits[1])) {
-        return new Pair<>(file,
-            new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1])));
+        return new Pair<>(file, new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1])));
       }
     }
     logger.debug("can not found the log index file for startIndex={}", startIndex);
@@ -1234,8 +1277,8 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   /**
    * @param startIndex the start log index
    * @return the first value of the pair is the log data file which contains the start index; the
-   * second pair's first value is the file's start log index. the second pair's second value is the
-   * file's end log index. null if not found
+   *     second pair's first value is the file's start log index. the second pair's second value is
+   *     the file's end log index. null if not found
    */
   public Pair<File, Pair<Long, Long>> getLogDataFile(long startIndex) {
     for (File file : logDataFileList) {
@@ -1246,8 +1289,7 @@ public class SyncLogDequeSerializer implements StableEntryManager {
             file.getAbsoluteFile());
       }
       if (Long.parseLong(splits[0]) <= startIndex && startIndex <= Long.parseLong(splits[1])) {
-        return new Pair<>(file,
-            new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1])));
+        return new Pair<>(file, new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1])));
       }
     }
     logger.debug("can not found the log data file for startIndex={}", startIndex);
@@ -1255,12 +1297,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   }
 
   /**
-   * @param file              the log data file
-   * @param startAndEndOffset the left value is the start offset of the file,  the right is the end
-   *                          offset of the file
+   * @param file the log data file
+   * @param startAndEndOffset the left value is the start offset of the file, the right is the end
+   *     offset of the file
    * @return the logs between start offset and end offset
    */
-
   private List<Log> getLogsFromOneLogDataFile(File file, Pair<Long, Long> startAndEndOffset) {
     List<Log> result = new ArrayList<>();
     if (file.getName().equals(getCurrentLogDataFile().getName())) {
@@ -1270,27 +1311,38 @@ public class SyncLogDequeSerializer implements StableEntryManager {
         BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) {
       long bytesSkip = bufferedInputStream.skip(startAndEndOffset.left);
       if (bytesSkip != startAndEndOffset.left) {
-        logger.error("read file={} failed when skip {} bytes, actual skip bytes={}",
-            file.getAbsoluteFile(), startAndEndOffset.left, bytesSkip);
+        logger.error(
+            "read file={} failed when skip {} bytes, actual skip bytes={}",
+            file.getAbsoluteFile(),
+            startAndEndOffset.left,
+            bytesSkip);
         return result;
       }
 
       logger.debug(
           "start to read file={} and skip {} bytes, startOffset={}, endOffset={}, fileLength={}",
-          file.getAbsoluteFile(), bytesSkip, startAndEndOffset.left, startAndEndOffset.right,
+          file.getAbsoluteFile(),
+          bytesSkip,
+          startAndEndOffset.left,
+          startAndEndOffset.right,
           file.length());
 
       long currentReadOffset = bytesSkip;
-      // because we want to get all the logs whose offset between [startAndEndOffset.left, startAndEndOffset.right]
+      // because we want to get all the logs whose offset is between [startAndEndOffset.left,
+      // startAndEndOffset.right]
       // which means, the last offset's value should be still read, in other words,
-      // the first log index of the offset starting with startAndEndOffset.right also needs to be read.
+      // the first log index of the offset starting with startAndEndOffset.right also needs to be
+      // read.
       while (currentReadOffset <= startAndEndOffset.right) {
-        logger.debug("read file={}, currentReadOffset={}, end offset={}", file.getAbsoluteFile(),
-            currentReadOffset, startAndEndOffset.right);
+        logger.debug(
+            "read file={}, currentReadOffset={}, end offset={}",
+            file.getAbsoluteFile(),
+            currentReadOffset,
+            startAndEndOffset.right);
         int logSize = ReadWriteIOUtils.readInt(bufferedInputStream);
         Log log = null;
-        log = parser
-            .parse(ByteBuffer.wrap(ReadWriteIOUtils.readBytes(bufferedInputStream, logSize)));
+        log =
+            parser.parse(ByteBuffer.wrap(ReadWriteIOUtils.readBytes(bufferedInputStream, logSize)));
         result.add(log);
         currentReadOffset = currentReadOffset + Integer.BYTES + logSize;
       }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
index d837441..8188ce7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
@@ -69,16 +69,16 @@ import org.slf4j.LoggerFactory;
 /**
  * FileSnapshot records the data files in a slot and their md5 (or other verification). When the
  * snapshot is used to perform a catch-up, the receiver should:
- * <p>
- * 1. create a remote snapshot indicating that the slot is being pulled from the remote
- * <p>
- * 2. traverse the file list, for each file:
- * <p>
- * 2.1 if the file exists locally and the md5 is correct, skip it.
- * <p>
- * 2.2 otherwise pull the file from the remote.
- * <p>
- * 3. replace the remote snapshot with a FileSnapshot indicating that the slot of this node is
+ *
+ * <p>1. create a remote snapshot indicating that the slot is being pulled from the remote
+ *
+ * <p>2. traverse the file list, for each file:
+ *
+ * <p>2.1 if the file exists locally and the md5 is correct, skip it.
+ *
+ * <p>2.2 otherwise pull the file from the remote.
+ *
+ * <p>3. replace the remote snapshot with a FileSnapshot indicating that the slot of this node is
  * synchronized with the remote one.
  */
 @SuppressWarnings("java:S1135") // ignore todos
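
The three-step catch-up protocol in the class comment corresponds roughly to the control flow below; the helper names are hypothetical and only sketch the shape, since the real logic lives in the Installer in this file:

    import java.util.List;

    public class CatchUpSketch {
      // hypothetical stand-in for the member's real collaborators
      interface RemoteFile {
        String md5();
      }

      void installFileSnapshot(int slot, List<RemoteFile> files) {
        markSlotAsPullingFromRemote(slot); // step 1: remote snapshot placeholder
        for (RemoteFile file : files) { // step 2: traverse the file list
          if (existsLocally(file) && localMd5(file).equals(file.md5())) {
            continue; // step 2.1: a verified copy is already here
          }
          pullFromRemote(file); // step 2.2: fetch the file from the remote
        }
        markSlotAsSynchronized(slot); // step 3: replace with a FileSnapshot
      }

      // stubs so the sketch compiles; the real member talks to StorageEngine etc.
      void markSlotAsPullingFromRemote(int slot) {}

      boolean existsLocally(RemoteFile f) {
        return false;
      }

      String localMd5(RemoteFile f) {
        return "";
      }

      void pullFromRemote(RemoteFile f) {}

      void markSlotAsSynchronized(int slot) {}
    }
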
@@ -155,8 +155,7 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
   }
 
   @Override
-  public void setTimeseriesSchemas(
-      Collection<TimeseriesSchema> timeseriesSchemas) {
+  public void setTimeseriesSchemas(Collection<TimeseriesSchema> timeseriesSchemas) {
     this.timeseriesSchemas = timeseriesSchemas;
   }
 
@@ -167,8 +166,9 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
 
   @Override
   public String toString() {
-    return String.format("FileSnapshot{%d files, %d series, index-term: %d-%d}", dataFiles.size()
-        , timeseriesSchemas.size(), lastLogIndex, lastLogTerm);
+    return String.format(
+        "FileSnapshot{%d files, %d series, index-term: %d-%d}",
+        dataFiles.size(), timeseriesSchemas.size(), lastLogIndex, lastLogTerm);
   }
 
   public static class Installer implements SnapshotInstaller<FileSnapshot> {
@@ -266,10 +266,11 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       List<RemoteTsFileResource> remoteTsFileResources = snapshot.getDataFiles();
       // pull file
       for (int i = 0, remoteTsFileResourcesSize = remoteTsFileResources.size();
-          i < remoteTsFileResourcesSize; i++) {
+          i < remoteTsFileResourcesSize;
+          i++) {
         RemoteTsFileResource resource = remoteTsFileResources.get(i);
-        logger.info("Pulling {}/{} files, current: {}", i + 1, remoteTsFileResources.size(),
-            resource);
+        logger.info(
+            "Pulling {}/{} files, current: {}", i + 1, remoteTsFileResources.size(), resource);
         try {
           if (!isFileAlreadyPulled(resource)) {
             loadRemoteFile(resource);
@@ -296,10 +297,12 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
     * @return true if the file has already been pulled to the local node
      */
     private boolean isFileAlreadyPulled(RemoteTsFileResource resource) throws IllegalPathException {
-      Pair<String, Long> sgNameAndTimePartitionIdPair = FilePathUtils
-          .getLogicalSgNameAndTimePartitionIdPair(resource);
+      Pair<String, Long> sgNameAndTimePartitionIdPair =
+          FilePathUtils.getLogicalSgNameAndTimePartitionIdPair(resource);
       return StorageEngine.getInstance()
-          .isFileAlreadyExist(resource, new PartialPath(sgNameAndTimePartitionIdPair.left),
+          .isFileAlreadyExist(
+              resource,
+              new PartialPath(sgNameAndTimePartitionIdPair.left),
               sgNameAndTimePartitionIdPair.right);
     }
 
@@ -309,28 +312,32 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
         AsyncDataClient client = (AsyncDataClient) dataGroupMember.getAsyncClient(sourceNode);
         if (client != null) {
           try {
-            client.removeHardLink(resource.getTsFile().getAbsolutePath(),
-                new GenericHandler<>(sourceNode, null));
+            client.removeHardLink(
+                resource.getTsFile().getAbsolutePath(), new GenericHandler<>(sourceNode, null));
           } catch (TException e) {
-            logger
-                .error("Cannot remove hardlink {} from {}", resource.getTsFile().getAbsolutePath(),
-                    sourceNode);
+            logger.error(
+                "Cannot remove hardlink {} from {}",
+                resource.getTsFile().getAbsolutePath(),
+                sourceNode);
           }
         }
       } else {
         SyncDataClient client = (SyncDataClient) dataGroupMember.getSyncClient(sourceNode);
         if (client == null) {
-          logger.error("Cannot remove hardlink {} from {}, due to can not get client",
-              resource.getTsFile().getAbsolutePath(), sourceNode);
+          logger.error(
+              "Cannot remove hardlink {} from {}, due to can not get client",
+              resource.getTsFile().getAbsolutePath(),
+              sourceNode);
           return;
         }
         try {
           client.removeHardLink(resource.getTsFile().getAbsolutePath());
         } catch (TException te) {
           client.getInputProtocol().getTransport().close();
-          logger
-              .error("Cannot remove hardlink {} from {}", resource.getTsFile().getAbsolutePath(),
-                  sourceNode);
+          logger.error(
+              "Cannot remove hardlink {} from {}",
+              resource.getTsFile().getAbsolutePath(),
+              sourceNode);
         } finally {
           ClientUtils.putBackSyncClient(client);
         }
@@ -379,10 +386,11 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
      * @param resource
      */
     private void loadRemoteResource(RemoteTsFileResource resource) throws IllegalPathException {
-      // the new file is stored at: remote/<nodeIdentifier>/<FilePathUtils.getTsFilePrefixPath(resource)>/<tsfile>
+      // the new file is stored at:
+      // remote/<nodeIdentifier>/<FilePathUtils.getTsFilePrefixPath(resource)>/<tsfile>
       // you can see FilePathUtils.splitTsFilePath() method for details.
-      PartialPath storageGroupName = new PartialPath(
-          FilePathUtils.getLogicalStorageGroupName(resource));
+      PartialPath storageGroupName =
+          new PartialPath(FilePathUtils.getLogicalStorageGroupName(resource));
       File remoteModFile =
           new File(resource.getTsFile().getAbsoluteFile() + ModificationFile.FILE_SUFFIX);
       try {
@@ -390,7 +398,8 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
         if (resource.isPlanRangeUnique()) {
          // only when a file has a unique range can we remove other files that overlap with it,
           // otherwise we may remove data that is not contained in the file
-          StorageEngine.getInstance().getProcessor(storageGroupName)
+          StorageEngine.getInstance()
+              .getProcessor(storageGroupName)
               .removeFullyOverlapFiles(resource);
         }
       } catch (StorageEngineException | LoadFileException e) {
@@ -421,27 +430,35 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
      * resource has modification file, also download it.
      *
      * @param resource the TsFile to be downloaded
-     * @param node     where to download the file
+     * @param node where to download the file
      * @return the downloaded file or null if the file cannot be downloaded or its MD5 is not right
      * @throws IOException
      */
     private File pullRemoteFile(RemoteTsFileResource resource, Node node) throws IOException {
-      logger.info("{}: pulling remote file {} from {}, plan index [{}, {}]", name, resource, node
-          , resource.getMinPlanIndex(), resource.getMaxPlanIndex());
+      logger.info(
+          "{}: pulling remote file {} from {}, plan index [{}, {}]",
+          name,
+          resource,
+          node,
+          resource.getMinPlanIndex(),
+          resource.getMaxPlanIndex());
       // the new file is stored at:
       // remote/<nodeIdentifier>/<FilePathUtils.getTsFilePrefixPath(resource)>/<newTsFile>
       // you can see FilePathUtils.splitTsFilePath() method for details.
       String tempFileName = FilePathUtils.getTsFileNameWithoutHardLink(resource);
       String tempFilePath =
-          node.getNodeIdentifier() + File.separator + FilePathUtils.getTsFilePrefixPath(resource)
-              + File.separator + tempFileName;
+          node.getNodeIdentifier()
+              + File.separator
+              + FilePathUtils.getTsFilePrefixPath(resource)
+              + File.separator
+              + tempFileName;
       File tempFile = new File(REMOTE_FILE_TEMP_DIR, tempFilePath);
       tempFile.getParentFile().mkdirs();
       if (pullRemoteFile(resource.getTsFile().getAbsolutePath(), node, tempFile)) {
         // TODO-Cluster#353: implement file examination, may be replaced with other algorithm
         if (resource.isWithModification()) {
-          File tempModFile = new File(REMOTE_FILE_TEMP_DIR,
-              tempFilePath + ModificationFile.FILE_SUFFIX);
+          File tempModFile =
+              new File(REMOTE_FILE_TEMP_DIR, tempFilePath + ModificationFile.FILE_SUFFIX);
           pullRemoteFile(resource.getModFile().getFilePath(), node, tempModFile);
         }
         return tempFile;
@@ -454,8 +471,8 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
     * the network is bad, this method will retry up to 5 times before returning a failure.
      *
      * @param remotePath the file to be downloaded
-     * @param node       where to download the file
-     * @param dest       where to store the file
+     * @param node where to download the file
+     * @param dest where to store the file
      * @return true if the file is successfully downloaded, false otherwise
      * @throws IOException
      */
@@ -471,13 +488,17 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
           }
 
           if (logger.isInfoEnabled()) {
-            logger.info("{}: remote file {} is pulled at {}, length: {}", name, remotePath, dest,
+            logger.info(
+                "{}: remote file {} is pulled at {}, length: {}",
+                name,
+                remotePath,
+                dest,
                 dest.length());
           }
           return true;
         } catch (TException e) {
-          logger.warn("{}: Cannot pull file {} from {}, wait 5s to retry", name, remotePath, node,
-              e);
+          logger.warn(
+              "{}: Cannot pull file {} from {}, wait 5s to retry", name, remotePath, node, e);
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
           logger.warn("{}: Pulling file {} from {} interrupted", name, remotePath, node, e);
@@ -528,7 +549,9 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       // notice: the buffer returned by thrift is a slice of a larger buffer which contains
       // the whole response, so buffer.position() is not 0 initially and buffer.limit() is
       // not the size of the downloaded chunk
-      dest.write(buffer.array(), buffer.position() + buffer.arrayOffset(),
+      dest.write(
+          buffer.array(),
+          buffer.position() + buffer.arrayOffset(),
           buffer.limit() - buffer.position());
       return buffer.limit() - buffer.position();
     }
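
The comment above deserves unpacking: a ByteBuffer sliced from a larger response shares its backing array, so the readable bytes start at arrayOffset() + position() and span limit() - position(). A small demonstration with a hand-made slice:

    import java.nio.ByteBuffer;

    public class SliceOffsetDemo {
      public static void main(String[] args) {
        byte[] whole = "HEADERpayload".getBytes();
        ByteBuffer response = ByteBuffer.wrap(whole);
        response.position(6); // skip the 6-byte "HEADER"
        ByteBuffer chunk = response.slice(); // shares the same backing array
        // array() still returns the whole response, so the usable region is
        // [arrayOffset() + position(), arrayOffset() + limit())
        int start = chunk.arrayOffset() + chunk.position();
        int length = chunk.limit() - chunk.position();
        System.out.println(new String(chunk.array(), start, length)); // payload
      }
    }
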
@@ -571,20 +594,21 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       return false;
     }
     FileSnapshot snapshot = (FileSnapshot) o;
-    return Objects.equals(timeseriesSchemas, snapshot.timeseriesSchemas) &&
-        Objects.equals(dataFiles, snapshot.dataFiles);
+    return Objects.equals(timeseriesSchemas, snapshot.timeseriesSchemas)
+        && Objects.equals(dataFiles, snapshot.dataFiles);
   }
 
   @Override
   public void truncateBefore(long minIndex) {
-    dataFiles.removeIf(res -> {
-      boolean toBeTruncated = res.getMaxPlanIndex() <= minIndex;
-      if (toBeTruncated) {
-        // also remove the hardlink
-        res.remove();
-      }
-      return toBeTruncated;
-    });
+    dataFiles.removeIf(
+        res -> {
+          boolean toBeTruncated = res.getMaxPlanIndex() <= minIndex;
+          if (toBeTruncated) {
+            // also remove the hardlink
+            res.remove();
+          }
+          return toBeTruncated;
+        });
   }
 
   @Override
@@ -607,8 +631,8 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       fileSnapshot.setLastLogIndex(origin.lastLogIndex);
       fileSnapshot.setLastLogTerm(origin.lastLogTerm);
       fileSnapshot.dataFiles = origin.dataFiles == null ? null : new ArrayList<>(origin.dataFiles);
-      fileSnapshot.timeseriesSchemas = origin.timeseriesSchemas == null ? null :
-          new ArrayList<>(origin.timeseriesSchemas);
+      fileSnapshot.timeseriesSchemas =
+          origin.timeseriesSchemas == null ? null : new ArrayList<>(origin.timeseriesSchemas);
       return fileSnapshot;
     }
   }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java
index 31b1cc4..cb471f3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java
@@ -28,7 +28,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 import org.apache.iotdb.cluster.log.Snapshot;
-import org.apache.iotdb.cluster.log.snapshot.FileSnapshot.Factory;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
 import org.apache.iotdb.cluster.server.member.RaftMember;
 import org.apache.iotdb.db.auth.AuthException;
@@ -47,9 +46,7 @@ import org.apache.iotdb.db.utils.SerializeUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * MetaSimpleSnapshot also records all storage groups.
- */
+/** MetaSimpleSnapshot also records all storage groups. */
 public class MetaSimpleSnapshot extends Snapshot {
 
   private static final Logger logger = LoggerFactory.getLogger(MetaSimpleSnapshot.class);
@@ -134,8 +131,8 @@ public class MetaSimpleSnapshot extends Snapshot {
     storageGroupTTLMap = new HashMap<>(storageGroupTTLMapSize);
     for (int i = 0; i < storageGroupTTLMapSize; i++) {
       try {
-        storageGroupTTLMap.put(new PartialPath(SerializeUtils.deserializeString(buffer)),
-            buffer.getLong());
+        storageGroupTTLMap.put(
+            new PartialPath(SerializeUtils.deserializeString(buffer)), buffer.getLong());
       } catch (IllegalPathException e) {
         // ignore
       }
@@ -203,9 +200,11 @@ public class MetaSimpleSnapshot extends Snapshot {
           } catch (StorageGroupAlreadySetException e) {
             // ignore
           } catch (MetadataException e) {
-            logger.error("{}: Cannot add storage group {} in snapshot, errMessage:{}",
+            logger.error(
+                "{}: Cannot add storage group {} in snapshot, errMessage:{}",
                 metaGroupMember.getName(),
-                entry.getKey(), e.getMessage());
+                entry.getKey(),
+                e.getMessage());
           }
 
           // 2. register ttl in the snapshot
@@ -213,10 +212,11 @@ public class MetaSimpleSnapshot extends Snapshot {
             IoTDB.metaManager.setTTL(sgPath, entry.getValue());
             StorageEngine.getInstance().setTTL(sgPath, entry.getValue());
           } catch (MetadataException | StorageEngineException | IOException e) {
-            logger
-                .error("{}: Cannot set ttl in storage group {} , errMessage: {}",
-                    metaGroupMember.getName(),
-                    entry.getKey(), e.getMessage());
+            logger.error(
+                "{}: Cannot set ttl in storage group {} , errMessage: {}",
+                metaGroupMember.getName(),
+                entry.getKey(),
+                e.getMessage());
           }
         }
 
@@ -226,8 +226,8 @@ public class MetaSimpleSnapshot extends Snapshot {
           installSnapshotUsers(authorizer, snapshot);
           installSnapshotRoles(authorizer, snapshot);
         } catch (AuthException e) {
-          logger.error("{}: Cannot get authorizer instance, error is: ", metaGroupMember.getName(),
-              e);
+          logger.error(
+              "{}: Cannot get authorizer instance, error is: ", metaGroupMember.getName(), e);
         }
 
         // 4. accept partition table
@@ -265,10 +265,10 @@ public class MetaSimpleSnapshot extends Snapshot {
       return false;
     }
     MetaSimpleSnapshot that = (MetaSimpleSnapshot) o;
-    return Objects.equals(storageGroupTTLMap, that.storageGroupTTLMap) &&
-        Objects.equals(userMap, that.userMap) &&
-        Objects.equals(roleMap, that.roleMap) &&
-        Objects.equals(partitionTableBuffer, that.partitionTableBuffer);
+    return Objects.equals(storageGroupTTLMap, that.storageGroupTTLMap)
+        && Objects.equals(userMap, that.userMap)
+        && Objects.equals(roleMap, that.roleMap)
+        && Objects.equals(partitionTableBuffer, that.partitionTableBuffer);
   }
 
   @Override
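
The deserializer reformatted above reads a size-prefixed sequence of (storage group path, TTL) pairs out of a ByteBuffer, ignoring entries with illegal paths. The following minimal sketch shows such a size-prefixed map round-trip; the string codec here is an assumption standing in for IoTDB's SerializeUtils, not its actual format.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;

    // A minimal sketch of a size-prefixed (path, TTL) map encoding like the one
    // the snapshot deserializer walks; the string codec is an assumption, not
    // IoTDB's SerializeUtils.
    public class TtlMapCodecExample {
      static ByteBuffer serialize(Map<String, Long> ttlMap) {
        // Capacity: 4-byte count, then per entry a 4-byte length, the UTF-8
        // bytes of the path, and an 8-byte TTL.
        int size = 4;
        for (Map.Entry<String, Long> e : ttlMap.entrySet()) {
          size += 4 + e.getKey().getBytes(StandardCharsets.UTF_8).length + 8;
        }
        ByteBuffer buffer = ByteBuffer.allocate(size);
        buffer.putInt(ttlMap.size());
        for (Map.Entry<String, Long> e : ttlMap.entrySet()) {
          byte[] path = e.getKey().getBytes(StandardCharsets.UTF_8);
          buffer.putInt(path.length);
          buffer.put(path);
          buffer.putLong(e.getValue());
        }
        buffer.flip();
        return buffer;
      }

      static Map<String, Long> deserialize(ByteBuffer buffer) {
        int count = buffer.getInt();
        Map<String, Long> ttlMap = new HashMap<>(count);
        for (int i = 0; i < count; i++) {
          byte[] path = new byte[buffer.getInt()];
          buffer.get(path);
          // Mirrors the loop above: one (storage group, TTL) pair per iteration.
          ttlMap.put(new String(path, StandardCharsets.UTF_8), buffer.getLong());
        }
        return ttlMap;
      }

      public static void main(String[] args) {
        Map<String, Long> ttl = new HashMap<>();
        ttl.put("root.sg1", 86_400_000L);
        System.out.println(deserialize(serialize(ttl))); // {root.sg1=86400000}
      }
    }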
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java
index a0eff88..a840d1c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java
@@ -37,9 +37,7 @@ import org.apache.iotdb.cluster.server.member.RaftMember;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * PartitionedSnapshot stores the snapshot of each slot in a map.
- */
+/** PartitionedSnapshot stores the snapshot of each slot in a map. */
 public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
 
   private static final Logger logger = LoggerFactory.getLogger(PartitionedSnapshot.class);
@@ -51,8 +49,7 @@ public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
     this(new HashMap<>(), factory);
   }
 
-  private PartitionedSnapshot(
-      Map<Integer, T> slotSnapshots, SnapshotFactory<T> factory) {
+  private PartitionedSnapshot(Map<Integer, T> slotSnapshots, SnapshotFactory<T> factory) {
     this.slotSnapshots = slotSnapshots;
     this.factory = factory;
   }
@@ -100,11 +97,14 @@ public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
 
   @Override
   public String toString() {
-    return "PartitionedSnapshot{" +
-        "slotSnapshots=" + slotSnapshots.size() +
-        ", lastLogIndex=" + lastLogIndex +
-        ", lastLogTerm=" + lastLogTerm +
-        '}';
+    return "PartitionedSnapshot{"
+        + "slotSnapshots="
+        + slotSnapshots.size()
+        + ", lastLogIndex="
+        + lastLogIndex
+        + ", lastLogTerm="
+        + lastLogTerm
+        + '}';
   }
 
   @Override
@@ -141,7 +141,6 @@ public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
     }
 
     @Override
-
     public void install(PartitionedSnapshot snapshot, int slot)
         throws SnapshotInstallationException {
       installPartitionedSnapshot(snapshot);
@@ -161,8 +160,11 @@ public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
      */
     private void installPartitionedSnapshot(PartitionedSnapshot<T> snapshot)
         throws SnapshotInstallationException {
-      logger.info("{}: start to install a snapshot of {}-{}", dataGroupMember.getName(),
-          snapshot.lastLogIndex, snapshot.lastLogTerm);
+      logger.info(
+          "{}: start to install a snapshot of {}-{}",
+          dataGroupMember.getName(),
+          snapshot.lastLogIndex,
+          snapshot.lastLogTerm);
       synchronized (dataGroupMember.getSnapshotApplyLock()) {
         List<Integer> slots =
             ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable())
@@ -197,8 +199,8 @@ public class PartitionedSnapshot<T extends Snapshot> extends Snapshot {
       } catch (CheckConsistencyException e) {
         throw new SnapshotInstallationException(e);
       }
-      SnapshotInstaller<T> defaultInstaller = (SnapshotInstaller<T>) snapshot
-          .getDefaultInstaller(dataGroupMember);
+      SnapshotInstaller<T> defaultInstaller =
+          (SnapshotInstaller<T>) snapshot.getDefaultInstaller(dataGroupMember);
       defaultInstaller.install(snapshot, slot);
     }
   }
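
As the class javadoc says, PartitionedSnapshot keys one snapshot per slot and creates missing ones through a factory. A minimal sketch of that layout, with java.util.function.Supplier standing in for IoTDB's SnapshotFactory:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // A minimal sketch of the slot-to-snapshot layout described above;
    // Supplier<T> stands in for SnapshotFactory<T>.
    class PartitionedExample<T> {
      private final Map<Integer, T> slotSnapshots = new HashMap<>();
      private final Supplier<T> factory;

      PartitionedExample(Supplier<T> factory) {
        this.factory = factory;
      }

      // Create the per-slot snapshot lazily on first access, via the factory.
      T getOrCreate(int slot) {
        return slotSnapshots.computeIfAbsent(slot, s -> factory.get());
      }

      @Override
      public String toString() {
        return "PartitionedExample{slotSnapshots=" + slotSnapshots.size() + '}';
      }
    }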
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java
index f0aa3f0..c1a4a49 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java
@@ -71,11 +71,14 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
    * @param descriptor
    * @param newMember
    * @param snapshotFactory
-   * @param snapshotSave    if the task is resumed from a disk file, this should that file,
-   *                        otherwise it should bu null
+   * @param snapshotSave if the task is resumed from a disk file, this should be that file,
+   *     otherwise it should be null

    */
-  public PullSnapshotTask(PullSnapshotTaskDescriptor descriptor,
-      DataGroupMember newMember, SnapshotFactory<T> snapshotFactory, File snapshotSave) {
+  public PullSnapshotTask(
+      PullSnapshotTaskDescriptor descriptor,
+      DataGroupMember newMember,
+      SnapshotFactory<T> snapshotFactory,
+      File snapshotSave) {
     this.descriptor = descriptor;
     this.newMember = newMember;
     this.snapshotFactory = snapshotFactory;
@@ -83,11 +86,13 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
   }
 
   @SuppressWarnings("java:S3740") // type cannot be known ahead
-  private boolean pullSnapshot(int nodeIndex)
-      throws InterruptedException, TException {
+  private boolean pullSnapshot(int nodeIndex) throws InterruptedException, TException {
     Node node = descriptor.getPreviousHolders().get(nodeIndex);
     if (logger.isDebugEnabled()) {
-      logger.debug("Pulling {} snapshots from {} of {}", descriptor.getSlots().size(), node,
+      logger.debug(
+          "Pulling {} snapshots from {} of {}",
+          descriptor.getSlots().size(),
+          node,
           descriptor.getPreviousHolders().getHeader());
     }
 
@@ -103,12 +108,17 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
         }
       }
       if (!noSnapshotSlots.isEmpty() && logger.isInfoEnabled()) {
-        logger.info("{}: {} and other {} slots do not have snapshot", newMember.getName(),
-            noSnapshotSlots.get(0), noSnapshotSlots.size() - 1);
+        logger.info(
+            "{}: {} and other {} slots do not have snapshot",
+            newMember.getName(),
+            noSnapshotSlots.get(0),
+            noSnapshotSlots.size() - 1);
       }
 
       if (logger.isInfoEnabled()) {
-        logger.info("Received a snapshot {} from {}", result,
+        logger.info(
+            "Received a snapshot {} from {}",
+            result,
             descriptor.getPreviousHolders().get(nodeIndex));
       }
       try {
@@ -128,13 +138,12 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
   private Map<Integer, T> pullSnapshot(Node node) throws TException, InterruptedException {
     Map<Integer, T> result;
     if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) {
-      AsyncDataClient client =
-          (AsyncDataClient) newMember.getAsyncClient(node);
+      AsyncDataClient client = (AsyncDataClient) newMember.getAsyncClient(node);
       if (client == null) {
         return null;
       }
-      result = SyncClientAdaptor.pullSnapshot(client, request,
-          descriptor.getSlots(), snapshotFactory);
+      result =
+          SyncClientAdaptor.pullSnapshot(client, request, descriptor.getSlots(), snapshotFactory);
     } else {
       SyncDataClient client = (SyncDataClient) newMember.getSyncClient(node);
       if (client == null) {
@@ -150,8 +159,8 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
         ClientUtils.putBackSyncClient(client);
       }
       result = new HashMap<>();
-      for (Entry<Integer, ByteBuffer> integerByteBufferEntry : pullSnapshotResp.snapshotBytes
-          .entrySet()) {
+      for (Entry<Integer, ByteBuffer> integerByteBufferEntry :
+          pullSnapshotResp.snapshotBytes.entrySet()) {
         T snapshot = snapshotFactory.create();
         snapshot.deserialize(integerByteBufferEntry.getValue());
         result.put(integerByteBufferEntry.getKey(), snapshot);
@@ -176,16 +185,19 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
         nodeIndex = (nodeIndex + 1) % descriptor.getPreviousHolders().size();
         finished = pullSnapshot(nodeIndex);
         if (!finished) {
-          Thread
-              .sleep(ClusterDescriptor.getInstance().getConfig().getPullSnapshotRetryIntervalMs());
+          Thread.sleep(
+              ClusterDescriptor.getInstance().getConfig().getPullSnapshotRetryIntervalMs());
         }
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         finished = true;
       } catch (TException e) {
         if (logger.isDebugEnabled()) {
-          logger.debug("Cannot pull slot {} from {}, retry", descriptor.getSlots(),
-              descriptor.getPreviousHolders().get(nodeIndex), e);
+          logger.debug(
+              "Cannot pull slot {} from {}, retry",
+              descriptor.getSlots(),
+              descriptor.getPreviousHolders().get(nodeIndex),
+              e);
         }
       }
     }
@@ -212,8 +224,11 @@ public class PullSnapshotTask<T extends Snapshot> implements Callable<Void> {
         new DataOutputStream(new BufferedOutputStream(new FileOutputStream(snapshotSave)))) {
       descriptor.serialize(dataOutputStream);
     } catch (IOException e) {
-      logger.error("Cannot save the pulling task: pull {} from {}", descriptor.getSlots(),
-          descriptor.getPreviousHolders(), e);
+      logger.error(
+          "Cannot save the pulling task: pull {} from {}",
+          descriptor.getSlots(),
+          descriptor.getPreviousHolders(),
+          e);
     }
   }
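
The pulling loop reformatted above retries until it succeeds: rotate round-robin through the previous holders of the slots, and sleep for the configured retry interval after each failed attempt. A minimal sketch of that retry rotation, with a placeholder in place of the real Thrift pull:

    import java.util.List;

    // A minimal sketch of the retry loop above: rotate through the previous
    // holders of the slots, sleeping between failed attempts. attemptPull()
    // and the holder list are stand-ins for the Thrift call and PartitionGroup.
    class PullRetryExample {
      static void pullUntilDone(List<String> previousHolders, long retryIntervalMs)
          throws InterruptedException {
        boolean finished = false;
        int nodeIndex = -1;
        while (!finished) {
          // Try the next holder in round-robin order.
          nodeIndex = (nodeIndex + 1) % previousHolders.size();
          finished = attemptPull(previousHolders.get(nodeIndex));
          if (!finished) {
            Thread.sleep(retryIntervalMs);
          }
        }
      }

      // Placeholder for the real pullSnapshot(nodeIndex) RPC; always succeeds here.
      static boolean attemptPull(String node) {
        return true;
      }
    }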
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java
index 9e1fb90..a70cfe4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java
@@ -31,8 +31,8 @@ import org.apache.iotdb.db.utils.SerializeUtils;
 
 /**
  * PullSnapshotTaskDescriptor describes a pull-snapshot-task with the slots to pull, the previous
- * owners and does this pulling require the provider to become read-only. So the task can be
- * resumed when system crashes.
+ * owners, and whether this pulling requires the provider to become read-only, so the task can be
+ * resumed when the system crashes.
  */
 public class PullSnapshotTaskDescriptor {
   private PartitionGroup previousHolders;
@@ -43,11 +43,10 @@ public class PullSnapshotTaskDescriptor {
   // replicas can pull the same snapshot.
   private boolean requireReadOnly;
 
-  public PullSnapshotTaskDescriptor() {
-  }
+  public PullSnapshotTaskDescriptor() {}
 
-  public PullSnapshotTaskDescriptor(PartitionGroup previousOwners,
-      List<Integer> slots, boolean requireReadOnly) {
+  public PullSnapshotTaskDescriptor(
+      PartitionGroup previousOwners, List<Integer> slots, boolean requireReadOnly) {
     this.previousHolders = previousOwners;
     this.slots = slots;
     this.requireReadOnly = requireReadOnly;
@@ -110,9 +109,9 @@ public class PullSnapshotTaskDescriptor {
       return false;
     }
     PullSnapshotTaskDescriptor that = (PullSnapshotTaskDescriptor) o;
-    return requireReadOnly == that.requireReadOnly &&
-        Objects.equals(previousHolders, that.previousHolders) &&
-        Objects.equals(slots, that.slots);
+    return requireReadOnly == that.requireReadOnly
+        && Objects.equals(previousHolders, that.previousHolders)
+        && Objects.equals(slots, that.slots);
   }
 
   @Override
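
Because the descriptor is what the task serializes to its snapshotSave file (see the PullSnapshotTask hunk above), a crash loses only in-memory progress; the slots, previous holders, and the read-only flag survive on disk. A minimal round-trip sketch of persisting such a descriptor with DataOutputStream/DataInputStream; the field layout here is illustrative, not the exact on-disk format:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // A minimal sketch of persisting a pull-snapshot task descriptor so it can
    // be resumed after a crash; the layout is illustrative only.
    public class DescriptorPersistenceExample {
      static void write(DataOutputStream out, List<Integer> slots, boolean requireReadOnly)
          throws IOException {
        out.writeInt(slots.size());
        for (int slot : slots) {
          out.writeInt(slot);
        }
        out.writeBoolean(requireReadOnly);
      }

      static List<Integer> readSlots(DataInputStream in) throws IOException {
        int n = in.readInt();
        List<Integer> slots = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
          slots.add(in.readInt());
        }
        return slots;
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), Arrays.asList(1, 2, 3), true);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readSlots(in)); // [1, 2, 3]
        System.out.println(in.readBoolean()); // true
      }
    }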
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java
index 86d6321..a2851f7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java
@@ -17,7 +17,6 @@
  * under the License.
  */
 
-
 package org.apache.iotdb.cluster.log.snapshot;
 
 import java.util.Map;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
index caecd13..8518310 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
@@ -182,13 +182,13 @@ public class CMManager extends MManager {
       seriesType = super.getSeriesType(path);
     } catch (PathNotExistException e) {
       // pull from remote node
-      List<MeasurementSchema> schemas = metaPuller
-          .pullMeasurementSchemas(Collections.singletonList(path));
+      List<MeasurementSchema> schemas =
+          metaPuller.pullMeasurementSchemas(Collections.singletonList(path));
       if (!schemas.isEmpty()) {
         MeasurementSchema measurementSchema = schemas.get(0);
-        MeasurementMNode measurementMNode = new MeasurementMNode(null,
-            measurementSchema.getMeasurementId(),
-            measurementSchema, null);
+        MeasurementMNode measurementMNode =
+            new MeasurementMNode(
+                null, measurementSchema.getMeasurementId(), measurementSchema, null);
         cacheMeta(path, measurementMNode);
         return schemas.get(0).getType();
       } else {
@@ -227,18 +227,19 @@ public class CMManager extends MManager {
       // try again
       failedMeasurementIndex = getMNodesLocally(deviceId, measurements, measurementMNodes);
       if (failedMeasurementIndex != -1) {
-        throw new MetadataException(deviceId.getFullPath() + IoTDBConstant.PATH_SEPARATOR
-            + measurements[failedMeasurementIndex] + " is not found");
+        throw new MetadataException(
+            deviceId.getFullPath()
+                + IoTDBConstant.PATH_SEPARATOR
+                + measurements[failedMeasurementIndex]
+                + " is not found");
       }
       return measurementMNodes;
     }
   }
 
-  /**
-   * @return -1 if all schemas are found, or the first index of the non-exist schema
-   */
-  private int getMNodesLocally(PartialPath deviceId, String[] measurements,
-      MeasurementMNode[] measurementMNodes) {
+  /** @return -1 if all schemas are found, or the index of the first non-existent schema */
+  private int getMNodesLocally(
+      PartialPath deviceId, String[] measurements, MeasurementMNode[] measurementMNodes) {
     int failedMeasurementIndex = -1;
     cacheLock.readLock().lock();
     try {
@@ -266,8 +267,8 @@ public class CMManager extends MManager {
     List<MeasurementSchema> schemas = metaPuller.pullMeasurementSchemas(schemasToPull);
     for (MeasurementSchema schema : schemas) {
       // TODO-Cluster: also pull alias?
-      MeasurementMNode measurementMNode = new MeasurementMNode(null, schema.getMeasurementId(),
-          schema, null);
+      MeasurementMNode measurementMNode =
+          new MeasurementMNode(null, schema.getMeasurementId(), schema, null);
       cacheMeta(deviceId.concatNode(schema.getMeasurementId()), measurementMNode);
     }
     logger.debug("Pulled {}/{} schemas from remote", schemas.size(), measurementList.length);
@@ -281,8 +282,11 @@ public class CMManager extends MManager {
   }
 
   @Override
-  public void updateLastCache(PartialPath seriesPath, TimeValuePair timeValuePair,
-      boolean highPriorityUpdate, Long latestFlushedTime,
+  public void updateLastCache(
+      PartialPath seriesPath,
+      TimeValuePair timeValuePair,
+      boolean highPriorityUpdate,
+      Long latestFlushedTime,
       MeasurementMNode node) {
     cacheLock.writeLock().lock();
     try {
@@ -294,8 +298,7 @@ public class CMManager extends MManager {
       cacheLock.writeLock().unlock();
     }
     // maybe local also has the timeseries
-    super.updateLastCache(seriesPath, timeValuePair, highPriorityUpdate, latestFlushedTime,
-        node);
+    super.updateLastCache(seriesPath, timeValuePair, highPriorityUpdate, latestFlushedTime, node);
   }
 
   @Override
@@ -314,8 +317,8 @@ public class CMManager extends MManager {
     try {
       MNode deviceNode = getDeviceNode(plan.getDeviceId());
 
-      int nonExistSchemaIndex = getMNodesLocally(plan.getDeviceId(), plan.getMeasurements(),
-          measurementMNodes);
+      int nonExistSchemaIndex =
+          getMNodesLocally(plan.getDeviceId(), plan.getMeasurements(), measurementMNodes);
       if (nonExistSchemaIndex == -1) {
         plan.setMeasurementMNodes(measurementMNodes);
         return deviceNode;
@@ -352,13 +355,12 @@ public class CMManager extends MManager {
     }
 
     // pull from remote
-    pullSeriesSchemas(device, new String[]{measurement});
+    pullSeriesSchemas(device, new String[] {measurement});
 
     // try again
     cacheLock.readLock().lock();
     try {
-      MeasurementMNode measurementMeta =
-          mRemoteMetaCache.get(device.concatNode(measurement));
+      MeasurementMNode measurementMeta = mRemoteMetaCache.get(device.concatNode(measurement));
       if (measurementMeta != null) {
         return measurementMeta.getSchema();
       }
@@ -422,9 +424,7 @@ public class CMManager extends MManager {
       // try to create timeseries
       boolean isAutoCreateTimeseriesSuccess = createTimeseries((InsertPlan) plan);
       if (!isAutoCreateTimeseriesSuccess) {
-        throw new MetadataException(
-            "Failed to create timeseries from InsertPlan automatically."
-        );
+        throw new MetadataException("Failed to create timeseries from InsertPlan automatically.");
       }
     }
   }
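
The CMManager methods above all follow one shape: resolve metadata locally first, and on a PathNotExistException pull the schema from a remote holder and cache the resulting MeasurementMNode so later reads stay local. A minimal sketch of that local-first, pull-then-cache lookup; both lookup calls below are placeholders, not the real metadata APIs:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // A minimal sketch of the local-first, pull-from-remote-then-cache lookup
    // that CMManager layers over MManager; lookupLocally/pullFromRemote are
    // stand-ins for the real metadata calls.
    class RemoteMetaCacheExample {
      private final Map<String, String> remoteCache = new ConcurrentHashMap<>();

      String getSeriesType(String path) {
        String type = lookupLocally(path);
        if (type != null) {
          return type; // resolved without any network traffic
        }
        // Not known locally: ask a remote holder once, then serve later
        // reads from the cache.
        return remoteCache.computeIfAbsent(path, RemoteMetaCacheExample::pullFromRemote);
      }

      private static String lookupLocally(String path) {
        return null; // placeholder: pretend the path is never local
      }

      private static String pullFromRemote(String path) {
        return "DOUBLE"; // placeholder for the metaPuller RPC
      }
    }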
@@ -441,9 +441,10 @@ public class CMManager extends MManager {
... 180665 lines suppressed ...