Posted to commits@iotdb.apache.org by yu...@apache.org on 2022/03/06 04:01:53 UTC

[iotdb] branch research/separation updated: sep

This is an automated email from the ASF dual-hosted git repository.

yuyuankang pushed a commit to branch research/separation
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/research/separation by this push:
     new 4245f04  sep
4245f04 is described below

commit 4245f0407ca1f53a6130ff179064906ada3d5c6e
Author: Ring-k <yu...@hotmail.com>
AuthorDate: Sun Mar 6 12:00:49 2022 +0800

    sep
---
 .gitattributes                                     |    1 -
 .github/dependabot.yml                             |   29 -
 .github/workflows/client.yml                       |   28 +-
 .gitignore                                         |    5 -
 LICENSE-binary                                     |   39 +-
 README.md                                          |    8 -
 README_ZH.md                                       |    8 -
 RELEASE_NOTES.md                                   |  406 +-------
 antlr/pom.xml                                      |    2 +-
 .../antlr4/org/apache/iotdb/db/qp/sql/SqlBase.g4   |   83 +-
 cli/pom.xml                                        |    7 +-
 cli/src/assembly/resources/sbin/start-cli.sh       |    1 -
 .../java/org/apache/iotdb/cli/AbstractCli.java     |   17 +-
 cli/src/main/java/org/apache/iotdb/cli/Cli.java    |    3 +-
 cli/src/main/java/org/apache/iotdb/cli/WinCli.java |   13 +-
 .../org/apache/iotdb/tool/AbstractCsvTool.java     |   96 +-
 .../main/java/org/apache/iotdb/tool/ExportCsv.java |  250 ++---
 .../main/java/org/apache/iotdb/tool/ImportCsv.java |  808 ++++++---------
 .../java/org/apache/iotdb/cli/AbstractScript.java  |   10 +-
 .../org/apache/iotdb/cli/StartClientScriptIT.java  |   27 -
 .../org/apache/iotdb/tool/CsvLineSplitTest.java    |   18 +-
 .../tool/{integration => }/ExportCsvTestIT.java    |    2 +-
 .../tool/{integration => }/ImportCsvTestIT.java    |    2 +-
 .../apache/iotdb/tool/unit/WriteCsvFileTestUT.java |   46 -
 client-cpp/pom.xml                                 |    4 +-
 client-cpp/src/main/CMakeLists.txt                 |    2 +-
 client-cpp/src/main/Session.cpp                    |  769 +++++++-------
 client-cpp/src/main/Session.h                      |  578 +++++------
 client-cpp/src/test/CMakeLists.txt                 |    2 +-
 client-cpp/src/test/main.cpp                       |    6 +-
 client-py/iotdb/Session.py                         |    1 -
 client-py/iotdb/utils/IoTDBConstants.py            |   15 -
 client-py/iotdb/utils/IoTDBRpcDataSet.py           |    4 +-
 client-py/iotdb/utils/SessionDataSet.py            |    2 -
 client-py/pom.xml                                  |    2 +-
 client-py/setup.py                                 |    2 +-
 cluster/pom.xml                                    |   19 +-
 cluster/src/assembly/resources/sbin/start-node.bat |   21 +-
 cluster/src/assembly/resources/sbin/stop-node.sh   |    6 +-
 .../java/org/apache/iotdb/cluster/ClusterMain.java |    7 -
 .../iotdb/cluster/client/DataClientProvider.java   |   48 +-
 .../cluster/client/async/AsyncClientPool.java      |   57 +-
 .../cluster/client/async/AsyncDataClient.java      |    4 +-
 .../client/async/AsyncDataHeartbeatClient.java     |    4 +-
 .../cluster/client/async/AsyncMetaClient.java      |    4 +-
 .../client/async/AsyncMetaHeartbeatClient.java     |    4 +-
 .../cluster/client/sync/SyncClientAdaptor.java     |   15 +-
 .../iotdb/cluster/client/sync/SyncClientPool.java  |   29 +-
 .../iotdb/cluster/client/sync/SyncDataClient.java  |    2 -
 .../client/sync/SyncDataHeartbeatClient.java       |    2 -
 .../iotdb/cluster/client/sync/SyncMetaClient.java  |    2 -
 .../client/sync/SyncMetaHeartbeatClient.java       |    2 -
 .../iotdb/cluster/coordinator/Coordinator.java     |  239 ++---
 .../iotdb/cluster/log/StableEntryManager.java      |    6 -
 .../iotdb/cluster/log/applier/BaseApplier.java     |  110 +-
 .../iotdb/cluster/log/applier/DataLogApplier.java  |   59 +-
 .../cluster/log/manage/CommittedEntryManager.java  |   16 -
 .../iotdb/cluster/log/manage/RaftLogManager.java   |    6 +-
 .../serializable/SyncLogDequeSerializer.java       |   34 +-
 .../iotdb/cluster/log/snapshot/FileSnapshot.java   |   13 +-
 .../apache/iotdb/cluster/metadata/CMManager.java   |  213 +---
 .../apache/iotdb/cluster/metadata/MetaPuller.java  |   18 +-
 .../cluster/partition/slot/SlotPartitionTable.java |   36 +-
 .../cluster/query/ClusterDataQueryExecutor.java    |   13 +-
 .../iotdb/cluster/query/ClusterPlanExecutor.java   |  229 +----
 .../iotdb/cluster/query/ClusterPlanRouter.java     |   36 -
 .../apache/iotdb/cluster/query/ClusterPlanner.java |   13 +-
 .../iotdb/cluster/query/ClusterQueryRouter.java    |   54 +-
 .../cluster/query/ClusterUDTFQueryExecutor.java    |  111 --
 .../iotdb/cluster/query/LocalQueryExecutor.java    |  108 +-
 .../cluster/query/aggregate/ClusterAggregator.java |    9 +-
 .../cluster/query/fill/ClusterFillExecutor.java    |   55 +-
 .../cluster/query/fill/ClusterLinearFill.java      |    4 +-
 .../cluster/query/fill/ClusterPreviousFill.java    |   50 +-
 .../cluster/query/filter/SlotTsFileFilter.java     |    5 +-
 .../query/groupby/RemoteGroupByExecutor.java       |   25 +-
 .../query/last/ClusterLastQueryExecutor.java       |   24 +-
 .../cluster/query/manage/ClusterQueryManager.java  |    8 +-
 .../cluster/query/reader/ClusterReaderFactory.java |   47 +-
 .../cluster/query/reader/ClusterTimeGenerator.java |   45 +-
 .../iotdb/cluster/query/reader/DataSourceInfo.java |   41 +-
 .../reader/ManagedDescPriorityMergeReader.java     |   96 --
 ...ityMergeReader.java => ManagedMergeReader.java} |    4 +-
 .../reader/RemoteSeriesReaderByTimestamp.java      |    2 -
 .../query/reader/RemoteSimpleSeriesReader.java     |    2 -
 .../mult/AssignPathDescPriorityMergeReader.java    |   49 -
 .../reader/mult/AssignPathManagedMergeReader.java  |   43 +-
 ...der.java => AssignPathPriorityMergeReader.java} |   37 +-
 .../mult/IAssignPathPriorityMergeReader.java       |   47 -
 .../cluster/query/reader/mult/MultBatchReader.java |    5 +-
 .../query/reader/mult/MultDataSourceInfo.java      |   20 +-
 .../cluster/query/reader/mult/MultElement.java     |   49 -
 .../query/reader/mult/RemoteMultSeriesReader.java  |   21 +-
 .../apache/iotdb/cluster/server/ClientServer.java  |   18 +-
 .../iotdb/cluster/server/DataClusterServer.java    |   24 -
 .../iotdb/cluster/server/MetaClusterServer.java    |   10 -
 .../handlers/caller/PreviousFillHandler.java       |   26 +-
 .../cluster/server/heartbeat/HeartbeatThread.java  |   41 +-
 .../cluster/server/member/DataGroupMember.java     |  134 +--
 .../cluster/server/member/MetaGroupMember.java     |   89 +-
 .../iotdb/cluster/server/member/RaftMember.java    |   32 +-
 .../cluster/server/service/BaseAsyncService.java   |    4 -
 .../cluster/server/service/BaseSyncService.java    |    4 -
 .../cluster/server/service/DataAsyncService.java   |   12 -
 .../cluster/server/service/DataSyncService.java    |    9 -
 .../apache/iotdb/cluster/utils/PartitionUtils.java |   14 +-
 .../apache/iotdb/cluster/utils/PlanSerializer.java |    8 +-
 .../apache/iotdb/cluster/utils/StatusUtils.java    |    3 +-
 .../utils/nodetool/function/NodeToolCmd.java       |    2 +-
 .../cluster/client/DataClientProviderTest.java     |    5 +-
 .../cluster/client/async/AsyncMetaClientTest.java  |   28 +-
 .../cluster/client/sync/SyncClientAdaptorTest.java |    1 -
 .../cluster/client/sync/SyncDataClientTest.java    |    6 +-
 .../cluster/client/sync/SyncMetaClientTest.java    |    6 +-
 .../org/apache/iotdb/cluster/common/IoTDBTest.java |    3 +-
 .../cluster/common/TestAsyncClientFactory.java     |    4 +-
 .../iotdb/cluster/common/TestAsyncDataClient.java  |   22 -
 .../cluster/common/TestSyncClientFactory.java      |   13 -
 .../org/apache/iotdb/cluster/common/TestUtils.java |    5 +-
 .../iotdb/cluster/integration/SingleNodeTest.java  |   35 -
 .../cluster/log/applier/DataLogApplierTest.java    |    9 +-
 .../cluster/log/snapshot/DataSnapshotTest.java     |   14 -
 .../cluster/log/snapshot/FileSnapshotTest.java     |   20 -
 .../cluster/log/snapshot/PullSnapshotTaskTest.java |   14 -
 .../apache/iotdb/cluster/query/BaseQueryTest.java  |    4 +-
 .../query/ClusterAggregateExecutorTest.java        |    4 +-
 .../query/ClusterDataQueryExecutorTest.java        |   16 +-
 .../query/{fill => }/ClusterFillExecutorTest.java  |   73 +-
 .../cluster/query/ClusterPlanExecutorTest.java     |   23 +-
 .../cluster/query/ClusterQueryRouterTest.java      |   45 +-
 .../query/ClusterUDTFQueryExecutorTest.java        |  116 ---
 .../ClusterGroupByNoVFilterDataSetTest.java        |    2 +-
 .../groupby/ClusterGroupByVFilterDataSetTest.java  |    2 +-
 .../query/groupby/MergeGroupByExecutorTest.java    |    4 +-
 .../query/groupby/RemoteGroupByExecutorTest.java   |    4 +-
 .../query/last/ClusterLastQueryExecutorTest.java   |  101 --
 .../query/manage/ClusterQueryManagerTest.java      |   12 +-
 .../query/reader/ClusterReaderFactoryTest.java     |   81 --
 .../query/reader/ClusterTimeGeneratorTest.java     |   60 +-
 .../mult/AssignPathManagedMergeReaderTest.java     |    3 +-
 .../iotdb/cluster/server/member/BaseMember.java    |    2 +-
 .../cluster/server/member/DataGroupMemberTest.java |   24 +-
 .../cluster/server/member/MetaGroupMemberTest.java |    4 +-
 code-coverage/pom.xml                              |    2 +-
 compile-tools/pom.xml                              |    2 +-
 compile-tools/thrift/pom.xml                       |   11 +-
 cross-tests/pom.xml                                |   11 +-
 .../tests/tools/importCsv/AbstractScript.java      |   54 +-
 .../tests/tools/importCsv/ExportCsvTestIT.java     |  209 ++--
 .../tests/tools/importCsv/ImportCsvTestIT.java     |  383 +++----
 distribution/pom.xml                               |   17 +-
 docker/ReadMe.md                                   |   44 -
 .../main/DockerCompose/docker-compose-grafana.yml  |   50 -
 docker/src/main/Dockerfile-0.12.0-cluster          |   53 -
 docker/src/main/Dockerfile-0.12.0-node             |   45 -
 docker/src/main/Dockerfile-0.12.1-cluster          |   53 -
 docker/src/main/Dockerfile-0.12.1-node             |   45 -
 docker/src/main/Dockerfile-0.12.2-cluster          |   53 -
 docker/src/main/Dockerfile-0.12.2-grafana          |   41 -
 docker/src/main/Dockerfile-0.12.2-node             |   45 -
 docs/Download/README.md                            |   12 +-
 docs/SystemDesign/SchemaManager/SchemaManager.md   |   39 +-
 docs/UserGuide/API/Programming-JDBC.md             |    4 -
 docs/UserGuide/API/Programming-Other-Languages.md  |   20 +-
 docs/UserGuide/Appendix/SQL-Reference.md           |   63 +-
 docs/UserGuide/Cluster/Cluster-Setup-Example.md    |  200 +---
 .../UserGuide/Ecosystem Integration/Flink IoTDB.md |    3 +-
 .../UserGuide/Ecosystem Integration/Spark IoTDB.md |   42 -
 .../Ecosystem Integration/Zeppelin-IoTDB.md        |    2 +-
 .../DDL-Data-Definition-Language.md                |    8 +-
 .../DML-Data-Manipulation-Language.md              |  168 +---
 .../IoTDB-SQL-Language/Maintenance-Command.md      |   31 +-
 docs/UserGuide/System-Tools/CSV-Tool.md            |  204 ++--
 .../UserGuide/System-Tools/Load-External-Tsfile.md |   32 +-
 docs/UserGuide/UDF/UDF-User-Defined-Function.md    |   10 +-
 docs/zh/Download/README.md                         |   12 +-
 .../zh/SystemDesign/SchemaManager/SchemaManager.md |   55 +-
 docs/zh/UserGuide/API/Programming-JDBC.md          |    3 -
 .../UserGuide/API/Programming-Other-Languages.md   |   16 +-
 docs/zh/UserGuide/Appendix/SQL-Reference.md        |   61 +-
 docs/zh/UserGuide/Cluster/Cluster-Setup-Example.md |  236 +----
 .../UserGuide/Ecosystem Integration/Spark IoTDB.md |   42 -
 .../Ecosystem Integration/Zeppelin-IoTDB.md        |    2 +-
 .../DDL-Data-Definition-Language.md                |   14 +-
 .../DML-Data-Manipulation-Language.md              |  187 +---
 .../IoTDB-SQL-Language/Maintenance-Command.md      |   31 +-
 docs/zh/UserGuide/System-Tools/CSV-Tool.md         |  199 ++--
 .../UserGuide/System-Tools/Load-External-Tsfile.md |   56 +-
 docs/zh/UserGuide/UDF/UDF-User-Defined-Function.md |   10 +-
 example/client-cpp-example/pom.xml                 |    4 +-
 example/client-cpp-example/src/SessionExample.cpp  |   16 +-
 example/flink/pom.xml                              |    2 +-
 .../org/apache/iotdb/flink/FlinkIoTDBSink.java     |    1 +
 example/hadoop/pom.xml                             |    2 +-
 example/jdbc/pom.xml                               |    2 +-
 example/kafka/pom.xml                              |    2 +-
 example/mqtt/pom.xml                               |    2 +-
 example/pom.xml                                    |    3 +-
 example/pulsar/pom.xml                             |    2 +-
 example/rocketmq/pom.xml                           |    2 +-
 example/session/pom.xml                            |    2 +-
 .../org/apache/iotdb/DataMigrationExample.java     |    8 +-
 .../org/apache/iotdb/SessionConcurrentExample.java |  198 ----
 .../main/java/org/apache/iotdb/SessionExample.java |   46 +-
 .../src/main/java/org/apache/iotdb/Write.java      |   44 +
 example/spark/pom.xml                              |   58 --
 .../src/main/java/org/apache/iotdb/Example.java    |   54 -
 .../main/scala/org/apache/iotdb/ScalaExample.scala |   51 -
 example/tsfile/pom.xml                             |    2 +-
 .../apache/iotdb/tsfile/TsFileSequenceRead.java    |   13 +-
 .../apache/iotdb/tsfile/tlsm_compaction.properties |  111 +-
 .../org/apache/iotdb/tsfile/tlsm_write.properties  |  111 +-
 example/udf/pom.xml                                |    2 +-
 flink-iotdb-connector/pom.xml                      |    2 +-
 .../java/org/apache/iotdb/flink/IoTDBSink.java     |   19 +-
 .../iotdb/flink/options/IoTDBSinkOptions.java      |   11 +
 flink-tsfile-connector/pom.xml                     |    2 +-
 grafana/pom.xml                                    |    6 +-
 hadoop/pom.xml                                     |   20 +-
 hive-connector/pom.xml                             |   24 +-
 jdbc/pom.xml                                       |    2 +-
 jdbc/src/main/feature/feature.xml                  |    4 +-
 .../iotdb/jdbc/AbstractIoTDBJDBCResultSet.java     |    4 +-
 .../org/apache/iotdb/jdbc/IoTDBConnection.java     |    9 +-
 .../iotdb/jdbc/IoTDBNonAlignJDBCResultSet.java     |    3 -
 .../org/apache/iotdb/jdbc/IoTDBResultMetadata.java |    8 +-
 .../apache/iotdb/jdbc/IoTDBResultMetadataTest.java |    2 +-
 pom.xml                                            |  106 +-
 server/pom.xml                                     |   30 +-
 .../resources/conf/iotdb-engine.properties         |  100 +-
 server/src/assembly/resources/conf/iotdb-env.bat   |   69 +-
 server/src/assembly/resources/conf/iotdb-env.sh    |   41 +-
 server/src/assembly/resources/conf/logback.xml     |   24 +-
 .../src/assembly/resources/sbin/start-server.bat   |   10 +-
 .../org/apache/iotdb/db/auth/AuthorityChecker.java |    4 +-
 .../db/auth/authorizer/LocalFileAuthorizer.java    |    3 +-
 .../iotdb/db/auth/user/BasicUserManager.java       |   10 +-
 .../db/concurrent/IoTDBThreadPoolFactory.java      |   89 +-
 .../db/concurrent/threadpool/IThreadPoolMBean.java |   44 -
 .../WrappedScheduledExecutorService.java           |  192 ----
 .../WrappedScheduledExecutorServiceMBean.java      |   21 -
 .../WrappedSingleThreadExecutorService.java        |  118 ---
 .../WrappedSingleThreadScheduledExecutor.java      |  123 ---
 .../WrappedSingleThreadScheduledExecutorMBean.java |   21 -
 .../threadpool/WrappedThreadPoolExecutor.java      |   88 --
 .../threadpool/WrappedThreadPoolExecutorMBean.java |   21 -
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  262 ++---
 .../org/apache/iotdb/db/conf/IoTDBConfigCheck.java |   12 +-
 .../org/apache/iotdb/db/conf/IoTDBConstant.java    |    8 +-
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |  238 ++---
 .../iotdb/db/conf/adapter/CompressionRatio.java    |    2 +-
 .../iotdb/db/cost/statistic/Measurement.java       |    4 +-
 .../org/apache/iotdb/db/engine/StorageEngine.java  |  255 ++---
 .../db/engine/cache/CacheHitRatioMonitor.java      |   16 +-
 .../engine/cache/CacheHitRatioMonitorMXBean.java   |    8 +-
 .../apache/iotdb/db/engine/cache/ChunkCache.java   |  139 ++-
 .../iotdb/db/engine/cache/LRULinkedHashMap.java    |  138 +++
 .../db/engine/cache/TimeSeriesMetadataCache.java   |  199 ++--
 .../compaction/CompactionMergeTaskPoolManager.java |   60 +-
 .../db/engine/compaction/CompactionStrategy.java   |   13 +-
 .../compaction/StorageGroupCompactionTask.java     |   48 -
 .../db/engine/compaction/TsFileManagement.java     |  265 ++---
 .../level/LevelCompactionTsFileManagement.java     |  443 ++++----
 ...TraditionalLevelCompactionTsFileManagement.java |  666 ++++++++++++
 .../no/NoCompactionTsFileManagement.java           |   26 +-
 .../compaction/utils/CompactionFileInfo.java       |  107 --
 .../compaction/utils/CompactionLogAnalyzer.java    |   44 +-
 .../engine/compaction/utils/CompactionLogger.java  |   25 -
 .../utils/CompactionSeparateFileUtils.java         |  241 +++++
 .../engine/compaction/utils/CompactionUtils.java   |  325 +++---
 .../apache/iotdb/db/engine/flush/FlushManager.java |    4 +-
 .../iotdb/db/engine/memtable/AbstractMemTable.java |   37 +-
 .../apache/iotdb/db/engine/memtable/IMemTable.java |    8 -
 .../db/engine/memtable/PrimitiveMemTable.java      |   11 +-
 .../iotdb/db/engine/merge/manage/MergeContext.java |    2 +
 .../iotdb/db/engine/merge/manage/MergeManager.java |   20 +-
 .../db/engine/merge/manage/MergeResource.java      |   38 +-
 .../{MergeLogAnalyzer.java => LogAnalyzer.java}    |   50 +-
 .../db/engine/merge/recover/MergeFileInfo.java     |  116 ---
 .../iotdb/db/engine/merge/recover/MergeLogger.java |    9 +-
 .../merge/selector/MaxFileMergeFileSelector.java   |   24 +-
 .../merge/task/CompactionMergeRecoverTask.java     |   82 --
 .../iotdb/db/engine/merge/task/MergeFileTask.java  |   12 +-
 .../db/engine/merge/task/MergeMultiChunkTask.java  |   89 +-
 .../iotdb/db/engine/merge/task/MergeTask.java      |   29 +-
 .../db/engine/merge/task/RecoverMergeTask.java     |   28 +-
 .../db/engine/modification/ModificationFile.java   |   10 +-
 .../engine/modification/io/ModificationWriter.java |    4 +-
 .../db/engine/querycontext/QueryDataSource.java    |   58 +-
 .../engine/storagegroup/StorageGroupProcessor.java |  791 +++++++--------
 .../db/engine/storagegroup/TsFileProcessor.java    |  258 ++---
 .../db/engine/storagegroup/TsFileResource.java     |  374 +++----
 .../storagegroup/timeindex/DeviceTimeIndex.java    |    7 +-
 .../storagegroup/timeindex/FileTimeIndex.java      |    7 +-
 .../virtualSg/VirtualStorageGroupManager.java      |   51 +-
 .../iotdb/db/engine/upgrade/UpgradeTask.java       |   22 +-
 .../version/SimpleFileVersionController.java       |    2 +-
 .../metadata/StorageGroupAlreadySetException.java  |    7 -
 .../metadata/UndefinedTemplateException.java       |   31 -
 .../exception/query/PathNumOverLimitException.java |   15 +-
 .../db/metadata/{logfile => }/MLogTxtWriter.java   |   71 +-
 .../org/apache/iotdb/db/metadata/MManager.java     |  450 +++------
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  309 +-----
 .../org/apache/iotdb/db/metadata/MetaUtils.java    |    3 -
 .../iotdb/db/metadata/MetadataOperationType.java   |    4 -
 .../db/metadata/{logfile => }/TagLogFile.java      |   85 +-
 .../iotdb/db/metadata/logfile/MLogWriter.java      |   47 +-
 .../org/apache/iotdb/db/metadata/mnode/MNode.java  |   69 --
 .../iotdb/db/metadata/template/Template.java       |  147 ---
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |    6 +-
 .../org/apache/iotdb/db/mqtt/PublishHandler.java   |   16 +-
 .../main/java/org/apache/iotdb/db/qp/Planner.java  |   32 +-
 .../apache/iotdb/db/qp/constant/SQLConstant.java   |    4 -
 .../apache/iotdb/db/qp/executor/PlanExecutor.java  |  192 +---
 .../org/apache/iotdb/db/qp/logical/Operator.java   |    9 +-
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   21 +-
 .../db/qp/logical/crud/GroupByLevelController.java |  143 ---
 .../iotdb/db/qp/logical/crud/InOperator.java       |    4 -
 .../iotdb/db/qp/logical/crud/LikeOperator.java     |  130 ---
 .../iotdb/db/qp/logical/crud/QueryOperator.java    |   53 +-
 .../iotdb/db/qp/logical/crud/RegexpOperator.java   |  130 ---
 .../iotdb/db/qp/logical/crud/SFWOperator.java      |   10 -
 .../iotdb/db/qp/logical/crud/SelectOperator.java   |   41 -
 .../iotdb/db/qp/logical/sys/LoadFilesOperator.java |   21 +-
 .../db/qp/logical/sys/SetSystemModeOperator.java   |   36 -
 .../db/qp/logical/sys/ShowLockInfoOperator.java    |   36 -
 .../org/apache/iotdb/db/qp/physical/BatchPlan.java |   20 -
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |  111 +-
 .../iotdb/db/qp/physical/crud/AggregationPlan.java |   70 +-
 .../db/qp/physical/crud/CreateTemplatePlan.java    |  270 -----
 .../iotdb/db/qp/physical/crud/DeletePlan.java      |    2 +-
 .../db/qp/physical/crud/InsertMultiTabletPlan.java |   20 +-
 .../iotdb/db/qp/physical/crud/InsertRowPlan.java   |   21 +-
 .../physical/crud/InsertRowsOfOneDevicePlan.java   |  104 +-
 .../iotdb/db/qp/physical/crud/InsertRowsPlan.java  |   29 +-
 .../db/qp/physical/crud/InsertTabletPlan.java      |    2 +-
 .../iotdb/db/qp/physical/crud/QueryPlan.java       |   22 -
 .../db/qp/physical/crud/SetDeviceTemplatePlan.java |   94 --
 .../apache/iotdb/db/qp/physical/crud/UDTFPlan.java |   20 -
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |    2 +-
 .../qp/physical/sys/AutoCreateDeviceMNodePlan.java |   87 --
 .../iotdb/db/qp/physical/sys/ChangeAliasPlan.java  |    2 +-
 .../db/qp/physical/sys/ChangeTagOffsetPlan.java    |    2 +-
 .../db/qp/physical/sys/CreateFunctionPlan.java     |   45 +-
 .../iotdb/db/qp/physical/sys/CreateIndexPlan.java  |    2 +-
 .../qp/physical/sys/CreateMultiTimeSeriesPlan.java |   13 +-
 .../db/qp/physical/sys/CreateTimeSeriesPlan.java   |    2 +-
 .../iotdb/db/qp/physical/sys/DataAuthPlan.java     |    2 +-
 .../db/qp/physical/sys/DeleteStorageGroupPlan.java |    2 +-
 .../db/qp/physical/sys/DeleteTimeSeriesPlan.java   |    2 +-
 .../iotdb/db/qp/physical/sys/DropFunctionPlan.java |   25 +-
 .../iotdb/db/qp/physical/sys/DropIndexPlan.java    |    2 +-
 .../apache/iotdb/db/qp/physical/sys/FlushPlan.java |    2 +-
 .../apache/iotdb/db/qp/physical/sys/MNodePlan.java |    2 +-
 .../db/qp/physical/sys/MeasurementMNodePlan.java   |    2 +-
 .../iotdb/db/qp/physical/sys/OperateFilePlan.java  |   23 +-
 .../db/qp/physical/sys/SetStorageGroupPlan.java    |    2 +-
 .../db/qp/physical/sys/SetSystemModePlan.java      |   78 --
 .../iotdb/db/qp/physical/sys/SetTTLPlan.java       |    2 +-
 .../physical/sys/SetUsingDeviceTemplatePlan.java   |   83 --
 .../iotdb/db/qp/physical/sys/ShowLockInfoPlan.java |   36 -
 .../apache/iotdb/db/qp/physical/sys/ShowPlan.java  |    3 +-
 .../db/qp/physical/sys/StorageGroupMNodePlan.java  |    2 +-
 .../apache/iotdb/db/qp/sql/IoTDBSqlVisitor.java    |  343 +++----
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |   73 +-
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |  244 ++---
 .../qp/strategy/optimizer/ILogicalOptimizer.java   |    3 +-
 .../apache/iotdb/db/qp/utils/DatetimeUtils.java    |   48 +-
 .../db/query/aggregation/AggregateResult.java      |   10 +-
 .../db/query/aggregation/impl/CountAggrResult.java |   24 +-
 .../aggregation/impl/FirstValueAggrResult.java     |    8 +-
 .../aggregation/impl/FirstValueDescAggrResult.java |    5 -
 .../aggregation/impl/MinTimeDescAggrResult.java    |    5 -
 .../iotdb/db/query/context/QueryContext.java       |   10 -
 .../iotdb/db/query/control/FileReaderManager.java  |  112 ++-
 .../iotdb/db/query/control/QueryFileManager.java   |   42 +-
 .../db/query/control/QueryResourceManager.java     |  185 ++--
 .../iotdb/db/query/control/QueryTimeManager.java   |   25 +-
 .../iotdb/db/query/control/SessionManager.java     |  193 ----
 .../apache/iotdb/db/query/control/TracingInfo.java |   92 --
 .../iotdb/db/query/control/TracingManager.java     |  139 +--
 .../db/query/dataset/AlignByDeviceDataSet.java     |   14 +-
 .../dataset/RawQueryDataSetWithValueFilter.java    |    4 +-
 .../dataset/RawQueryDataSetWithoutValueFilter.java |  178 ++--
 .../UDFRawQueryInputDataSetWithoutValueFilter.java |   67 --
 .../apache/iotdb/db/query/dataset/UDTFDataSet.java |    7 +-
 .../dataset/groupby/GroupByEngineDataSet.java      |    2 -
 .../query/dataset/groupby/GroupByFillDataSet.java  |   78 +-
 .../query/dataset/groupby/GroupByTimeDataSet.java  |    7 +-
 .../groupby/GroupByWithValueFilterDataSet.java     |   13 +-
 .../groupby/GroupByWithoutValueFilterDataSet.java  |    5 +-
 .../dataset/groupby/LocalGroupByExecutor.java      |    3 -
 .../db/query/executor/AggregationExecutor.java     |   24 +-
 .../iotdb/db/query/executor/FillQueryExecutor.java |  150 +--
 .../iotdb/db/query/executor/LastQueryExecutor.java |   21 +-
 .../iotdb/db/query/executor/QueryRouter.java       |   41 +-
 .../db/query/executor/RawDataQueryExecutor.java    |   12 +-
 .../apache/iotdb/db/query/executor/fill/IFill.java |    3 +-
 .../iotdb/db/query/executor/fill/LinearFill.java   |    1 +
 .../iotdb/db/query/executor/fill/PreviousFill.java |    3 +-
 .../iotdb/db/query/executor/fill/ValueFill.java    |  110 --
 .../iotdb/db/query/pool/QueryTaskPoolManager.java  |   10 +-
 .../db/query/reader/chunk/ChunkDataIterator.java   |    2 +-
 .../db/query/reader/chunk/DiskChunkLoader.java     |   10 +-
 .../reader/chunk/DiskChunkReaderByTimestamp.java   |    2 +-
 .../db/query/reader/chunk/MemChunkReader.java      |    2 +-
 .../chunk/metadata/DiskChunkMetadataLoader.java    |    2 +-
 .../chunk/metadata/MemChunkMetadataLoader.java     |    4 +-
 .../query/reader/series/SeriesAggregateReader.java |   27 -
 .../reader/series/SeriesRawDataBatchReader.java    |    6 +-
 .../iotdb/db/query/reader/series/SeriesReader.java |  348 ++-----
 .../reader/series/SeriesReaderByTimestamp.java     |   33 +-
 .../reader/universal/DescPriorityMergeReader.java  |    5 +-
 .../reader/universal/PriorityMergeReader.java      |   14 +-
 .../query/timegenerator/ServerTimeGenerator.java   |   10 +-
 .../iotdb/db/query/udf/core/input/InputLayer.java  |   11 +-
 .../query/udf/service/UDFRegistrationService.java  |    5 +-
 .../apache/iotdb/db/rescon/MemTableManager.java    |   10 +-
 .../iotdb/db/rescon/PrimitiveArrayManager.java     |  349 +++----
 .../apache/iotdb/db/rescon/TVListAllocator.java    |   94 ++
 .../TVListAllocatorMBean.java}                     |    7 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   16 +-
 .../apache/iotdb/db/service/MetricsService.java    |    4 +-
 .../org/apache/iotdb/db/service/StartupChecks.java |    2 +-
 .../org/apache/iotdb/db/service/StaticResps.java   |    6 +-
 .../org/apache/iotdb/db/service/TSServiceImpl.java |  352 ++++---
 .../org/apache/iotdb/db/service/UpgradeSevice.java |   31 +-
 .../apache/iotdb/db/sync/conf/SyncConstant.java    |    7 +-
 .../receiver/recover/SyncReceiverLogAnalyzer.java  |    2 +-
 .../db/sync/receiver/transfer/SyncServiceImpl.java |   46 +-
 .../iotdb/db/sync/sender/transfer/SyncClient.java  |  130 +--
 .../apache/iotdb/db/tools/TsFileRewriteTool.java   |  178 ++--
 .../apache/iotdb/db/tools/TsFileSketchTool.java    |   17 +-
 .../org/apache/iotdb/db/tools/mlog/MLogParser.java |   19 +-
 .../db/tools/upgrade/TsFileOnlineUpgradeTool.java  |  154 ++-
 .../org/apache/iotdb/db/utils/FileLoaderUtils.java |   35 +-
 .../{AggregateUtils.java => FilePathUtils.java}    |  161 ++-
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |    2 +-
 .../java/org/apache/iotdb/db/utils/MergeUtils.java |    2 +-
 .../apache/iotdb/db/utils/QueryDataSetUtils.java   |   22 +-
 .../java/org/apache/iotdb/db/utils/QueryUtils.java |   40 +-
 .../apache/iotdb/db/utils/RandomDeleteCache.java   |   76 ++
 .../org/apache/iotdb/db/utils/SerializeUtils.java  |   46 +-
 .../iotdb/db/utils/datastructure/TVList.java       |   14 +-
 .../writelog/manager/MultiFileLogNodeManager.java  |   12 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   97 +-
 .../writelog/recover/TsFileRecoverPerformer.java   |   45 +-
 .../auth/authorizer/LocalFileAuthorizerTest.java   |    4 +-
 .../db/auth/user/LocalFileUserManagerTest.java     |    4 +-
 .../db/conf/adapter/CompressionRatioTest.java      |    2 +-
 .../org/apache/iotdb/db/constant/TestConstant.java |   15 +-
 .../iotdb/db/engine/cache/ChunkCacheTest.java      |  257 -----
 .../db/engine/compaction/CompactionChunkTest.java  |   19 +-
 .../compaction/LevelCompactionCacheTest.java       |    9 +-
 .../engine/compaction/LevelCompactionLogTest.java  |   11 +-
 .../compaction/LevelCompactionMergeTest.java       |  156 +--
 .../engine/compaction/LevelCompactionModsTest.java |   11 +-
 .../compaction/LevelCompactionMoreDataTest.java    |   15 +-
 .../compaction/LevelCompactionRecoverTest.java     |  475 ++-------
 .../compaction/LevelCompactionRestoreTest.java     |  469 ---------
 .../compaction/LevelCompactionSelectorTest.java    |    7 +-
 .../db/engine/compaction/LevelCompactionTest.java  |   77 +-
 .../LevelCompactionTsFileManagementTest.java       |   29 +-
 .../NoCompactionTsFileManagementTest.java          |   25 +-
 .../iotdb/db/engine/merge/ConcurrentMergeTest.java |  142 ---
 .../engine/merge/MaxFileMergeFileSelectorTest.java |   79 +-
 .../apache/iotdb/db/engine/merge/MergeLogTest.java |    7 +-
 .../iotdb/db/engine/merge/MergeOverLapTest.java    |   13 +-
 .../iotdb/db/engine/merge/MergePerfTest.java       |    7 +-
 .../iotdb/db/engine/merge/MergeTaskTest.java       |  169 +---
 .../apache/iotdb/db/engine/merge/MergeTest.java    |   23 +-
 .../iotdb/db/engine/merge/MergeUpgradeTest.java    |   11 +-
 .../engine/modification/DeletionFileNodeTest.java  |   18 +-
 .../storagegroup/StorageGroupProcessorTest.java    |  248 +----
 .../iotdb/db/engine/storagegroup/TTLTest.java      |   15 +-
 .../engine/storagegroup/TsFileProcessorTest.java   |  146 +--
 .../iotdb/db/integration/IoTDBAlignByDeviceIT.java |  120 +--
 .../org/apache/iotdb/db/integration/IoTDBAsIT.java |   10 +-
 .../db/integration/IoTDBAutoCreateSchemaIT.java    |    8 +-
 .../iotdb/db/integration/IoTDBClearCacheIT.java    |   30 +-
 .../db/integration/IoTDBCreateStorageGroupIT.java  |  130 ---
 .../db/integration/IoTDBCreateTimeseriesIT.java    |    8 +-
 .../db/integration/IoTDBDeleteTimeseriesIT.java    |   55 +-
 .../iotdb/db/integration/IoTDBDeletionIT.java      |   81 --
 .../iotdb/db/integration/IoTDBDisableAlignIT.java  |    2 +-
 .../iotdb/db/integration/IoTDBFilePathUtilsIT.java |    8 +-
 .../apache/iotdb/db/integration/IoTDBFillIT.java   |   66 +-
 .../db/integration/IoTDBFlushQueryMergeIT.java     |    2 +-
 .../iotdb/db/integration/IoTDBFuzzyQueryIT.java    |  292 ------
 .../iotdb/db/integration/IoTDBGroupByMonthIT.java  |    9 +-
 .../iotdb/db/integration/IoTDBGroupByUnseqIT.java  |  189 ----
 .../org/apache/iotdb/db/integration/IoTDBInIT.java |  255 -----
 .../apache/iotdb/db/integration/IoTDBJMXTest.java  |   62 --
 .../IoTDBLastQueryWithTimeFilterIT.java            |  118 ---
 ...IoTDBLoadExternalTsFileWithTimePartitionIT.java |  137 +--
 .../db/integration/IoTDBLoadExternalTsfileIT.java  |  148 +--
 .../iotdb/db/integration/IoTDBMetadataFetchIT.java |   10 +-
 .../db/integration/IoTDBNewTsFileCompactionIT.java | 1057 ++++++++++++++++++++
 .../db/integration/IoTDBOverlappedPageIT.java      |   86 +-
 .../db/integration/IoTDBPathNumOverLimitIT.java    |   72 --
 .../iotdb/db/integration/IoTDBQueryDemoIT.java     |  220 ----
 .../db/integration/IoTDBQueryMemoryControlIT.java  |    7 +-
 .../iotdb/db/integration/IoTDBRestartIT.java       |   92 +-
 .../db/integration/IoTDBSequenceDataQueryIT.java   |    9 +-
 .../iotdb/db/integration/IoTDBSeriesReaderIT.java  |   11 +-
 .../IoTDBSetSystemReadOnlyWritableIT.java          |  266 -----
 .../iotdb/db/integration/IoTDBSimpleQueryIT.java   |   39 -
 .../iotdb/db/integration/IoTDBTimePartitionIT.java |   92 --
 .../apache/iotdb/db/integration/IoTDBTtlIT.java    |   17 -
 .../iotdb/db/integration/IoTDBUDFManagementIT.java |   21 -
 .../db/integration/IoTDBUDFWindowQueryIT.java      |   35 +-
 .../integration/IoTDBUDTFAlignByTimeQueryIT.java   |  147 ++-
 .../db/integration/IoTDBUDTFBuiltinFunctionIT.java |   35 +-
 .../db/integration/IoTDBUDTFHybridQueryIT.java     |   28 +-
 .../db/integration/IoTDBUDTFNonAlignQueryIT.java   |   42 +-
 .../db/integration/IoTDBWithoutAllNullIT.java      |  264 -----
 .../db/integration/IoTDBWithoutAnyNullIT.java      |  221 ----
 .../aggregation/IoTDBAggregationByLevelIT.java     |  161 +--
 .../aggregation/IoTDBAggregationSmallDataIT.java   |    5 +-
 .../db/integration/auth/IoTDBAuthorizationIT.java  |   26 -
 .../iotdb/db/metadata/MManagerBasicTest.java       |  550 +---------
 .../iotdb/db/metadata/MManagerImproveTest.java     |   17 +-
 .../apache/iotdb/db/metadata/MetaUtilsTest.java    |   11 -
 .../iotdb/db/qp/logical/LogicalPlanSmallTest.java  |    2 +-
 .../iotdb/db/qp/physical/ConcatOptimizerTest.java  |   24 -
 .../iotdb/db/qp/physical/InsertRowPlanTest.java    |  200 ----
 .../qp/physical/InsertRowsOfOneDevicePlanTest.java |   73 --
 .../iotdb/db/qp/physical/InsertTabletPlanTest.java |  156 ---
 .../iotdb/db/qp/physical/PhysicalPlanTest.java     |  126 +--
 .../db/qp/utils/DatetimeQueryDataSetUtilsTest.java |   28 -
 .../query/aggregation/DescAggregateResultTest.java |   58 --
 .../db/query/control/FileReaderManagerTest.java    |   29 +-
 .../iotdb/db/query/control/TracingManagerTest.java |    7 +-
 .../dataset/groupby/GroupByFillDataSetTest.java    |    2 +-
 .../dataset/groupby/GroupByLevelDataSetTest.java   |   10 -
 .../reader/series/SeriesAggregateReaderTest.java   |   12 +-
 .../reader/series/SeriesReaderByTimestampTest.java |   12 +-
 .../db/query/reader/series/SeriesReaderTest.java   |    2 +-
 .../query/reader/series/SeriesReaderTestUtil.java  |   47 +-
 .../db/sync/receiver/load/FileLoaderTest.java      |   11 +-
 .../recover/SyncReceiverLogAnalyzerTest.java       |    2 +-
 .../db/sync/sender/manage/SyncFileManagerTest.java |    2 +-
 .../sender/recover/SyncSenderLogAnalyzerTest.java  |    2 +-
 .../db/sync/sender/transfer/SyncClientTest.java    |  258 +----
 .../org/apache/iotdb/db/tools/MLogParserTest.java  |   71 +-
 .../iotdb/db/tools/TsFileSketchToolTest.java       |  132 ---
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |   36 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   37 +-
 .../apache/iotdb/db}/utils/FilePathUtilsTest.java  |   37 +-
 .../org/apache/iotdb/db/utils/MemUtilsTest.java    |    8 -
 .../apache/iotdb/db/utils/SerializeUtilsTest.java  |  569 -----------
 .../iotdb/db/utils/TsFileRewriteToolTest.java      |  115 +--
 .../apache/iotdb/db/writelog/PerformanceTest.java  |    2 +-
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |   49 -
 .../recover/RecoverResourceFromReaderTest.java     |    2 +-
 .../db/writelog/recover/SeqTsFileRecoverTest.java  |    4 +-
 .../recover/TsFileRecoverPerformerTest.java        |  277 -----
 .../writelog/recover/UnseqTsFileRecoverTest.java   |   11 +-
 service-rpc/pom.xml                                |    2 +-
 .../iotdb/rpc/AutoScalingBufferReadTransport.java  |   13 -
 .../iotdb/rpc/AutoScalingBufferWriteTransport.java |   15 -
 .../java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java |    8 +-
 .../org/apache/iotdb/rpc/RpcTransportFactory.java  |    3 +-
 .../main/java/org/apache/iotdb/rpc/RpcUtils.java   |   12 +-
 .../rpc/TCompressedElasticFramedTransport.java     |    2 +-
 .../org/apache/iotdb/rpc/TConfigurationConst.java  |   31 -
 .../apache/iotdb/rpc/TElasticFramedTransport.java  |   20 +-
 .../iotdb/rpc/TNonblockingSocketWrapper.java       |   60 --
 .../java/org/apache/iotdb/rpc/TSStatusCode.java    |    3 -
 .../java/org/apache/iotdb/rpc/TSocketWrapper.java  |   68 --
 session/pom.xml                                    |    2 +-
 .../main/java/org/apache/iotdb/session/Config.java |    6 +-
 .../org/apache/iotdb/session/InsertConsumer.java   |   31 -
 .../java/org/apache/iotdb/session/Session.java     |  403 ++------
 .../apache/iotdb/session/SessionConnection.java    |  136 +--
 .../org/apache/iotdb/session/SessionDataSet.java   |    2 +-
 .../iotdb/session/{util => }/SessionUtils.java     |   37 +-
 .../iotdb/session/pool/SessionDataSetWrapper.java  |    3 +-
 .../org/apache/iotdb/session/pool/SessionPool.java |  231 ++---
 .../org/apache/iotdb/session/util/ThreadUtils.java |   45 -
 .../iotdb/session/IoTDBSessionComplexIT.java       |   35 -
 .../session/IoTDBSessionDisableMemControlIT.java   |  133 ---
 .../apache/iotdb/session/IoTDBSessionSimpleIT.java |   16 +-
 .../apache/iotdb/session/SessionCacheLeaderUT.java |  434 +-------
 .../java/org/apache/iotdb/session/SessionUT.java   |    6 +-
 .../apache/iotdb/session/pool/SessionPoolTest.java |  206 +---
 .../apache/iotdb/session/util/ThreadUtilsTest.java |   35 -
 session/src/test/resources/logback.xml             |   40 -
 site/pom.xml                                       |   14 +-
 site/src/main/.vuepress/config.js                  |   16 +-
 .../theme/global-components/Contributor.vue        |    2 +-
 spark-iotdb-connector/pom.xml                      |   31 +-
 .../org/apache/iotdb/spark/db/SQLConstant.java     |   13 +-
 .../org/apache/iotdb/spark/db/Converter.scala      |    6 +-
 .../org/apache/iotdb/spark/db/DataFrameTools.scala |  105 --
 .../org/apache/iotdb/spark/db/DefaultSource.scala  |   28 +-
 .../org/apache/iotdb/spark/db/IoTDBOptions.scala   |    2 +-
 .../scala/org/apache/iotdb/spark/db/IoTDBRDD.scala |    7 +-
 .../org/apache/iotdb/spark/db/SQLConstant.scala    |   27 -
 .../apache/iotdb/spark/db/EnvironmentUtils.java    |  117 ++-
 .../org/apache/iotdb/spark/db/IoTDBWriteTest.scala |  117 ---
 spark-tsfile/pom.xml                               |    2 +-
 thrift-cluster/pom.xml                             |    2 +-
 thrift-cluster/src/main/thrift/cluster.thrift      |    8 +-
 thrift-sync/pom.xml                                |    2 +-
 thrift/pom.xml                                     |    2 +-
 thrift/src/main/thrift/rpc.thrift                  |   20 -
 tsfile/pom.xml                                     |    2 +-
 .../apache/iotdb/tsfile/compress/ICompressor.java  |   58 +-
 .../compress/GZIPCompressOverflowException.java    |   26 -
 .../iotdb/tsfile/file/metadata/ChunkMetadata.java  |   35 +-
 .../fileSystem/fsFactory/LocalFSFactory.java       |   16 +-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |   70 +-
 .../apache/iotdb/tsfile/read/common/BatchData.java |   89 --
 .../tsfile/read/common/DescReadBatchData.java      |    5 +-
 .../tsfile/read/common/DescReadWriteBatchData.java |   47 -
 .../tsfile/read/common/ExceptionBatchData.java     |   10 +-
 .../apache/iotdb/tsfile/read/common/RowRecord.java |   47 +-
 .../read/controller/CachedChunkLoaderImpl.java     |    3 +-
 .../read/expression/util/ExpressionOptimizer.java  |   34 +-
 .../tsfile/read/filter/GroupByMonthFilter.java     |  222 ++--
 .../iotdb/tsfile/read/filter/TimeFilter.java       |   11 -
 .../iotdb/tsfile/read/filter/ValueFilter.java      |   70 --
 .../tsfile/read/filter/factory/FilterFactory.java  |    8 -
 .../read/filter/factory/FilterSerializeId.java     |    5 +-
 .../iotdb/tsfile/read/filter/operator/In.java      |   15 +-
 .../iotdb/tsfile/read/filter/operator/Like.java    |  162 ---
 .../iotdb/tsfile/read/filter/operator/Regexp.java  |  112 ---
 .../tsfile/read/query/dataset/QueryDataSet.java    |   33 +-
 .../read/query/timegenerator/node/LeafNode.java    |    2 +-
 .../iotdb/tsfile/read/reader/IChunkReader.java     |    2 +-
 .../tsfile/read/reader/chunk/ChunkReader.java      |    9 +-
 .../reader/series/AbstractFileSeriesReader.java    |    2 +-
 .../reader/series/FileSeriesReaderByTimestamp.java |    8 +-
 .../apache/iotdb/tsfile/utils/FilePathUtils.java   |  126 ---
 .../iotdb/tsfile/utils/ReadWriteIOUtils.java       |   11 -
 .../tsfile/v2/read/TsFileSequenceReaderForV2.java  |    4 +
 .../tsfile/write/chunk/ChunkGroupWriterImpl.java   |    2 +-
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |   30 +-
 .../iotdb/tsfile/write/chunk/IChunkWriter.java     |    2 +-
 .../apache/iotdb/tsfile/write/page/PageWriter.java |    4 -
 .../write/writer/RestorableTsFileIOWriter.java     |   54 +-
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |   14 +-
 .../iotdb/tsfile/write/writer/TsFileOutput.java    |    4 +-
 .../org/apache/iotdb/tsfile/compress/GZIPTest.java |   17 +-
 .../org/apache/iotdb/tsfile/compress/LZ4Test.java  |   17 +-
 .../apache/iotdb/tsfile/compress/SnappyTest.java   |   17 +-
 .../apache/iotdb/tsfile/constant/TestConstant.java |    5 +-
 .../iotdb/tsfile/file/header/PageHeaderTest.java   |   12 +-
 .../file/metadata/TimeSeriesMetadataTest.java      |   13 +-
 .../tsfile/file/metadata/TsFileMetadataTest.java   |   14 +-
 ...easurementChunkMetadataListMapIteratorTest.java |    4 -
 .../iotdb/tsfile/read/ReadOnlyTsFileTest.java      |   88 --
 .../org/apache/iotdb/tsfile/read/ReadTest.java     |    5 +-
 .../tsfile/read/TsFileRestorableReaderTest.java    |   19 -
 .../read/TsFileSequenceReaderSelfCheckTest.java    |  799 ---------------
 .../tsfile/read/filter/FilterSerializeTest.java    |   25 +-
 .../tsfile/read/filter/GroupByMonthFilterTest.java |   46 +-
 .../apache/iotdb/tsfile/utils/FileGenerator.java   |    2 +-
 .../iotdb/tsfile/utils/ReadWriteIOUtilsTest.java   |   30 +-
 .../tsfile/write/DefaultDeviceTemplateTest.java    |    7 +-
 .../org/apache/iotdb/tsfile/write/PerfTest.java    |    5 +-
 .../iotdb/tsfile/write/ReadPageInMemTest.java      |   11 +-
 .../iotdb/tsfile/write/TsFileIOWriterTest.java     |    8 -
 .../iotdb/tsfile/write/TsFileReadWriteTest.java    |    3 -
 .../iotdb/tsfile/write/TsFileWriterTest.java       |   22 +-
 .../write/writer/RestorableTsFileIOWriterTest.java |   80 +-
 zeppelin-interpreter/pom.xml                       |    8 +-
 668 files changed, 10573 insertions(+), 30099 deletions(-)

diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 94f480d..0000000
--- a/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text=auto eol=lf
\ No newline at end of file
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 32a82ca..0000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-version: 2
-
-updates:
-  - package-ecosystem: maven
-    directory: "/"
-    schedule:
-      interval: daily
-
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: daily
diff --git a/.github/workflows/client.yml b/.github/workflows/client.yml
index 16a200a..f6a7bad 100644
--- a/.github/workflows/client.yml
+++ b/.github/workflows/client.yml
@@ -10,10 +10,20 @@ on:
     branches:
       - master
       - "rel/*"
+    paths:
+      - "client-*/**"
+      - "compile-tools/**"
+      - "thrift/**"
+      - "service-rpc/**"
   pull_request:
     branches:
       - master
       - "rel/*"
+    paths:
+      - "client-*/**"
+      - "compile-tools/**"
+      - "thrift/**"
+      - "service-rpc/**"
   # allow manually run the action:
   workflow_dispatch:
 
@@ -47,7 +57,6 @@ jobs:
           brew install bison
           echo 'export PATH=/usr/local/opt/bison/bin:$PATH' >> ~/.bash_profile
           source ~/.bash_profile && export LDFLAGS="-L/usr/local/opt/bison/lib"
-          brew install openssl
       - name: Cache Maven packages
         uses: actions/cache@v2
         with:
@@ -77,21 +86,18 @@ jobs:
           path: ~/.m2
           key: client-${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
           restore-keys: ${{ runner.os }}-m2-
-      - name: Install Win_Flex_Bison
+      - name: Install Boost and Win_Flex_Bison
         run: mkdir D:\a\cpp ; `
           Invoke-WebRequest https://github.com/lexxmark/winflexbison/releases/download/v2.5.24/win_flex_bison-2.5.24.zip -OutFile D:\a\cpp\win_flex_bison.zip ; `
           [Environment]::SetEnvironmentVariable("Path", $env:Path + ";D:\a\cpp", "User") ; `
-      - name: Download Boost
-        run: choco install boost-msvc-14.2
-      - name: Install Boost
-        run: cd C:\local\boost_1_74_0 ; `
+          Invoke-WebRequest https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.zip -OutFile D:\a\cpp\boost_1_72_0.zip ; `
+          Expand-Archive D:\a\cpp\boost_1_72_0.zip -DestinationPath D:\a\cpp ; `
+          cd D:\a\cpp\boost_1_72_0 ; `
           .\bootstrap.bat ; `
           .\b2.exe
-      - name: Install OpenSSL
-        run:  choco install openssl
-      - name: Add Flex and Bison Path and OpenSSL
+      - name: Add Flex and Bison Path
         shell: bash
-        run: cd /d/a/cpp && unzip win_flex_bison.zip && mv win_flex.exe flex.exe && mv win_bison.exe bison.exe  && echo 'export PATH=/d/a/cpp:$PATH' >> ~/.bash_profile && source ~/.bash_profile
+        run: cd /d/a/cpp && unzip win_flex_bison.zip && mv win_flex.exe flex.exe && mv win_bison.exe bison.exe && echo 'export PATH=/d/a/cpp:$PATH' >> ~/.bash_profile && source ~/.bash_profile
       - name: Test with Maven
         shell: bash
-        run: source ~/.bash_profile && mvn -B clean integration-test -P compile-cpp -Dboost.include.dir=/c/local/boost_1_74_0 -Dboost.library.dir=/c/local/boost_1_74_0/stage/lib -Dtsfile.test.skip=true -Djdbc.test.skip=true -Diotdb.test.skip=true -Dtest.port.closed=true -Denforcer.skip=true -pl server,client-cpp,example/client-cpp-example -am
+        run: source ~/.bash_profile && mvn -B clean integration-test -P compile-cpp -Dboost.include.dir=/d/a/cpp/boost_1_72_0 -Dboost.library.dir=/d/a/cpp/boost_1_72_0/stage/lib -Dtsfile.test.skip=true -Djdbc.test.skip=true -Diotdb.test.skip=true -Dtest.port.closed=true -Denforcer.skip=true -pl server,client-cpp,example/client-cpp-example -am
diff --git a/.gitignore b/.gitignore
index 4d2f8ba..757a55b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -108,8 +108,3 @@ node3/
 
 # Exclude copied license
 /client-py/LICENSE
-
-# ANTLR
-antlr/gen/
-antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/gen/
-antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlLexer.tokens
\ No newline at end of file
diff --git a/LICENSE-binary b/LICENSE-binary
index e8ec78d..cc1b475 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -216,10 +216,10 @@ following license. See licenses/ for text of these licenses.
 Apache Software Foundation License 2.0
 --------------------------------------
 commons-cli:commons-cli:1.3.1
-commons-codec:commons-codec:1.15
+commons-codec:commons-codec:1.13
 org.apache.commons:commons-collections4:4.0
 commons-io:commons-io:2.5
-org.apache.commons:commons-lang3:3.12.0
+org.apache.commons:commons-lang3:3.8.1
 commons-lang:commons-lang:2.6
 com.nimbusds:content-type:2.0
 com.google.code.gson:gson:2.8.6
@@ -230,7 +230,6 @@ com.fasterxml.jackson.core:jackson-databind:2.10.0
 javax.inject:javax.inject:1
 net.jpountz.lz4:1.3.0
 com.github.stephenc.jcip:jcip-annotations:1.0-1
-com.github.ben-manes.caffeine:caffeine:2.9.1
 org.eclipse.jetty:jetty-http:9.4.24.v20191120
 org.eclipse.jetty:jetty-io:9.4.24.v20191120
 org.eclipse.jetty:jetty-security:9.4.24.v20191120
@@ -239,42 +238,42 @@ org.eclipse.jetty:jetty-servlet:9.4.24.v20191120
 org.eclipse.jetty:jetty-util:9.4.24.v20191120
 org.eclipse.jetty:jetty-webapp:9.4.24.v20191120
 org.eclipse.jetty:jetty-xml:9.4.24.v20191120
-io.jsonwebtoken:jjwt-api:0.11.2
+io.jsonwebtoken:jjwt-api:0.10.7
 io.jsonwebtoken:jjwt-impl:0.10.7
 io.jsonwebtoken:jjwt-jackson:0.10.7
 net.minidev:json-smart:2.3
 com.google.code.findbugs:jsr305:3.0.2
 com.nimbusds:lang-tag:1.4.4
 com.librato.metrics:librato-java:2.1.0
-org.apache.thrift:libthrift:0.14.1
+org.apache.thrift:libthrift:0.13.0
 io.dropwizard.metrics:metrics-core:3.2.6
 io.dropwizard.metrics:metrics-json:3.2.6
 io.dropwizard.metrics:metrics-jvm:3.2.6
 com.librato.metrics:metrics-librato:5.1.0
-de.fraunhofer.iosb.io.moquette:moquette-broker:0.14.3
-io.netty:netty-buffer:4.1.68.Final
-io.netty:netty-codec:4.1.68.Final
-io.netty:netty-codec-http:4.1.68.Final
-io.netty:netty-codec-mqtt:4.1.68.Final
-io.netty:netty-common:4.1.68.Final
-io.netty:netty-handler:4.1.68.Final
-io.netty:netty-resolver:4.1.68.Final
-io.netty:netty-transport:4.1.68.Final
-io.netty:netty-transport-native-epoll:4.1.68.Final:linux-x86_64
-io.netty:netty-transport-native-unix-common:4.1.68.Final
+io.moquette:moquette-broker:0.13
+io.netty:netty-buffer:4.1.27.Final
+io.netty:netty-codec:4.1.27.Final
+io.netty:netty-codec-http:4.1.27.Final
+io.netty:netty-codec-mqtt:4.1.27.Final
+io.netty:netty-common:4.1.27.Final
+io.netty:netty-handler:4.1.27.Final
+io.netty:netty-resolver:4.1.27.Final
+io.netty:netty-transport:4.1.27.Final
+io.netty:netty-transport-native-epoll:4.1.27.Final:linux-x86_64
+io.netty:netty-transport-native-unix-common:4.1.27.Final
 com.nimbusds:nimbus-jose-jwt:8.14.1
 com.nimbusds:oauth2-oidc-sdk:8.3
 org.osgi:org.osgi.core:6.0.0
 org.osgi:osgi.cmpn:6.0.0
 org.ops4j.pax.jdbc:pax-jdbc-common:1.4.5
 org.xerial.snappy:snappy-java:1.1.7.2
-io.airlift.airline:0.9
+io.airlift.airline:0.8
 net.minidev:accessors-smart:1.2
 
 
 BSD 2-Clause
 ------------
-jline:jline:2.14.6
+jline:jline:2.14.5
 
 
 BSD 3-Clause
@@ -289,7 +288,7 @@ MIT License
 org.slf4j:slf4j-api
 me.tongfei:progressbar:0.7.3
 com.bugsnag:bugsnag:3.6.1
-org.slf4j:jcl-over-slf4j:1.7.32
+org.slf4j:jcl-over-slf4j:1.7.25
 
 
 EPL 1.0
@@ -304,4 +303,4 @@ CDDL 1.1
 javax.annotation:javax.annotation-api:1.3.2
 javax.servlet:javax.servlet-api:3.1.0
 javax.xml.bind:jaxb-api:2.4.0-b180725.0427
-org.glassfish.jaxb:jaxb-runtime:3.0.2
\ No newline at end of file
+org.glassfish.jaxb:jaxb-runtime:2.4.0-b180725.0644
\ No newline at end of file
diff --git a/README.md b/README.md
index 097301d..fe3a3de 100644
--- a/README.md
+++ b/README.md
@@ -88,14 +88,6 @@ To use IoTDB, you need to have:
 1. Java >= 1.8 (1.8, 11, and 13 are verified. Please make sure the environment path has been set accordingly).
 2. Maven >= 3.6 (If you want to compile and install IoTDB from source code).
 3. Set the max open files num as 65535 to avoid "too many open files" error.
-4. (Optional) Set the somaxconn as 65535 to avoid "connection reset" error when the system is under high load.
-    ```
-    # Linux
-    > sudo sysctl -w net.core.somaxconn=65535
-   
-    # FreeBSD or Darwin
-    > sudo sysctl -w kern.ipc.somaxconn=65535
-    ```
 
 ## Installation
 
diff --git a/README_ZH.md b/README_ZH.md
index f3ad6a2..b27fc9d 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -85,14 +85,6 @@ IoTDB的主要特点如下:
 1. Java >= 1.8 (目前 1.8、11和13 已经被验证可用。请确保环变量境路径已正确设置)。
 2. Maven >= 3.6 (如果希望从源代码编译和安装IoTDB)。
 3. 设置 max open files 为 65535,以避免"too many open files"错误。
-4. (可选) 将 somaxconn 设置为 65535 以避免系统在高负载时出现 "connection reset" 错误。 
-    ```
-    # Linux
-    > sudo sysctl -w net.core.somaxconn=65535
-   
-    # FreeBSD or Darwin
-    > sudo sysctl -w kern.ipc.somaxconn=65535
-    ```
 
 ## 安装
 
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 51a9f38..df16fb9 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -19,292 +19,13 @@
 
 -->
 
-# Apache IoTDB 0.12.4
-
-## New Features
-
-* [IOTDB-1823] group by multi level
-
-## Improvements
-
-* [IOTDB-2027] Rollback invalid entry after WAL writing failure
-* [IOTDB-2061] Add max concurrent sub query parameter, read data in batches to limit max IO and add max cached buffer size configuration
-* [IOTDB-2065] release TsFileSequenceReader soon when it is no longer used
-* [IOTDB-2072] Remove TVListAllocator to reduce the TVList mem cost
-* [IOTDB-2101] Reduce the memory footprint of QueryDataSource
-* [IOTDB-2102] Push limit operator down to each reader
-* [IOTDB-2123] Accelerate recovery process
-* update user guide for cpp-cpi and disable compiling nodejs in cpp-cli
-* Ignore too many WAL BufferOverflow log
-
-
-## Bug Fixes
-* [IOTDB-1408] Statement with 'as' executes incorrectly in mutil-path scenes
-* [IOTDB-2023] Fix serializing and deserializing bugs of Filters
-* [IOTDB-2025] Fix count nodes and devices incorrectly in cluster
-* [IOTDB-2031] Fix incorrect result of descending query with value filter in cluster
-* [IOTDB-2032] Fix incorrect result of descending query with multiple time partitions
-* [IOTDB-2039] Fix data redundant after too many open files exception occurs during compaction
-* [IOTDB-2047] Fix NPE when memControl is disabled and insert TEXT value to a non-TEXT series
-* [IOTDB-2058] Fix Query is blocked without sub-query-threads exist bug
-* [IOTDB-2063] Fix MinTimeDescAggregationResult implementation bug
-* [IOTDB-2064] Fix the NPE caused by map serde
-* [IOTDB-2068] Fix GZIP compressor meets ArrayIndexOutOfBoundsException
-* [IOTDB-2124] the filtering condition does not take efffect for last query in cluster
-* [IOTDB-2138] Fix data loss after IoTDB recover
-* [IOTDB-2140] Fix merge throw NullPointerException
-* [IOTDB-2152] PyClient: Override `__eq__()` of TSDataType, TSEncoding and Compressor to avoid unexpected comparation behaviour
-* [IOTDB-2160] Fix cluster groupby query cross-node reference leaks
-* [ISSUE-3335] Fix the bug of start-cli.sh -e mode can't work with wild card \*
-* fix memory leak: replace RandomDeleteCache with Caffine CacheLoader
-* Fix connection refused using session when users forget to set client ip
-
-
-# Apache IoTDB 0.12.3
-
-## Improvements
-
-* [IOTDB-842] Better Export/Import-CSV Tool
-* [IOTDB-1738] Cache paths list in batched insert plan
-* [IOTDB-1792] remove tomcat-embed dependency and make all transitive dependencies versions consistent
-* [ISSUE-4072] Parallel insert records in Session
-* Print the file path while meeting error in case of reading chunk
-
-## Bug Fixes
-
-* [IOTDB-1275] Fix backgroup exec for cli -e function causes an infinite loop
-* [IOTDB-1287] Fix C++ class Session has 2 useless sort()
-* [IOTDB-1289] fix CPP mem-leak in SessionExample.cpp insertRecords()
-* [IOTDB-1484] fix auto create schema in cluster
-* [IOTDB-1578] Set unsequnce when loading TsFile with the same establish time
-* [IOTDB-1619] Fix an error msg when restart iotdb-cluster
-* [IOTDB-1629] fix the NPE when using value fill in cluster mode
-* [IOTDB-1632] Fix Value fill function fills even when the data exists
-* [IOTDB-1651] add reconnect to solve out of sequence in sync module
-* [IOTDB-1659] Fix Windows CLI cannot set maxPRC less than or equal to 0
-* [IOTDB-1670] Fix cli -e mode didn't fetch timestamp_precision from server
-* [IOTDB-1674] Fix command interpret error causing somaxconn warning failed
-* [IOTDB-1677] Fix not generate file apache-iotdb-0.x.x-client-cpp-linux-x86_64-bin.zip.sha512
-* [IOTDB-1678] Fix client-cpp session bug: can cause connection leak.
-* [IOTDB-1679] client-cpp: Session descontruction need release server resource
-* [IOTDB-1690] Fix align by device type cast error
-* [IOTDB-1693] fix IoTDB restart does not truncate broken ChunkGroup bug
-* [IOTDB-1703] Fix MManager slow recover with tag
-* [IOTDB-1714] fix Could not find or load main class when start with jmx on win 
-* [IOTDB-1723] Fix concurrency issue in compaction selection
-* [IOTDB-1726] Wrong hashCode() and equals() method in ChunkMetadata
-* [IOTDB-1727] Fix Slow creation of timeseries with tag
-* [IOTDB-1731] Fix sync error between different os
-* [IOTDB-1733] Fix dropping built-in function
-* [IOTDB-1741] Avoid double close in level compaction execution
-* [IOTDB-1785] Fix Illegal String ending with . being parsed to PartialPath
-* [IOTDB-1836] Fix Query Exception Bug after deleting all sgs
-* [IOTDB-1837] Fix tagIndex rebuild failure after upgrade mlog from mlog.txt to mlog.bin
-* [IOTDB-1838] The compacting status in SGP is always false
-* [IOTDB-1846] Fix the error when count the total number of devices in cluster mode
-* [IoTDB-1847] Do not throw exception when pulling non-existent time series
-* [IOTDB-1850] Fix deserialize page merge rate limiter
-* [IoTDB-1865] Compaction is blocking when removing old files in Cluster
-* [IOTDB-1868] Use RwLock to reduce the lock time for nodeRing
-* [IOTDB-1872] Fix data increases abnormally after IoTDB restarts
-* [IOTDB-1877] Fix Sync recovery and reconnection bugs in both sender and receiver
-* [IOTDB-1879] Fix some Unsequence files never being merged to higher level or Sequence folder
-* [IOTDB-1887] Fix importing csv data containing null throws exception
-* [IOTDB-1893] Fix cannot release file lock in sync verify singleton
-* [IOTDB-1895] Cache leader optimization for batch write interfaces on multiple devices
-* [IOTDB-1903] Fix IndexOutOfRangeException when starting IoTDB
-* [IoTDB-1913] Fix network error or OOM when exporting a large amount of data to CSV
-* [IOTDB-1925] Fix the modification of max_select_unseq_file_num_in_each_compaction parameter does not take effect
-* [IOTDB-1958] Add storage group not ready exception
-* [IOTDB-1961] Cluster query memory leak
-* [IOTDB-1975] OOM caused by MaxQueryDeduplicatedPathNum not taking effect
-* [IOTDB-1983] Fix DescReadWriteBatchData serializing bug
-* [IOTDB-1990] Fix unchecked null result by calling IReaderByTimestamp.getValuesInTimestamps()
-* [ISSUE-3945] Fix Fuzzy query not supporting multiDevices and alignByDevice Dataset
-* [ISSUE-4288] Fix CI issue caused by the invalid pentaho download url
-* [ISSUE-4293] SessionPool: InterruptedException is not properly handled in synchronized wait()
-* [ISSUE-4308] READ_TIMESERIES privilege granted to users and roles can not take effect when querying by UDFs
-* fix merge ClassCastException: MeasurementMNode
-* change sync version check to major version
-* init dummyIndex after restarting the cluster
-
-# Apache IoTDB 0.12.2
-
-## New Features
-
-* [IOTDB-959] Add create storage group Grammar
-* [IOTDB-1399] Add a session interface to connect multiple nodes
-* [IOTDB-1466] Support device template
-* [IOTDB-1491] UDTF query supported in cluster
-* [IOTDB-1496] Timed flush memtable
-* [IOTDB-1536] Support fuzzy query REGEXP
-* [IOTDB-1561] Support fill by specific value
-* [IOTDB-1565] Add sql: set system to readonly/writable
-* [IOTDB-1569] Timed close TsFileProcessor
-* [IOTDB-1586] Support mysql-style Like clause
-* [ISSUE-3811] Provide a data type column for the last query dataset
-* TTL can be set to the prefix path of storage group
-* add JMX monitor to all ThreadPools in the server module
-
-## Improvements
-
-* [IOTDB-1566] Do not restrict concurrent write partitions
-* [IOTDB-1585] ModificationFile's write interface blocking
-* [IOTDB-1587] SessionPool optimization: a more aggressive Session creation strategy
-* Use StringCachedPool in TsFileResource to reduce the memory size
-* write performance optimization when replicaNum == 1
-* Optimize Primitive Array Manager
-* Function Improvement: add overlapped page rate in Tracing
-
-## Bug Fixes
-
-* [IOTDB-1282] fix C++ class SessionDataSet mem-leak
-* [IOTDB-1407] Fix filtering time series based on tags query failing occasionally
-* [IOTDB-1437] Fix the TsFileSketchTool NPE
-* [IOTDB-1442] Time filter & TTL do not take effect in cluster
-* [IOTDB-1452] remove compaction log/ change logger to daily
-* [IOTDB-1447] ClientPool is blocking other nodes when one node fails
-* [IOTDB-1456] Fix Error occurred while executing delete timeseries statement
-* [IOTDB-1461] Fix compaction conflicts with ttl
-* [IOTDB-1462] Fix cross space compaction recover null pointer bug
-* [IOTDB-1464] fix take byte array null pointer
-* [IOTDB-1469] fix cross space compaction lost data bug
-* [IOTDB-1471] Fix path not right in "sg may not ready" log
-* [IOTDB-1475] MeasurementId check while creating timeseries or template / disable time or timestamp in timeseries path
-* [IOTDB-1488] Fix metaMember's forwarding clientPool timeout in cluster module
-* [IOTDB-1494] fix compaction block flush bug
-* [IoTDB-1499] Remove series registration using IoTDBSink
-* [IoTDB-1501] Fix compaction recover delete tsfile bug
-* [IOTDB-1529] Fix mlog recover idx bug and synchronize setStorageGroup
-* [IOTDB-1537] fix insertTablet permission
-* [IOTDB-1539] Fix delete operation with value filter is abnormal
-* [IOTDB-1540] Bug Fix: 500 when using IN operator
-* [IOTDB-1541] Fix query result not right due to non-precise time index of resource
-* [IOTDB-1542] Cpp client segment fault: char[] buffer overflow caused by long exception message
-* [IOTDB-1545] Query dataset memory leak on server caused by cpp client
-* [IOTDB-1546] Optimize the Upgrade Tool rewrite logic to reduce the temp memory cost
-* [IOTDB-1552] Only allow equivalent filter for TEXT data type
-* [IOTDB-1556] Abort auto create device when meeting exception in setStorageGroup
-* [IOTDB-1574] Deleted file handler leak
-* [IOTDB-1580] Error result of order by time desc when time partition is enabled
-* [IOTDB-1584] Doesn't support order by time desc in cluster mode
-* [IOTDB-1588] Bug fix: MAX_TIME is incorrect in cluster mode
-* [IOTDB-1594] Fix show timeseries returns incorrect tag value
-* [IOTDB-1600] Fix InsertRowsOfOneDevicePlan being not supported in cluster mode
-* [IOTDB-1610] Fix TsFileRewriteTool writing incorrect data file
-* [ISSUE-3116] Bug when using natural month unit in time interval in group by query
-* [ISSUE-3316] Query result with the same time range is inconsistent in group by query
-* [ISSUE-3436] Fix query result not right after deleting multiple time interval of one timeseries
-* [ISSUE-3458] fix load configuration does not take effect
-* [ISSUE-3545] Fix Time interval value is disorder in group by month
-* [ISSUE-3653] fix Max_time and last return inconsistent result
-* [ISSUE-3690] Memory leaks on the server when cpp client invokes checkTimeseriesExists
-* [ISSUE-3805] OOM caused by Chunk cache
-* [ISSUE-3865] Meaningless connection reset issues caused by low default value for SOMAXCONN
-* Fix DataMigrationExample OOM if migrating too many timeseries
-* Handle false positive cases which may cause NPE of tsfile bloom filter
-* Fix Windows shell error on JDK11 & fix iotdb-env.bat not working
-* Fix cluster auto create schema bug when retry locally
-* Fix thrift out of sequence in cluster module
-* Skip non-existent measurements in where clause in align by device
-* fix blocking query when selecting TsFile in compaction
-* Fix redundant data in compaction recover
-* Fix load tsfile with time partition enabled
-
-## Incompatible changes
-
-* [IOTDB-1485] Replace tsfile_size_threshold by unseq_tsfile_size/seq_tsfile_size
-
-## Miscellaneous changes
-
-* [IOTDB-1499] Remove unused exception throwing notation in IoTDBSink
-* [IOTDB-1500] Remove current dynamic query memory control
-* [ISSUE-3674] Disable thrift code generation for Javascript
-* enable cacheLeader by default
-* add audit log when executing delete and set sg for tracing
-* modify nodeTool user to root
-
-# Apache IoTDB 0.12.1
-
-## Bug Fixes
-
-* [GITHUB-3373] Remove the broken cached leader connection & optimize the insertRecords method in session
-* [IOTDB-1433] Fix bug in getMetadataAndEndOffset when querying non-exist device
-* [IOTDB-1432] fix level compaction loss data
-* [IOTDB-1427] Fix compaction lock with query
-* [IOTDB-1420] Fix compaction ttl bug
-* [IOTDB-1419] Remove redundant clearCompactionStatus, fix continuous compaction not taking effect when
-  enablePartition is on
-* [IOTDB-1415] Fix OOM caused by ChunkCache
-* [IOTDB-1414] NPE occurred when calling getStorageGroupNodeByPath() method with a non-existent path
-* [IOTDB-1412] Unclear exception message thrown when executing empty InsertTabletPlan
-* [IOTDB-1411] Fix thriftMaxFrameSize and thriftDefaultBufferSize not taking effect
-* [IOTDB-1398] Do not select unseq files when there are uncompacted old unseq files
-* [IOTDB-1390] Fix unseq compaction loss data bug
-* [IOTDB-1384] Fix group by bug
-* [ISSUE-3378] Fix NPE when clear upgrade folder; Fix some upgraded pageHeader missing statistics
-* [GITHUB-3339] Try to fix sg dead lock
-* [GITHUB-3329] Fix upgrade NPE and DeadLock
-* [GITHUB-3319] Fix upgrade tool cannot close file reader
-* [IOTDB-1212] Fix The given error message is not right when executing select sin(non_existence) from root.sg1.d1
-* [IOTDB-1219] Fix a potential NPE issue in UDF module
-* [IOTDB-1286] Fix 4 C++ mem-leak points
-* [IOTDB-1294] Fix delete operation become invalid after compaction
-* [IOTDB-1313] Fix losing time precision when importing csv with unsupported timestamp format
-* [IOTDB-1316] The importCsv tool should continue inserting if a part of insertion failed
-* [IOTDB-1317] Fix log CatchUp always failing due to not checking the follower's match index
-* [IOTDB-1323] Fix returning a success message when encountering RuntimeException during the insertion process
-* [IOTDB-1325] Fix StackOverflow Exception in group by natural month query
-* [IOTDB-1330] Fix the load tsfile bug when the cross multi partition's tsfile only have one page
-* [IOTDB-1348] Fix Last plan not work in cluster mode
-* [IOTDB-1376] Fix BatchProcessException was not correctly handled in BaseApplier
-* [ISSUE-3277] Fix TotalSeriesNumber in MManager counted twice when recovering
-* [ISSUE-3116] Fix bug when using natural month unit in time interval in group by query
-* [ISSUE-3309] Fix InsertRecordsOfOneDevice running too slow
-* Fix the plan index always being zero when using the insertRecords interface in the cluster
-* Add authority check for users creating timeseries via the executeBatch interface without the privilege
-* Fix versionInfo NPE when querying an upgrading 0.11 tsfile
-* Fix upgrade tool cannot load old tsfile if time partition was enabled in 0.11
-* Fix import csv throwing ArrayOutOfIndexError when the last value in a line is null
-* Fix upgrade tool cannot close file reader
-
-## Improvements
-
-* [GITHUB-3399] Change the default primitive array size to 32
-* [IOTDB-1387] Support Without Null ALL in align by device clause, Filter RowRecord automatically if any column in it is
-  null or all columns are null
-* [IOTDB-1385] Extract the super user to the configuration
-* [IOTDB-1315] ExportCsvTool should support timestamp `yyyy-MM-dd'T'HH:mm:ss.SSSZ`
-* [IOTDB-1339] optimize TimeoutChangeableTSnappyFramedTransport
-* [IOTDB-1356] Separate unseq_file_num_in_each_level from selecting candidate file in unseq compaction
-* [IOTDB-1357] Compaction use append chunk merge strategy when chunk is already large enough
-* [IOTDB-1380] Automatically close the dataset when there is no more data
-* Optimize sync leader for meta
-
-## New Features
-
-* [GITHUB-3389] TTL can be set to any path
-* [GITHUB-3387] Add parameter compaction_interval=10000ms
-* [IOTDB-1190] Fully support HTTP URL char set in timeseries path
-* [IOTDB-1321][IOTDB-1322] Filter RowRecord automatically if any column in it is null or all columns are null
-* [IOTDB-1357] Compaction use append chunk merge strategy when chunk is already large
-* [ISSUE-3089] Make it possible for storage groups to have names with hyphens
-
-## Miscellaneous changes
-
-* [GITHUB-3346] upgrade netty and claim exclusion for enforcer check
-* [IOTDB-1259] upgrade libthrift from 0.12.0/0.13.0 to 0.14.1
-* Uncomment the less used configurations
-* Enable the configuration `concurrent_writing_time_partition`
-
 # Apache IoTDB 0.12.0
 
 ## New Features
-
 * [IOTDB-68] New shared-nothing cluster
 * [IOTDB-507] Add zeppelin-interpreter module
 * [IOTDB-825] Aggregation by natural month
-* [IOTDB-890] support SDT lossy compression
+* [IOTDB-890] support SDT lossy compression 
 * [IOTDB-944] Support UDTF (User-defined Timeseries Generating Function)
 * [IOTDB-965] Add timeout parameter for query
 * [IOTDB-1077] Add insertOneDeviceRecords API in java session
@@ -318,12 +39,11 @@
 * [PR-2605] Add level merge to "merge" command
 
 ## Incompatible changes
-
 * [IOTDB-1081] New TsFile Format
 * [ISSUE-2730] Add the number of unseq merge times in TsFile name.
 
-## Miscellaneous changes
 
+## Miscellaneous changes
 * [IOTDB-868] Change mlog from txt to bin
 * [IOTDB-1069] Restrict the flushing memtable number to avoid OOM when mem_control is disabled
 * [IOTDB-1104] Refactor the error handling process of query exceptions
@@ -336,7 +56,6 @@
 * [PR-2967] Log memory usage information in SystemInfo for better diagnosis
 
 ## Bug Fixes
-
 * [IOTDB-1049] Fix NullpointerException and a delete bug in Last query
 * [IOTDB-1050] Fix Count timeserise column name is wrong
 * [IOTDB-1068] Fix Time series metadata cache bug
@@ -360,35 +79,10 @@
 * [PR-2582] Fix sync bug for tsfiles's directory changed by vitural storage group
 * [ISSUE-2911] Fix The write stream is not closed when executing the command 'tracing off'
 
-# Apache IoTDB 0.11.4
-
-## Bug Fixes
-
-* IOTDB-1303 Disable group by without aggregation function in select clause
-* IOTDB-1306 Fix insertion blocked caused the deadlock in memory control module
-* IOTDB-1308 Fix users with READ_TIMESERIES permission cannot execute group by fill queries
-* IOTDB-1344 Fix cannot create timeseries caused by the timeseries count doesn't reset when deleting storage group
-* IOTDB-1384 Some values will disappear while using group by query
-* IOTDB-1398 Do not select unseq files when there are uncompacted old unseq files
-* ISSUE-3316 Fix query result with the same time range is inconsistent in group by query
-* Fix TotalSeriesNumber in MManager counted twice when recovering
-* Fix unseq compaction throws a wrong exception if some paths are not in the file
-* Fix overlapped data should be consumed first exception when query
-
-## Improvements
-
-* IOTDB-1356 Separate unseq_file_num_in_each_level from selecting candidate file in unseq compaction
-* IOTDB-1412 Unclear exception message thrown when executing empty InsertTabletPlan
-* continuous compaction in level compaction strategy when no tsfile is to be closed
-
-## New Features
-
-* support brackets with number in timeseries path
 
 # Apache IoTDB 0.11.3
 
 ## Bug Fixes
-
 * ISSUE-2505 ignore PathNotExistException in recover and change recover error to warn
 * IOTDB-1119 Fix C++ SessionDataSet bug when reading value buffer
 * Fix SessionPool does not recycle session and can not offer new Session due to RunTimeException
@@ -420,7 +114,6 @@
 * Fix high CPU usage during the compaction process
 
 ## Improvements
-
 * IOTDB-1140 optimize regular data encoding
 * Add more log for better tracing
 * Add background exec for cli -e function
@@ -428,13 +121,12 @@
 * Change last cache log to debug level
 
 ## New Features
-
 * Add explain sql support
 
+
 # Apache IoTDB 0.11.2
 
 ## Bug Fixes
-
 * IOTDB-1049 Fix Nullpointer exception and a delete bug in Last query
 * IOTDB-1060 Support full deletion for delete statement without where clause
 * IOTDB-1068 Fix Time series metadata cache bug
@@ -456,7 +148,6 @@
 # Apache IoTDB 0.11.1
 
 ## Bug Fixes
-
 * IOTDB-990 cli parameter maxPRC shouldn't be set to zero
 * IOTDB-993 Fix tlog bug
 * IOTDB-994 Fix can not get last_value while doing the aggregation query along with first_value
@@ -544,7 +235,7 @@
 * IOTDB-963 Redo deleteStorageGroupPlan failed when recovering
 * IOTDB-967 Fix xxx does not have the child node xxx Bug in count timeseries
 * IOTDB-970 Restrict log file number and size
-* IOTDB-971 More precise error messages of slimit and soffset
+* IOTDB-971 More precise error messages of slimit and soffset 
 * IOTDB-975 when series does not exist in TsFile, reading wrong ChunkMetadataList
 
 # Apache IoTDB (incubating) 0.10.1
@@ -559,20 +250,21 @@
 * Query result is not correct when some unsequence data exists
 * Change the default fetch size to 10000 in session
 * [IOTDB-798] fix a set rowLimit and rowOffset bug
-* [IOTDB-800] Add a new config type for those parameters which could not be modified any more after the first start
+* [IOTDB-800] Add a new config type for those parameters which could not be modified any more after the first start 
 * [IOTDB-802] Improve "group by" query performance
-* [IOTDB-799] remove log visualizer tool from v0.10
-* fix license-binary
-* [IOTDB-805] Fix BufferUnderflowException when querying TsFile stored in HDFS
+* [IOTDB-799] remove log visualizer tool from v0.10 
+* fix license-binary  
+* [IOTDB-805] Fix BufferUnderflowException when querying TsFile stored in HDFS 
 * python session client ver-0.10.0
-* [IOTDB-808] fix bug in selfCheck() truncate
-* fix doc of MeasurementSchema in Tablet
+* [IOTDB-808] fix bug in selfCheck() truncate 
+* fix doc of MeasurementSchema in Tablet 
 * [IOTDB-811] fix upgrading mlog many times when upgrading system.properties crashed
 * Improve IoTDB restart process
 * remove jol-core dependency which is introduced by hive-serde 2.8.4
 * remove org.json dependency because of license compatibility
 * [ISSUE-1551] fix set historical version when loading additional tsfile
 
+
 # Apache IoTDB (incubating) 0.10.0
 
 ## New Features
@@ -593,8 +285,7 @@
 * IOTDB-396 Support new query clause: disable align, e.g., select * from root disable align
 * IOTDB-447 Support querying non-existing measurement and constant measurement
 * IOTDB-448 Add IN operation, e.g., where time in (1,2,3)
-* IOTDB-456 Support GroupByFill Query, e.g., select last_value(s1) from root.sg.d1 GROUP BY ([1, 10), 2ms) FILL(
-  int32[previousUntilLast])
+* IOTDB-456 Support GroupByFill Query, e.g., select last_value(s1) from root.sg.d1 GROUP BY ([1, 10), 2ms) FILL(int32[previousUntilLast])
 * IOTDB-467 The CLI displays query results in a batch manner
 * IOTDB-497 Support Apache Flink Connector with IoTDB
 * IOTDB-558 add text support for grafana
@@ -622,9 +313,10 @@
 * IOTDB-708 add config for inferring data type from string value
 * IOTDB-715 Support previous time range in previousuntillast
 * IOTDB-719 add avg_series_point_number_threshold in config
-* IOTDB-731 Continue write inside InsertPlan
+* IOTDB-731 Continue write inside InsertPlan 
 * IOTDB-734 Add Support for NaN in Double / Floats in SQL Syntax.
-* IOTDB-744 Support upsert alias
+* IOTDB-744 Support upsert alias 
+
 
 ## Incompatible changes
 
@@ -635,8 +327,7 @@
 * IOTDB-506 upgrade the rpc protocol to v2 to reject clients or servers that version < 0.10
 * IOTDB-587 TsFile is upgraded to version 2
 * IOTDB-593 add metaOffset in TsFileMetadata
-* IOTDB-597 Rename methods in Session: insertBatch to insertTablet, insertInBatch to insertRecords, insert to
-  insertRecord
+* IOTDB-597 Rename methods in Session: insertBatch to insertTablet, insertInBatch to insertRecords, insert to insertRecord
 * RPC is incompatible, you can not use client-v0.9 to connect with server-v0.10
 * TsFile format is incompatible, will be upgraded when starting 0.10
 * Refine exception code in native api
@@ -655,11 +346,11 @@
 * IOTDB-628 rename client to cli
 * IOTDB-621 Add Check isNull in Field for querying using session
 * IOTDB-632 Performance improve for PreviousFill/LinearFill
-* IOTDB-695 Accelerate the count timeseries query
-* IOTDB-707 Optimize TsFileResource memory usage
+* IOTDB-695 Accelerate the count timeseries query 
+* IOTDB-707 Optimize TsFileResource memory usage  
 * IOTDB-730 continue write in MQTT when some events fail
-* IOTDB-729 shutdown unnecessary threadpool
-* IOTDB-733 Enable setting for mqtt max length
+* IOTDB-729 shutdown unnecessary threadpool 
+* IOTDB-733 Enable setting for mqtt max length 
 * IOTDB-732 Upgrade fastjson version to 1.2.70
 * Allow "count timeseries" without a prefix path
 * Add max backup log file number
@@ -669,11 +360,12 @@
 * Add metaquery in python example
 * Set inferType of MQTT InsertPlan to true
 
+
+
 ## Bug Fixes
 
 * IOTDB-125 Potential Concurrency bug while deleting and inserting happen together
-* IOTDB-185 fix start-client failed on WinOS if there is blank space in the file path; let start-server.bat support
-  jdk12,13 etc
+* IOTDB-185 fix start-client failed on WinOS if there is blank space in the file path; let start-server.bat support jdk12,13 etc
 * IOTDB-304 Fix bug of incomplete HDFS URI
 * IOTDB-341 Fix data type bug in grafana
 * IOTDB-346 Fix a bug of renaming tsfile in loading function
@@ -697,8 +389,8 @@
 * IOTDB-692 merge behaves incorrectly
 * IOTDB-712 Meet BufferUnderflowException and can not recover
 * IOTDB-718 Fix wrong time precision of NOW()
-* IOTDB-735 Fix Concurrent error for MNode when creating time series automatically
-* IOTDB-738 Fix measurements with blank spaces
+* IOTDB-735 Fix Concurrent error for MNode when creating time series automatically 
+* IOTDB-738 Fix measurements with blank spaces 
 
 * fix concurrent auto create schema conflict bug
 * fix meet incompatible file error in restart
@@ -708,10 +400,10 @@
 * Fix batch execution bug: the following sqls will all fail after one error sql
 * Fix recover endTime set bug
 
+
 # Apache IoTDB (incubating) 0.9.3
 
 ## Bug Fixes
-
 - IOTDB-531 Fix that JDBC URL does not support domain issue
 - IOTDB-563 Fix pentaho cannot be downloaded because of spring.io address
 - IOTDB-608 Skip error Mlog
@@ -719,34 +411,32 @@
 - IOTDB-636 Fix Grafana connector does not use correct time unit
 
 ## Miscellaneous changes
-
 - IOTDB-528 Modify grafana group by
 - IOTDB-635 Add workaround when doing Aggregation over boolean Series
 - Remove docs of Load External Tsfile
 - Add Grafana IoTDB Bridge Artifact to distribution in tools/grafana folder
 
+
 # Apache IoTDB (incubating) 0.9.2
 
 ## Bug Fixes
-
 - IOTDB-553 Fix Return Empty ResultSet when queried series doesn't exist
 - IOTDB-575 add default jmx user and password; fix issues that jmx can't be accessed remotely
 - IOTDB-584 Fix InitializerError when recovering files on HDFS
 - Fix batch insert: once an illegal sql occurs, all the sqls after it will not succeed
-- Fix concurrent modification exception when iterating TsFileResourceList
-- Fix some HDFS config issues
+- Fix concurrent modification exception when iterating TsFileResourceList 
+- Fix some HDFS config issues 
 - Fix runtime exception not being caught and sync schema pos null pointer bug in DataTransferManager
 - Fix python rpc grammar mistakes
 - Fix upgrade ConcurrentModificationException
 
 ## Miscellaneous changes
-
 - IOTDB-332 support Chinese characters in path
-- IOTDB-316 add AVG function to 4-SQL Reference.md and modify style
+- IOTDB-316 add AVG function to 4-SQL Reference.md and modify style 
 - improve start-server.bat by using quotes to protect against empty entries
 - Add Chinese documents for chapter 4.2
 - change download-maven-plugin to 1.3.0
-- add session pool
+- add session pool 
 - add insertInBatch in Session
 - add insertInBatch to SessionPool
 - modify 0.9 docs to fit website
@@ -790,7 +480,7 @@
 * IOTDB-188 Delete storage group
 * IOTDB-193 Create schema automatically when inserting
 * IOTDB-198 Add sync module (Sync TsFiles between IoTDB instances)
-* IOTDB-199 Add a log visualization tool
+* IOTDB-199 Add a log visualization tool 
 * IOTDB-203 Add "group by device" function for narrow table display
 * IOTDB-205 Support storage-group-level Time To Live (TTL)
 * IOTDB-208 Add Bloom filter in TsFile
@@ -799,7 +489,7 @@
 * IOTDB-239 Add interface for showing devices
 * IOTDB-241 Add query and non query interface in session
 * IOTDB-249 Enable lowercase in create_timeseries sql
-* IOTDB-253 Support time expression
+* IOTDB-253 Support time expression 
 * IOTDB-259 Level query of path
 * IOTDB-282 Add "show version"
 * IOTDB-294 Online upgrade from 0.8.0 to 0.9.0
@@ -808,17 +498,15 @@
 * Generate cpp, go, and python thrift files under service-rpc
 * Display cache hit rate through jconsole
 * Support inserting data with time < 0
-* Add interface (Delete timeseries) in session
+* Add interface (Delete timeseries) in session 
 * Add a tool to print tsfileResources (each device's start and end time)
 * Support watermark feature
 * Add micro and nano timestamp precision
 
 ## Incompatible changes
 
-* RPC is incompatible, you can not use client-0.8.0 to connect with server-0.9.0 or use client-0.9.0 to connect with
-  server-0.8.0.
-* Server is backward compatible, server-0.9.0 could run on data folder of 0.8.0. The data file will be upgraded in
-  the background.
+* RPC is incompatible, you can not use client-0.8.0 to connect with server-0.9.0 or use client-0.9.0 to connect with server-0.8.0.
+* Server is backward compatible, server-0.9.0 could run on data folder of 0.8.0. The data file will be upgraded in the background.
 * Change map key in TsDigest from String to enum data type
 
 ## Miscellaneous changes
@@ -834,8 +522,7 @@
 * IOTDB-221 Add a python client example
 * IOTDB-233 keep metadata plan clear
 * IOTDB-251 Improve TSQueryDataSet structure in RPC
-* IOTDB-257 Makes the client stop fetch when dataSize equals maxPrintRowCount and change client fetchSize less than
-  maxPrintRowCount
+* IOTDB-257 Makes the client stop fetch when dataSize equals maxPrintRowCount and change client fetchSize less than maxPrintRowCount
 * IOTDB-258 Add documents for Query History Visualization Tool and Shared Storage Architecture
 * IOTDB-265 Re-adjust the threshold size of memtable
 * IOTDB-267 Reduce IO operations in deserializing chunk header
@@ -847,7 +534,7 @@
 * IOTDB-295 Refactor db.exception
 * Reconstruct Antlr3 grammar to improve performance
 * Tooling for release
-* Modified Decoder and SequenceReader to support old version of TsFile
+* Modified Decoder and SequenceReader to support old version of TsFile 
 * Remove jdk constraint of jdk8 and 11
 * Modify print function in AbstractClient
 * Avoid second execution of parseSQLToPhysicalPlan in executeStatement
@@ -858,8 +545,7 @@
 
 ## Bug Fixes
 
-* IOTDB-168&169 Fix a bug in export-csv tool and fix compatibility of timestamp formats in exportCsv, client display and
-  sql
+* IOTDB-168&169 Fix a bug in export-csv tool and fix compatibility of timestamp formats in exportCsv, client display and sql
 * IOTDB-174 Fix querying timeseries interface cannot make a query by the specified path prefix
 * IOTDB-195 Using String.getBytes(utf-8).length to replace string.length() in ChunkGroupMetadata for supporting Chinese
 * IOTDB-211 Use "%IOTDB_HOME%\lib\*" to refers to all .jar files in the directory in start-server.bat
@@ -878,11 +564,11 @@
 
 # Apache IoTDB (incubating) 0.8.2
 
-This is a bug-fix version of 0.8.1
+ This is a bug-fix version of 0.8.1 
 
-- IOTDB-264 lack checking datatype before writing WAL
-- IOTDB-317 Fix "flush + wrong aggregation" causes failed query in v0.8.x
-- NOTICE and LICENSE file update
+-  IOTDB-264 lack checking datatype before writing WAL 
+-  IOTDB-317 Fix "flush + wrong aggregation" causes failed query in v0.8.x 
+-  NOTICE and LICENSE file update 
 
 # Apache IoTDB (incubating) 0.8.1
 
@@ -895,6 +581,7 @@ This is a bug-fix version of 0.8.0
 * Abnormal publishing of sequence and unsequence data folders in DirectoryManager
 * Fix a bug in TimeRange's intersects function
 
+
 # Apache IoTDB (incubating) 0.8.0
 
 This is the first official release of Apache IoTDB after joining the Incubator.
@@ -971,8 +658,7 @@ If you use the previous unofficial version 0.7.0. It is incompatible with 0.8.0.
 
 * IOTDB-20 Need to support UPDATE
 * IOTDB-124 Lost timeseries info after restart IoTDB
-* IOTDB-125 [potential] a concurrency conflict may occur when a delete command and an insertion command appear
-  concurrently
+* IOTDB-125 [potential] a concurrency conflict may occur when a delete command and an insertion command appear concurrently
 * IOTDB-126 IoTDB will not be closed immediately after run 'stop-server.sh' script
 * IOTDB-127 Chinese version documents problems
 
@@ -1032,4 +718,4 @@ If you use the previous unofficial version 0.7.0. It is incompatible with 0.8.0.
 * Import/export csv script bug
 * Log level and stack print in test
 * Bug in TsFile-Spark-Connector
-* A doc bug of QuickStart.md
+* A doc bug of QuickStart.md
\ No newline at end of file
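The 0.12.2 notes above mention a session interface for connecting to multiple nodes ([IOTDB-1399]). A minimal sketch of how such a constructor is typically used, assuming the 0.12-era Session API that takes a list of node URLs; the endpoints below are placeholders:

    import org.apache.iotdb.session.Session;
    import java.util.Arrays;

    public class MultiNodeSessionSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoints; the session can fail over to another node
        // in the list if the current connection becomes unreachable.
        Session session =
            new Session(Arrays.asList("127.0.0.1:6667", "127.0.0.1:6668"), "root", "root");
        session.open(false);
        session.executeNonQueryStatement("SET STORAGE GROUP TO root.sg1");
        session.close();
      }
    }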
diff --git a/antlr/pom.xml b/antlr/pom.xml
index a77730a..f6b9f2d 100644
--- a/antlr/pom.xml
+++ b/antlr/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.iotdb</groupId>
         <artifactId>iotdb-parent</artifactId>
-        <version>0.12.4</version>
+        <version>0.12.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>iotdb-antlr</artifactId>
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlBase.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlBase.g4
index 0062431..6a027cd 100644
--- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlBase.g4
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlBase.g4
@@ -37,7 +37,6 @@ statement
     | UPDATE prefixPath setClause whereClause? #updateStatement
     | DELETE FROM prefixPath (COMMA prefixPath)* (whereClause)? #deleteStatement
     | SET STORAGE GROUP TO prefixPath #setStorageGroup
-    | CREATE STORAGE GROUP prefixPath #createStorageGroup
     | DELETE STORAGE GROUP prefixPath (COMMA prefixPath)* #deleteStorageGroup
     | SHOW METADATA #showMetadata // not support yet
     | DESCRIBE prefixPath #describePath // not support yet
@@ -77,7 +76,6 @@ statement
     | SHOW VERSION #showVersion
     | SHOW LATEST? TIMESERIES prefixPath? showWhereClause? limitClause? #showTimeseries
     | SHOW STORAGE GROUP prefixPath? #showStorageGroup
-    | SHOW LOCK INFO prefixPath? #showLockInfo
     | SHOW CHILD PATHS prefixPath? #showChildPaths
     | SHOW CHILD NODES prefixPath? #showChildNodes
     | SHOW DEVICES prefixPath? (WITH STORAGE GROUP)? limitClause? #showDevices
@@ -86,14 +84,12 @@ statement
     | KILL QUERY INT? #killQuery
     | TRACING ON #tracingOn
     | TRACING OFF #tracingOff
-    | SET SYSTEM TO READONLY #setSystemToReadOnly
-    | SET SYSTEM TO WRITABLE #setSystemToWritable
     | COUNT TIMESERIES prefixPath? (GROUP BY LEVEL OPERATOR_EQ INT)? #countTimeseries
     | COUNT DEVICES prefixPath? #countDevices
     | COUNT STORAGE GROUP prefixPath? #countStorageGroup
     | COUNT NODES prefixPath LEVEL OPERATOR_EQ INT #countNodes
     | LOAD CONFIGURATION (MINUS GLOBAL)? #loadConfigurationStatement
-    | LOAD stringLiteral loadFilesClause?#loadFiles
+    | LOAD stringLiteral autoCreateSchema?#loadFiles
     | REMOVE stringLiteral #removeFile
     | MOVE stringLiteral stringLiteral #moveFile
     | DELETE PARTITION prefixPath INT(COMMA INT)* #deletePartition
@@ -240,7 +236,6 @@ predicate
     : (TIME | TIMESTAMP | suffixPath | fullPath) comparisonOperator constant
     | (TIME | TIMESTAMP | suffixPath | fullPath) inClause
     | OPERATOR_NOT? LR_BRACKET orExpression RR_BRACKET
-    | (suffixPath | fullPath) (REGEXP | LIKE) stringLiteral
     ;
 
 inClause
@@ -256,19 +251,15 @@ specialClause
     | orderByTimeClause specialLimit? #orderByTimeStatement
     | groupByTimeClause orderByTimeClause? specialLimit? #groupByTimeStatement
     | groupByFillClause orderByTimeClause? specialLimit? #groupByFillStatement
-    | groupByLevelClause orderByTimeClause? specialLimit? #groupByLevelStatement
     | fillClause slimitClause? alignByDeviceClauseOrDisableAlign? #fillStatement
+    | alignByDeviceClauseOrDisableAlign #alignByDeviceStatementOrDisableAlignInSpecialClause
+    | groupByLevelClause orderByTimeClause? specialLimit? #groupByLevelStatement
     ;
 
 specialLimit
     : limitClause slimitClause? alignByDeviceClauseOrDisableAlign? #limitStatement
     | slimitClause limitClause? alignByDeviceClauseOrDisableAlign? #slimitStatement
-    | withoutNullClause limitClause? slimitClause? alignByDeviceClauseOrDisableAlign? #withoutNullStatement
-    | alignByDeviceClauseOrDisableAlign #alignByDeviceClauseOrDisableAlignStatement
-    ;
-
-withoutNullClause
-    : WITHOUT NULL (ALL | ANY)
+    | alignByDeviceClauseOrDisableAlign #alignByDeviceClauseOrDisableAlignInSpecialLimit
     ;
 
 orderByTimeClause
@@ -322,7 +313,7 @@ groupByTimeClause
             COMMA DURATION
             (COMMA DURATION)?
             RR_BRACKET
-            COMMA LEVEL OPERATOR_EQ INT (COMMA INT)*
+            COMMA LEVEL OPERATOR_EQ INT
     ;
 
 groupByFillClause
@@ -334,13 +325,12 @@ groupByFillClause
      ;
 
 groupByLevelClause
-    : GROUP BY LEVEL OPERATOR_EQ INT (COMMA INT)*
+    : GROUP BY LEVEL OPERATOR_EQ INT
     ;
 
 typeClause
     : (dataType | ALL) LS_BRACKET linearClause RS_BRACKET
     | (dataType | ALL) LS_BRACKET previousClause RS_BRACKET
-    | (dataType | ALL) LS_BRACKET specificValueClause RS_BRACKET
     | (dataType | ALL) LS_BRACKET previousUntilLastClause RS_BRACKET
     ;
 
@@ -352,10 +342,6 @@ previousClause
     : PREVIOUS (COMMA DURATION)?
     ;
 
-specificValueClause
-    : constant?
-    ;
-
 previousUntilLastClause
     : PREVIOUSUNTILLAST (COMMA DURATION)?
     ;
@@ -543,9 +529,6 @@ nodeName
     | SCHEMA
     | TRACING
     | OFF
-    | SYSTEM
-    | READONLY
-    | WRITABLE
     | (ID | OPERATOR_IN)? LS_BRACKET INT? ID? RS_BRACKET? ID?
     | compressor
     | GLOBAL
@@ -657,9 +640,6 @@ nodeNameWithoutStar
     | SCHEMA
     | TRACING
     | OFF
-    | SYSTEM
-    | READONLY
-    | WRITABLE
     | (ID | OPERATOR_IN)? LS_BRACKET INT? ID? RS_BRACKET? ID?
     | compressor
     | GLOBAL
@@ -709,10 +689,9 @@ property
     : name=ID OPERATOR_EQ value=propertyValue
     ;
 
-loadFilesClause
-    : AUTOREGISTER OPERATOR_EQ booleanClause (COMMA loadFilesClause)?
-    | SGLEVEL OPERATOR_EQ INT (COMMA loadFilesClause)?
-    | VERIFY OPERATOR_EQ booleanClause (COMMA loadFilesClause)?
+autoCreateSchema
+    : booleanClause
+    | booleanClause INT
     ;
 
 triggerEventClause
@@ -996,18 +975,6 @@ OFF
     : O F F
     ;
 
-SYSTEM
-    : S Y S T E M
-    ;
-
-READONLY
-    : R E A D O N L Y
-    ;
-
-WRITABLE
-    : W R I T A B L E
-    ;
-
 DROP
     : D R O P
     ;
@@ -1056,18 +1023,6 @@ LOAD
     : L O A D
     ;
 
-AUTOREGISTER
-    : A U T O R E G I S T E R
-    ;
-
-VERIFY
-    : V E R I F Y
-    ;
-
-SGLEVEL
-    : S G L E V E L
-    ;
-
 WATERMARK_EMBEDDING
     : W A T E R M A R K '_' E M B E D D I N G
     ;
@@ -1327,10 +1282,6 @@ LIKE
     : L I K E
     ;
 
-REGEXP
-    : R E G E X P
-    ;
-
 TOLERANCE
     : T O L E R A N C E
     ;
@@ -1343,22 +1294,6 @@ DEBUG
     : D E B U G
     ;
 
-NULL
-    : N U L L
-    ;
-
-WITHOUT
-    : W I T H O U T
-    ;
-
-ANY
-    : A N Y
-    ;
-
-LOCK
-    : L O C K
-    ;
-
 //============================
 // End of the keywords list
 //============================
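The reverted loadFiles rule above takes a string literal (the TsFile path) and an optional autoCreateSchema clause: a boolean, optionally followed by an INT for the storage group level. A hedged sketch of issuing such a statement through the Java session; the file path and level are illustrative only:

    import org.apache.iotdb.session.Session;

    public class LoadTsFileSketch {
      public static void main(String[] args) throws Exception {
        Session session = new Session("127.0.0.1", 6667, "root", "root");
        session.open(false);
        // Matches: LOAD stringLiteral autoCreateSchema?
        // where autoCreateSchema : booleanClause | booleanClause INT
        // "true" asks the server to create the schema automatically;
        // "2" is the storage group level.
        session.executeNonQueryStatement("LOAD '/data/example.tsfile' true 2");
        session.close();
      }
    }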
diff --git a/cli/pom.xml b/cli/pom.xml
index 3612fd6..18413b9 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.iotdb</groupId>
         <artifactId>iotdb-parent</artifactId>
-        <version>0.12.4</version>
+        <version>0.12.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>iotdb-cli</artifactId>
@@ -75,11 +75,6 @@
             <version>${project.version}</version>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-csv</artifactId>
-            <version>1.9.0</version>
-        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/cli/src/assembly/resources/sbin/start-cli.sh b/cli/src/assembly/resources/sbin/start-cli.sh
index 4a736e9..ac96496 100644
--- a/cli/src/assembly/resources/sbin/start-cli.sh
+++ b/cli/src/assembly/resources/sbin/start-cli.sh
@@ -75,7 +75,6 @@ esac
 
 # echo $PARAMETERS
 
-set -o noglob
 exec "$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" $PARAMETERS
 
 
diff --git a/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java b/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
index 4c286a5..55216b9 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
@@ -21,7 +21,6 @@ package org.apache.iotdb.cli;
 import org.apache.iotdb.exception.ArgsErrorException;
 import org.apache.iotdb.jdbc.IoTDBConnection;
 import org.apache.iotdb.jdbc.IoTDBJDBCResultSet;
-import org.apache.iotdb.rpc.IoTDBConnectionException;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.service.rpc.thrift.ServerProperties;
 import org.apache.iotdb.tool.ImportCsv;
@@ -289,9 +288,6 @@ public abstract class AbstractCli {
 
       execute = executeCommand.toString();
       hasExecuteSQL = true;
-      // When execute sql in CLI with -e mode, we should print all results by setting continuePrint
-      // is true.
-      continuePrint = true;
       args = Arrays.copyOfRange(args, 0, index);
       return args;
     }
@@ -479,17 +475,8 @@ public abstract class AbstractCli {
       return;
     }
     println(cmd.split(" ")[1]);
-    try {
-      ImportCsv.importFromTargetPath(
-          host,
-          Integer.valueOf(port),
-          username,
-          password,
-          cmd.split(" ")[1],
-          connection.getTimeZone());
-    } catch (IoTDBConnectionException e) {
-      e.printStackTrace();
-    }
+    ImportCsv.importCsvFromFile(
+        host, port, username, password, cmd.split(" ")[1], connection.getTimeZone());
   }
 
   private static void executeQuery(IoTDBConnection connection, String cmd) {
diff --git a/cli/src/main/java/org/apache/iotdb/cli/Cli.java b/cli/src/main/java/org/apache/iotdb/cli/Cli.java
index 6e57d27..d1b0735 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/Cli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/Cli.java
@@ -102,7 +102,7 @@ public class Cli extends AbstractCli {
     } catch (NumberFormatException e) {
       println(
           IOTDB_CLI_PREFIX
-              + "> error format of max print row count, it should be an integer number");
+              + "> error format of max print row count, it should be a number and greater than 0");
       return false;
     }
     return true;
@@ -121,7 +121,6 @@ public class Cli extends AbstractCli {
                 DriverManager.getConnection(
                     Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
           properties = connection.getServerProperties();
-          timestampPrecision = properties.getTimestampPrecision();
           AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
           processCommand(execute, connection);
           return;
diff --git a/cli/src/main/java/org/apache/iotdb/cli/WinCli.java b/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
index 7d9e938..1ef3cd9 100644
--- a/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
+++ b/cli/src/main/java/org/apache/iotdb/cli/WinCli.java
@@ -99,16 +99,20 @@ public class WinCli extends AbstractCli {
         timeFormat = RpcUtils.setTimeFormat("long");
       }
       if (commandLine.hasOption(MAX_PRINT_ROW_COUNT_ARGS)) {
-        setMaxDisplayNumber(commandLine.getOptionValue(MAX_PRINT_ROW_COUNT_ARGS));
+        maxPrintRowCount = Integer.parseInt(commandLine.getOptionValue(MAX_PRINT_ROW_COUNT_ARGS));
+        if (maxPrintRowCount <= 0) {
+          println(
+              IOTDB_CLI_PREFIX
+                  + "> error format of max print row count, it should be a number greater than 0");
+          return false;
+        }
       }
     } catch (ParseException e) {
       println("Require more params input, please check the following hint.");
       hf.printHelp(IOTDB_CLI_PREFIX, options, true);
       return false;
     } catch (NumberFormatException e) {
-      println(
-          IOTDB_CLI_PREFIX
-              + "> error format of max print row count, it should be an integer number");
+      println(IOTDB_CLI_PREFIX + "> error format of max print row count, it should be a number");
       return false;
     }
     return true;
@@ -129,7 +133,6 @@ public class WinCli extends AbstractCli {
                 DriverManager.getConnection(
                     Config.IOTDB_URL_PREFIX + host + ":" + port + "/", username, password)) {
           properties = connection.getServerProperties();
-          timestampPrecision = properties.getTimestampPrecision();
           AGGREGRATE_TIME_LIST.addAll(properties.getSupportedTimeAggregationOperations());
           processCommand(execute, connection);
           return;
diff --git a/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java b/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
index f03275d..90764b5 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/AbstractCsvTool.java
@@ -23,18 +23,13 @@ import org.apache.iotdb.rpc.IoTDBConnectionException;
 import org.apache.iotdb.rpc.StatementExecutionException;
 import org.apache.iotdb.session.Session;
 
-import jline.internal.Nullable;
+import jline.console.ConsoleReader;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVPrinter;
-import org.apache.commons.csv.QuoteMode;
 
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.time.ZoneId;
-import java.util.List;
 
 public abstract class AbstractCsvTool {
 
@@ -60,44 +55,33 @@ public abstract class AbstractCsvTool {
   protected static final int MAX_HELP_CONSOLE_WIDTH = 92;
   protected static final String[] TIME_FORMAT =
       new String[] {"default", "long", "number", "timestamp"};
-  public static final String[] STRING_TIME_FORMAT =
+  protected static final String[] STRING_TIME_FORMAT =
       new String[] {
-        "yyyy-MM-dd HH:mm:ss.SSSX",
-        "yyyy/MM/dd HH:mm:ss.SSSX",
-        "yyyy.MM.dd HH:mm:ss.SSSX",
-        "yyyy-MM-dd HH:mm:ssX",
-        "yyyy/MM/dd HH:mm:ssX",
-        "yyyy.MM.dd HH:mm:ssX",
-        "yyyy-MM-dd HH:mm:ss.SSSz",
-        "yyyy/MM/dd HH:mm:ss.SSSz",
-        "yyyy.MM.dd HH:mm:ss.SSSz",
-        "yyyy-MM-dd HH:mm:ssz",
-        "yyyy/MM/dd HH:mm:ssz",
-        "yyyy.MM.dd HH:mm:ssz",
-        "yyyy-MM-dd HH:mm:ss.SSS",
+        "yyyy-MM-dd'T'HH:mm:ss.SSSZ",
         "yyyy/MM/dd HH:mm:ss.SSS",
+        "yyyy-MM-dd HH:mm:ss.SSS",
         "yyyy.MM.dd HH:mm:ss.SSS",
+        "yyyy/MM/dd'T'HH:mm:ss.SSS",
+        "yyyy-MM-dd'T'HH:mm:ss.SSS",
+        "yyyy-MM-dd'T'HH:mm:ss.SSS",
+        "yyyy.MM.dd'T'HH:mm:ss.SSS",
+        "yyyy-MM-dd HH:mm:ss.SSSZZ",
+        "yyyy/MM/dd HH:mm:ss.SSSZZ",
+        "yyyy.MM.dd HH:mm:ss.SSSZZ",
+        "yyyy-MM-dd'T'HH:mm:ss.SSSZZ",
+        "yyyy/MM/dd'T'HH:mm:ss.SSSZZ",
         "yyyy-MM-dd HH:mm:ss",
         "yyyy/MM/dd HH:mm:ss",
         "yyyy.MM.dd HH:mm:ss",
-        "yyyy-MM-dd'T'HH:mm:ss.SSSX",
-        "yyyy/MM/dd'T'HH:mm:ss.SSSX",
-        "yyyy.MM.dd'T'HH:mm:ss.SSSX",
-        "yyyy-MM-dd'T'HH:mm:ssX",
-        "yyyy/MM/dd'T'HH:mm:ssX",
-        "yyyy.MM.dd'T'HH:mm:ssX",
-        "yyyy-MM-dd'T'HH:mm:ss.SSSz",
-        "yyyy/MM/dd'T'HH:mm:ss.SSSz",
-        "yyyy.MM.dd'T'HH:mm:ss.SSSz",
-        "yyyy-MM-dd'T'HH:mm:ssz",
-        "yyyy/MM/dd'T'HH:mm:ssz",
-        "yyyy.MM.dd'T'HH:mm:ssz",
-        "yyyy-MM-dd'T'HH:mm:ss.SSS",
-        "yyyy/MM/dd'T'HH:mm:ss.SSS",
-        "yyyy.MM.dd'T'HH:mm:ss.SSS",
         "yyyy-MM-dd'T'HH:mm:ss",
         "yyyy/MM/dd'T'HH:mm:ss",
-        "yyyy.MM.dd'T'HH:mm:ss"
+        "yyyy.MM.dd'T'HH:mm:ss",
+        "yyyy-MM-dd HH:mm:ssZZ",
+        "yyyy/MM/dd HH:mm:ssZZ",
+        "yyyy.MM.dd HH:mm:ssZZ",
+        "yyyy-MM-dd'T'HH:mm:ssZZ",
+        "yyyy/MM/dd'T'HH:mm:ssZZ",
+        "yyyy.MM.dd'T'HH:mm:ssZZ",
       };
   protected static String host;
   protected static String port;
@@ -109,7 +93,7 @@ public abstract class AbstractCsvTool {
   protected static String timeFormat;
   protected static Session session;
 
-  public AbstractCsvTool() {}
+  AbstractCsvTool() {}
 
   protected static String checkRequiredArg(String arg, String name, CommandLine commandLine)
       throws ArgsErrorException {
@@ -130,13 +114,16 @@ public abstract class AbstractCsvTool {
     zoneId = ZoneId.of(session.getTimeZone());
   }
 
-  protected static void parseBasicParams(CommandLine commandLine)
+  protected static void parseBasicParams(CommandLine commandLine, ConsoleReader reader)
       throws ArgsErrorException, IOException {
     host = checkRequiredArg(HOST_ARGS, HOST_NAME, commandLine);
     port = checkRequiredArg(PORT_ARGS, PORT_NAME, commandLine);
     username = checkRequiredArg(USERNAME_ARGS, USERNAME_NAME, commandLine);
 
     password = commandLine.getOptionValue(PASSWORD_ARGS);
+    if (password == null) {
+      password = reader.readLine("please input your password:", '\0');
+    }
   }
 
   protected static boolean checkTimeFormat() {
@@ -196,40 +183,9 @@ public abstract class AbstractCsvTool {
             .optionalArg(true)
             .argName(PASSWORD_NAME)
             .hasArg()
-            .desc("Password (required)")
+            .desc("Password (optional)")
             .build();
     options.addOption(opPassword);
     return options;
   }
-
-  /**
-   * write data to CSV file.
-   *
-   * @param headerNames the header names of CSV file
-   * @param records the records of CSV file
-   * @param filePath the directory to save the file
-   */
-  public static Boolean writeCsvFile(
-      @Nullable List<String> headerNames, List<List<Object>> records, String filePath) {
-    try {
-      CSVPrinter printer =
-          CSVFormat.DEFAULT
-              .withFirstRecordAsHeader()
-              .withEscape('\\')
-              .withQuoteMode(QuoteMode.NONE)
-              .print(new PrintWriter(filePath));
-      if (headerNames != null) {
-        printer.printRecord(headerNames);
-      }
-      for (List record : records) {
-        printer.printRecord(record);
-      }
-      printer.flush();
-      printer.close();
-      return true;
-    } catch (IOException e) {
-      e.printStackTrace();
-      return false;
-    }
-  }
 }
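The reshuffled STRING_TIME_FORMAT array above is a list of candidate timestamp patterns that the CSV tools try in order until one parses. A minimal sketch of that first-match strategy, using Apache Commons Lang's DateUtils purely for illustration (its ZZ token accepts colon-separated offsets; the tool's own parsing code may differ):

    import java.text.ParseException;
    import java.util.Date;
    import org.apache.commons.lang3.time.DateUtils;

    public class TimestampParseSketch {
      // A subset of STRING_TIME_FORMAT; the full array is tried the same way.
      private static final String[] PATTERNS = {
        "yyyy-MM-dd'T'HH:mm:ss.SSSZ",
        "yyyy-MM-dd HH:mm:ss.SSS",
        "yyyy-MM-dd HH:mm:ssZZ",
        "yyyy-MM-dd'T'HH:mm:ss",
      };

      // Returns epoch millis for the first pattern that matches the input.
      public static long toEpochMillis(String value) throws ParseException {
        Date date = DateUtils.parseDate(value, PATTERNS);
        return date.getTime();
      }

      public static void main(String[] args) throws ParseException {
        System.out.println(toEpochMillis("2022-03-06 12:00:49.000"));
      }
    }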
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
index d645447..5b1d9ca 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ExportCsv.java
@@ -26,6 +26,7 @@ import org.apache.iotdb.rpc.StatementExecutionException;
 import org.apache.iotdb.session.Session;
 import org.apache.iotdb.session.SessionDataSet;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Field;
 import org.apache.iotdb.tsfile.read.common.RowRecord;
 
 import jline.console.ConsoleReader;
@@ -36,15 +37,16 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVPrinter;
-import org.apache.commons.csv.QuoteMode;
 
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
 import java.time.Instant;
 import java.time.ZonedDateTime;
 import java.time.format.DateTimeFormatter;
-import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -63,12 +65,6 @@ public class ExportCsv extends AbstractCsvTool {
   private static final String SQL_FILE_ARGS = "s";
   private static final String SQL_FILE_NAME = "sqlfile";
 
-  private static final String DATA_TYPE_ARGS = "datatype";
-  private static final String DATA_TYPE_NAME = "datatype";
-
-  private static final String QUERY_COMMAND_ARGS = "q";
-  private static final String QUERY_COMMAND_NAME = "queryCommand";
-
   private static final String TSFILEDB_CLI_PREFIX = "ExportCsv";
 
   private static final String DUMP_FILE_NAME_DEFAULT = "dump";
@@ -76,10 +72,6 @@ public class ExportCsv extends AbstractCsvTool {
 
   private static String targetDirectory;
 
-  private static Boolean needDataTypePrinted;
-
-  private static String queryCommand;
-
   private static final int EXPORT_PER_LINE_COUNT = 10000;
 
   /** main function of export csv tool. */
@@ -108,38 +100,31 @@ public class ExportCsv extends AbstractCsvTool {
       return;
     }
 
+    ConsoleReader reader = new ConsoleReader();
+    reader.setExpandEvents(false);
+
     try {
-      parseBasicParams(commandLine);
+      parseBasicParams(commandLine, reader);
       parseSpecialParams(commandLine);
       if (!checkTimeFormat()) {
         return;
       }
 
+      String sqlFile = commandLine.getOptionValue(SQL_FILE_ARGS);
+      String sql;
       session = new Session(host, Integer.parseInt(port), username, password);
       session.open(false);
       setTimeZone();
 
-      if (queryCommand == null) {
-        String sqlFile = commandLine.getOptionValue(SQL_FILE_ARGS);
-        String sql;
-
-        if (sqlFile == null) {
-          ConsoleReader reader = new ConsoleReader();
-          reader.setExpandEvents(false);
-          sql = reader.readLine(TSFILEDB_CLI_PREFIX + "> please input query: ");
-          System.out.println(sql);
-          String[] values = sql.trim().split(";");
-          for (int i = 0; i < values.length; i++) {
-            dumpResult(values[i], i);
-          }
-          reader.close();
-        } else {
-          dumpFromSqlFile(sqlFile);
+      if (sqlFile == null) {
+        sql = reader.readLine(TSFILEDB_CLI_PREFIX + "> please input query: ");
+        String[] values = sql.trim().split(";");
+        for (int i = 0; i < values.length; i++) {
+          dumpResult(values[i], i);
         }
       } else {
-        dumpResult(queryCommand, 0);
+        dumpFromSqlFile(sqlFile);
       }
-
     } catch (IOException e) {
       System.out.println("Failed to operate on file, because " + e.getMessage());
     } catch (ArgsErrorException e) {
@@ -147,6 +132,7 @@ public class ExportCsv extends AbstractCsvTool {
     } catch (IoTDBConnectionException | StatementExecutionException e) {
       System.out.println("Connect failed because " + e.getMessage());
     } finally {
+      reader.close();
       if (session != null) {
         try {
           session.close();
@@ -161,12 +147,6 @@ public class ExportCsv extends AbstractCsvTool {
   private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException {
     targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine);
     targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS);
-    needDataTypePrinted = Boolean.valueOf(commandLine.getOptionValue(DATA_TYPE_ARGS));
-    queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS);
-
-    if (needDataTypePrinted == null) {
-      needDataTypePrinted = true;
-    }
     if (targetFile == null) {
       targetFile = DUMP_FILE_NAME_DEFAULT;
     }
@@ -175,7 +155,7 @@ public class ExportCsv extends AbstractCsvTool {
       timeFormat = "default";
     }
     timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS);
-    if (!targetDirectory.endsWith("/") && !targetDirectory.endsWith("\\")) {
+    if (!targetDirectory.endsWith(File.separator)) {
       targetDirectory += File.separator;
     }
   }
@@ -232,25 +212,6 @@ public class ExportCsv extends AbstractCsvTool {
             .build();
     options.addOption(opTimeZone);
 
-    Option opDataType =
-        Option.builder(DATA_TYPE_ARGS)
-            .argName(DATA_TYPE_NAME)
-            .hasArg()
-            .desc(
-                "Will the data type of timeseries be printed in the head line of the CSV file?"
-                    + '\n'
-                    + "You can choose true) or false) . (optional)")
-            .build();
-    options.addOption(opDataType);
-
-    Option opQuery =
-        Option.builder(QUERY_COMMAND_ARGS)
-            .argName(QUERY_COMMAND_NAME)
-            .hasArg()
-            .desc("The query command that you want to execute. (optional)")
-            .build();
-    options.addOption(opQuery);
-
     Option opHelp =
         Option.builder(HELP_ARGS)
             .longOpt(HELP_ARGS)
@@ -262,12 +223,6 @@ public class ExportCsv extends AbstractCsvTool {
     return options;
   }
 
-  /**
-   * This method will be called, if the query commands are written in a sql file.
-   *
-   * @param filePath
-   * @throws IOException
-   */
   private static void dumpFromSqlFile(String filePath) throws IOException {
     try (BufferedReader reader = new BufferedReader(new FileReader(filePath))) {
       String sql;
@@ -286,85 +241,132 @@ public class ExportCsv extends AbstractCsvTool {
    * @param index use to create dump file name
    */
   private static void dumpResult(String sql, int index) {
+
     final String path = targetDirectory + targetFile + index + ".csv";
+    File tf = new File(path);
     try {
-      SessionDataSet sessionDataSet = session.executeQueryStatement(sql, 10000);
-      writeCsvFile(sessionDataSet, path);
-      sessionDataSet.closeOperationHandle();
-      System.out.println("Export completely!");
-    } catch (StatementExecutionException | IoTDBConnectionException | IOException e) {
+      if (!tf.exists() && !tf.createNewFile()) {
+        System.out.println("Could not create target file for sql statement: " + sql);
+        return;
+      }
+    } catch (IOException e) {
+      System.out.println("Cannot create dump file " + path + " " + "because: " + e.getMessage());
+      return;
+    }
+    System.out.println("Start to export data from sql statement: " + sql);
+    try (BufferedWriter bw = new BufferedWriter(new FileWriter(tf))) {
+      SessionDataSet sessionDataSet = session.executeQueryStatement(sql);
+      long startTime = System.currentTimeMillis();
+      // write data in csv file
+      writeMetadata(bw, sessionDataSet.getColumnNames());
+
+      int line = writeResultSet(sessionDataSet, bw);
+      System.out.printf(
+          "Statement [%s] has dumped to file %s successfully! It costs "
+              + "%dms to export %d lines.%n",
+          sql, path, System.currentTimeMillis() - startTime, line);
+    } catch (IOException | StatementExecutionException | IoTDBConnectionException e) {
       System.out.println("Cannot dump result because: " + e.getMessage());
     }
   }
 
-  public static String timeTrans(Long time) {
+  private static void writeMetadata(BufferedWriter bw, List<String> columnNames)
+      throws IOException {
+    if (!columnNames.get(0).equals("Time")) {
+      bw.write("Time" + ",");
+    }
+    for (int i = 0; i < columnNames.size() - 1; i++) {
+      bw.write(columnNames.get(i) + ",");
+    }
+    bw.write(columnNames.get(columnNames.size() - 1) + "\n");
+  }
+
+  private static int writeResultSet(SessionDataSet rs, BufferedWriter bw)
+      throws IOException, StatementExecutionException, IoTDBConnectionException {
+    int line = 0;
+    long timestamp = System.currentTimeMillis();
+    while (rs.hasNext()) {
+      RowRecord rowRecord = rs.next();
+      List<Field> fields = rowRecord.getFields();
+      writeTime(rowRecord.getTimestamp(), bw);
+      writeValue(fields, bw);
+      line++;
+      if (line % EXPORT_PER_LINE_COUNT == 0) {
+        long tmp = System.currentTimeMillis();
+        System.out.printf("%d lines have been exported, it takes %dms%n", line, (tmp - timestamp));
+        timestamp = tmp;
+      }
+    }
+    return line;
+  }
+
+  private static void writeTime(Long time, BufferedWriter bw) throws IOException {
+    ZonedDateTime dateTime;
     String timestampPrecision = "ms";
     switch (timeFormat) {
       case "default":
-        return RpcUtils.parseLongToDateWithPrecision(
-            DateTimeFormatter.ISO_OFFSET_DATE_TIME, time, zoneId, timestampPrecision);
+        String str =
+            RpcUtils.parseLongToDateWithPrecision(
+                DateTimeFormatter.ISO_OFFSET_DATE_TIME, time, zoneId, timestampPrecision);
+        bw.write(str + ",");
+        break;
       case "timestamp":
       case "long":
       case "number":
-        return String.valueOf(time);
+        bw.write(time + ",");
+        break;
       default:
-        return ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), zoneId)
-            .format(DateTimeFormatter.ofPattern(timeFormat));
+        dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), zoneId);
+        bw.write(dateTime.format(DateTimeFormatter.ofPattern(timeFormat)) + ",");
+        break;
     }
   }
 
-  public static Boolean writeCsvFile(SessionDataSet sessionDataSet, String filePath)
-      throws IOException, IoTDBConnectionException, StatementExecutionException {
-    CSVPrinter printer =
-        CSVFormat.DEFAULT
-            .withFirstRecordAsHeader()
-            .withEscape('\\')
-            .withQuoteMode(QuoteMode.NONE)
-            .print(new PrintWriter(filePath));
-
-    List<Object> headers = new ArrayList<>();
-    List<String> names = sessionDataSet.getColumnNames();
-    List<String> types = sessionDataSet.getColumnTypes();
-
-    if (needDataTypePrinted) {
-      for (int i = 0; i < names.size(); i++) {
-        if (!names.get(i).equals("Time") && !names.get(i).equals("Device")) {
-          headers.add(String.format("%s(%s)", names.get(i), types.get(i)));
+  @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
+  private static void writeValue(List<Field> fields, BufferedWriter bw) throws IOException {
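+    // CSV quoting rules used below: a value containing an unescaped double quote
+    // is escaped and wrapped in quotes; a value whose quotes are already escaped,
+    // or one containing a comma, is only wrapped; nulls become empty cells.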
+    for (int j = 0; j < fields.size() - 1; j++) {
+      String value = fields.get(j).getStringValue();
+      if ("null".equalsIgnoreCase(value)) {
+        bw.write(",");
+      } else {
+        if (fields.get(j).getDataType() == TSDataType.TEXT) {
+          int location = value.indexOf("\"");
+          if (location > -1) {
+            if (location == 0 || value.charAt(location - 1) != '\\') {
+              bw.write("\"" + value.replace("\"", "\\\"") + "\",");
+            } else {
+              bw.write("\"" + value + "\",");
+            }
+          } else if (value.contains(",")) {
+            bw.write("\"" + value + "\",");
+          } else {
+            bw.write(value + ",");
+          }
         } else {
-          headers.add(names.get(i));
+          bw.write(value + ",");
         }
       }
-    } else {
-      names.forEach(name -> headers.add(name));
     }
-    printer.printRecord(headers);
-
-    while (sessionDataSet.hasNext()) {
-      RowRecord rowRecord = sessionDataSet.next();
-      ArrayList<String> record = new ArrayList<>();
-      if (rowRecord.getTimestamp() != 0) {
-        record.add(timeTrans(rowRecord.getTimestamp()));
+    String lastValue = fields.get(fields.size() - 1).getStringValue();
+    if ("null".equalsIgnoreCase(lastValue)) {
+      bw.write("\n");
+    } else {
+      if (fields.get(fields.size() - 1).getDataType() == TSDataType.TEXT) {
+        int location = lastValue.indexOf("\"");
+        if (location > -1) {
+          if (location == 0 || lastValue.charAt(location - 1) != '\\') {
+            bw.write("\"" + lastValue.replace("\"", "\\\"") + "\"\n");
+          } else {
+            bw.write("\"" + lastValue + "\"\n");
+          }
+        } else if (lastValue.contains(",")) {
+          bw.write("\"" + lastValue + "\"\n");
+        } else {
+          bw.write(lastValue + "\n");
+        }
+      } else {
+        bw.write(lastValue + "\n");
       }
-      rowRecord
-          .getFields()
-          .forEach(
-              field -> {
-                String fieldStringValue = field.getStringValue();
-                if (!field.getStringValue().equals("null")) {
-                  if (field.getDataType() == TSDataType.TEXT
-                      && !fieldStringValue.startsWith("root.")) {
-                    fieldStringValue = "\"" + fieldStringValue + "\"";
-                  }
-                  record.add(fieldStringValue);
-                } else {
-                  record.add("");
-                }
-              });
-      printer.printRecord(record);
     }
-
-    printer.flush();
-    printer.close();
-    return true;
   }
 }
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
index c826c03..b0d3075 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
@@ -16,67 +16,53 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iotdb.tool;
 
 import org.apache.iotdb.exception.ArgsErrorException;
 import org.apache.iotdb.rpc.IoTDBConnectionException;
 import org.apache.iotdb.rpc.StatementExecutionException;
 import org.apache.iotdb.session.Session;
-import org.apache.iotdb.session.SessionDataSet;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 
+import jline.console.ConsoleReader;
+import me.tongfei.progressbar.ProgressBar;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.DefaultParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVParser;
-import org.apache.commons.csv.CSVRecord;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.thrift.annotation.Nullable;
+import org.apache.commons.cli.ParseException;
 
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.text.ParseException;
+import java.io.LineNumberReader;
+import java.nio.charset.StandardCharsets;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.BOOLEAN;
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.FLOAT;
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.INT32;
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.INT64;
-import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.TEXT;
+import java.util.Map.Entry;
 
+/** Reads a CSV-formatted data file and inserts all of its data into IoTDB. */
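+// Example of the expected input (hypothetical paths), aligned by time:
+//   Time,root.fit.d1.s1,root.fit.d1.s2
+//   1,100,20.5
+//   2,200,21.0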
 public class ImportCsv extends AbstractCsvTool {
 
   private static final String FILE_ARGS = "f";
   private static final String FILE_NAME = "file or folder";
-
-  private static final String FAILED_FILE_ARGS = "fd";
-  private static final String FAILED_FILE_NAME = "failed file directory";
-
-  private static final String CSV_SUFFIXS = "csv";
-  private static final String TXT_SUFFIXS = "txt";
+  private static final String FILE_SUFFIX = "csv";
 
   private static final String TSFILEDB_CLI_PREFIX = "ImportCsv";
   private static final String ILLEGAL_PATH_ARGUMENT = "Path parameter is null";
 
-  private static String targetPath;
-  private static String failedFileDirectory = null;
+  // These variables are fields rather than locals because Sonar's complexity
+  // check forces part of splitCsvLine into a separate method (nextNode), which
+  // must share this cursor state.
+  private static int i;
+  private static int startIndex;
 
   /**
    * create the commandline options.
@@ -97,15 +83,6 @@ public class ImportCsv extends AbstractCsvTool {
             .build();
     options.addOption(opFile);
 
-    Option opFailedFile =
-        Option.builder(FAILED_FILE_ARGS)
-            .argName(FAILED_FILE_NAME)
-            .hasArg()
-            .desc(
-                "Specifying a directory to save failed file, default YOUR_CSV_FILE_PATH (optional)")
-            .build();
-    options.addOption(opFailedFile);
-
     Option opHelp =
         Option.builder(HELP_ARGS)
             .longOpt(HELP_ARGS)
@@ -125,25 +102,107 @@ public class ImportCsv extends AbstractCsvTool {
     return options;
   }
 
-  /**
-   * parse optional params
-   *
-   * @param commandLine
-   */
-  private static void parseSpecialParams(CommandLine commandLine) {
-    timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS);
-    targetPath = commandLine.getOptionValue(FILE_ARGS);
-    if (commandLine.getOptionValue(FAILED_FILE_ARGS) != null) {
-      failedFileDirectory = commandLine.getOptionValue(FAILED_FILE_ARGS);
-      File file = new File(failedFileDirectory);
-      if (!file.isDirectory()) {
-        file.mkdir();
-        failedFileDirectory = file.getAbsolutePath() + File.separator;
+  /** Loads data from a CSV file and inserts it into IoTDB. */
+  @SuppressWarnings("squid:S1135")
+  private static void loadDataFromCSV(File file) {
+    int fileLine;
+    try {
+      fileLine = getFileLineCount(file);
+    } catch (IOException e) {
+      System.out.println("Failed to import file: " + file.getName());
+      return;
+    }
+    System.out.println("Start to import data from: " + file.getName());
+    try (BufferedReader br =
+            new BufferedReader(
+                new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8));
+        ProgressBar pb = new ProgressBar("Import from: " + file.getName(), fileLine)) {
+      pb.setExtraMessage("Importing...");
+      String header = br.readLine();
+      String[] cols = splitCsvLine(header);
+      if (cols.length <= 1) {
+        System.out.println("The CSV file " + file.getName() + " illegal, please check first line");
+        return;
+      }
+
+      List<String> devices = new ArrayList<>();
+      List<Long> times = new ArrayList<>();
+      List<List<String>> measurementsList = new ArrayList<>();
+      List<List<String>> valuesList = new ArrayList<>();
+      Map<String, List<Integer>> devicesToPositions = new HashMap<>();
+      Map<String, List<String>> devicesToMeasurements = new HashMap<>();
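+      // devicesToPositions maps a device path to the CSV column indices of its
+      // measurements; devicesToMeasurements maps it to the measurement names.
+      // Both are built from the header row below.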
+
+      for (int i = 1; i < cols.length; i++) {
+        splitColToDeviceAndMeasurement(cols[i], devicesToPositions, devicesToMeasurements, i);
+      }
+
+      SimpleDateFormat timeFormatter = null;
+      boolean useFormatter = false;
+
+      int lineNumber = 0;
+      String line;
+      while ((line = br.readLine()) != null) {
+        cols = splitCsvLine(line);
+        lineNumber++;
+        if (lineNumber == 1) {
+          timeFormatter = formatterInit(cols[0]);
+          useFormatter = (timeFormatter != null);
+        }
+        for (Entry<String, List<Integer>> deviceToPositions : devicesToPositions.entrySet()) {
+          String device = deviceToPositions.getKey();
+          devices.add(device);
+
+          times.add(parseTime(cols[0], useFormatter, timeFormatter));
+
+          List<String> values = new ArrayList<>();
+          for (int position : deviceToPositions.getValue()) {
+            values.add(cols[position]);
+          }
+          valuesList.add(values);
+
+          measurementsList.add(devicesToMeasurements.get(device));
+        }
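+        // Flush a batch every 10000 lines to bound memory usage; rows rejected
+        // for bad measurements are reported but do not abort the import.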
+        if (lineNumber % 10000 == 0) {
+          try {
+            session.insertRecords(devices, times, measurementsList, valuesList);
+          } catch (StatementExecutionException e) {
+            if (e.getMessage().contains("failed to insert measurements")) {
+              System.out.println("Meet error when insert csv because " + e.getMessage());
+              System.out.println("Continue inserting... ");
+            } else {
+              throw e;
+            }
+          }
+          pb.stepTo(lineNumber + 1L);
+          devices = new ArrayList<>();
+          times = new ArrayList<>();
+          measurementsList = new ArrayList<>();
+          valuesList = new ArrayList<>();
+        }
+      }
+      // TODO: change this to insertTablet; inserting record-by-record is slow
+      try {
+        session.insertRecords(devices, times, measurementsList, valuesList);
+      } catch (StatementExecutionException e) {
+        if (e.getMessage().contains("failed to insert measurements")) {
+          System.out.println("Meet error when insert csv because " + e.getMessage());
+          System.out.println("Continue inserting... ");
+        } else {
+          throw e;
+        }
       }
+      System.out.println("Insert csv successfully!");
+      pb.stepTo(fileLine);
+    } catch (FileNotFoundException e) {
+      System.out.println("Cannot find " + file.getName() + " because: " + e.getMessage());
+    } catch (IOException e) {
+      System.out.println("CSV file read exception because: " + e.getMessage());
+    } catch (IoTDBConnectionException | StatementExecutionException e) {
+      System.out.println("Meet error when insert csv because " + e.getMessage());
     }
   }
 
-  public static void main(String[] args) throws IOException, IoTDBConnectionException {
+  public static void main(String[] args) throws IOException {
     Options options = createOptions();
     HelpFormatter hf = new HelpFormatter();
     hf.setOptionComparator(null);
@@ -158,7 +217,7 @@ public class ImportCsv extends AbstractCsvTool {
     }
     try {
       commandLine = parser.parse(options, args);
-    } catch (org.apache.commons.cli.ParseException e) {
+    } catch (ParseException e) {
       System.out.println("Parse error: " + e.getMessage());
       hf.printHelp(TSFILEDB_CLI_PREFIX, options, true);
       return;
@@ -168,539 +227,232 @@ public class ImportCsv extends AbstractCsvTool {
       return;
     }
 
+    ConsoleReader reader = new ConsoleReader();
+    reader.setExpandEvents(false);
     try {
-      parseBasicParams(commandLine);
+      parseBasicParams(commandLine, reader);
       String filename = commandLine.getOptionValue(FILE_ARGS);
       if (filename == null) {
         hf.printHelp(TSFILEDB_CLI_PREFIX, options, true);
         return;
       }
       parseSpecialParams(commandLine);
+      importCsvFromFile(host, port, username, password, filename, timeZoneID);
     } catch (ArgsErrorException e) {
       System.out.println("Args error: " + e.getMessage());
     } catch (Exception e) {
       System.out.println("Encounter an error, because: " + e.getMessage());
+    } finally {
+      reader.close();
     }
+  }
 
-    importFromTargetPath(host, Integer.valueOf(port), username, password, targetPath, timeZoneID);
+  private static long parseTime(String str, boolean useFormatter, SimpleDateFormat timeFormatter) {
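+    // If a date formatter was detected from the first data line, use it;
+    // otherwise the time column is interpreted as an epoch timestamp.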
+    try {
+      if (useFormatter) {
+        return timeFormatter.parse(str).getTime();
+      } else {
+        return Long.parseLong(str);
+      }
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          "Input time format "
+              + str
+              + " error. Use a format like yyyy-MM-dd HH:mm:ss, yyyy-MM-ddTHH:mm:ss or yyyy-MM-ddTHH:mm:ss.SSSZ");
+    }
   }
 
-  /**
-   * Specifying a CSV file or a directory including CSV files that you want to import. This method
-   * can be offered to console cli to implement importing CSV file by command.
-   *
-   * @param host
-   * @param port
-   * @param username
-   * @param password
-   * @param targetPath a CSV file or a directory including CSV files
-   * @param timeZone
-   * @throws IoTDBConnectionException
-   */
-  public static void importFromTargetPath(
-      String host, int port, String username, String password, String targetPath, String timeZone)
-      throws IoTDBConnectionException {
+  private static SimpleDateFormat formatterInit(String time) {
+
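+    // A value that parses as a long is an epoch timestamp and needs no formatter.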
     try {
-      session = new Session(host, Integer.valueOf(port), username, password, false);
+      Long.parseLong(time);
+      return null;
+    } catch (Exception ignored) {
+      // do nothing
+    }
+
+    for (String timeFormat : STRING_TIME_FORMAT) {
+      SimpleDateFormat format = new SimpleDateFormat(timeFormat);
+      try {
+        format.parse(time).getTime();
+        return format;
+      } catch (java.text.ParseException ignored) {
+        // do nothing
+      }
+    }
+    return null;
+  }
+
+  private static void parseSpecialParams(CommandLine commandLine) {
+    timeZoneID = commandLine.getOptionValue(TIME_ZONE_ARGS);
+  }
+
+  public static void importCsvFromFile(
+      String ip, String port, String username, String password, String filename, String timeZone) {
+    try {
+      session = new Session(ip, Integer.parseInt(port), username, password);
       session.open(false);
       timeZoneID = timeZone;
       setTimeZone();
 
-      File file = new File(targetPath);
+      File file = new File(filename);
       if (file.isFile()) {
         importFromSingleFile(file);
       } else if (file.isDirectory()) {
-        File[] files = file.listFiles();
-        if (files == null) {
-          return;
-        }
-
-        for (File subFile : files) {
-          if (subFile.isFile()) {
-            importFromSingleFile(subFile);
-          }
-        }
-      } else {
-        System.out.println("File not found!");
+        importFromDirectory(file);
       }
-    } catch (IoTDBConnectionException | StatementExecutionException e) {
+    } catch (IoTDBConnectionException e) {
       System.out.println("Encounter an error when connecting to server, because " + e.getMessage());
+    } catch (StatementExecutionException e) {
+      System.out.println(
+          "Encounter an error when executing the statement, because " + e.getMessage());
     } finally {
       if (session != null) {
-        session.close();
+        try {
+          session.close();
+        } catch (IoTDBConnectionException e) {
+          System.out.println(
+              "Encounter an error when closing the connection, because " + e.getMessage());
+        }
       }
     }
   }
 
-  /**
-   * import the CSV file and load headers and records.
-   *
-   * @param file the File object of the CSV file that you want to import.
-   */
   private static void importFromSingleFile(File file) {
-    if (file.getName().endsWith(CSV_SUFFIXS) || file.getName().endsWith(TXT_SUFFIXS)) {
-      try {
-        CSVParser csvRecords = readCsvFile(file.getAbsolutePath());
-        List<String> headerNames = csvRecords.getHeaderNames();
-        List<CSVRecord> records = csvRecords.getRecords();
-        if (headerNames.isEmpty()) {
-          System.out.println("Empty file!");
-          return;
-        }
-        if (!headerNames.contains("Time")) {
-          System.out.println("No headers!");
-          return;
-        }
-        if (records.isEmpty()) {
-          System.out.println("No records!");
-          return;
-        }
-        String failedFilePath = null;
-        if (failedFileDirectory == null) {
-          failedFilePath = file.getAbsolutePath() + ".failed";
-        } else {
-          failedFilePath = failedFileDirectory + file.getName() + ".failed";
-        }
-        if (!headerNames.contains("Device")) {
-          writeDataAlignedByTime(headerNames, records, failedFilePath);
-        } else {
-          writeDataAlignedByDevice(headerNames, records, failedFilePath);
-        }
-      } catch (IOException e) {
-        System.out.println("CSV file read exception because: " + e.getMessage());
-      }
+    if (file.getName().endsWith(FILE_SUFFIX)) {
+      loadDataFromCSV(file);
     } else {
-      System.out.println("The file name must end with \"csv\" or \"txt\"!");
+      System.out.println(
+          "File " + file.getName() + "  should ends with '.csv' if you want to import");
     }
   }
 
-  /**
-   * if the data is aligned by time, the data will be written by this method.
-   *
-   * @param headerNames the header names of CSV file
-   * @param records the records of CSV file
-   * @param failedFilePath the directory to save the failed files
-   */
-  private static void writeDataAlignedByTime(
-      List<String> headerNames, List<CSVRecord> records, String failedFilePath) {
-    HashMap<String, List<String>> deviceAndMeasurementNames = new HashMap<>();
-    HashMap<String, TSDataType> headerTypeMap = new HashMap<>();
-    HashMap<String, String> headerNameMap = new HashMap<>();
-    parseHeaders(headerNames, deviceAndMeasurementNames, headerTypeMap, headerNameMap);
-
-    Set<String> devices = deviceAndMeasurementNames.keySet();
-    String devicesStr = StringUtils.join(devices, ",");
-    try {
-      queryType(devicesStr, headerTypeMap, "Time");
-    } catch (StatementExecutionException | IoTDBConnectionException e) {
-      e.printStackTrace();
+  private static void importFromDirectory(File file) {
+    File[] files = file.listFiles();
+    if (files == null) {
+      return;
     }
 
-    SimpleDateFormat timeFormatter = formatterInit(records.get(0).get("Time"));
-
-    ArrayList<List<Object>> failedRecords = new ArrayList<>();
-
-    for (Map.Entry<String, List<String>> entry : deviceAndMeasurementNames.entrySet()) {
-      String deviceId = entry.getKey();
-      List<Long> times = new ArrayList<>();
-      List<String> measurementNames = entry.getValue();
-      List<List<TSDataType>> typesList = new ArrayList<>();
-      List<List<Object>> valuesList = new ArrayList<>();
-      List<List<String>> measurementsList = new ArrayList<>();
-      records.stream()
-          .forEach(
-              record -> {
-                ArrayList<TSDataType> types = new ArrayList<>();
-                ArrayList<Object> values = new ArrayList<>();
-                ArrayList<String> measurements = new ArrayList<>();
-                AtomicReference<Boolean> isFail = new AtomicReference<>(false);
-                measurementNames.stream()
-                    .forEach(
-                        measurementName -> {
-                          String header = deviceId + "." + measurementName;
-                          String value = record.get(header);
-                          if (!value.equals("")) {
-                            TSDataType type;
-                            if (!headerTypeMap.containsKey(headerNameMap.get(header))) {
-                              type = typeInfer(value);
-                              if (type != null) {
-                                headerTypeMap.put(header, type);
-                              } else {
-                                System.out.println(
-                                    String.format(
-                                        "Line '%s', column '%s': '%s' unknown type",
-                                        (records.indexOf(record) + 1), header, value));
-                                isFail.set(true);
-                              }
-                            }
-                            type = headerTypeMap.get(headerNameMap.get(header));
-                            if (type != null) {
-                              Object valueTransed = typeTrans(value, type);
-                              if (valueTransed == null) {
-                                isFail.set(true);
-                                System.out.println(
-                                    String.format(
-                                        "Line '%s', column '%s': '%s' can't convert to '%s'",
-                                        (records.indexOf(record) + 1), header, value, type));
-                              } else {
-                                measurements.add(
-                                    headerNameMap.get(header).replace(deviceId + '.', ""));
-                                types.add(type);
-                                values.add(valueTransed);
-                              }
-                            }
-                          }
-                        });
-                if (isFail.get()) {
-                  failedRecords.add(record.stream().collect(Collectors.toList()));
-                }
-                if (!measurements.isEmpty()) {
-                  try {
-                    if (timeFormatter == null) {
-                      try {
-                        times.add(Long.valueOf(record.get("Time")));
-                      } catch (Exception e) {
-                        System.out.println(
-                            "Meet error when insert csv because the format of time is not supported");
-                        System.exit(0);
-                      }
-                    } else {
-                      times.add(timeFormatter.parse(record.get("Time")).getTime());
-                    }
-                  } catch (ParseException e) {
-                    e.printStackTrace();
-                  }
-                  typesList.add(types);
-                  valuesList.add(values);
-                  measurementsList.add(measurements);
-                }
-              });
-      try {
-        session.insertRecordsOfOneDevice(deviceId, times, measurementsList, typesList, valuesList);
-      } catch (StatementExecutionException | IoTDBConnectionException e) {
-        System.out.println("Meet error when insert csv because " + e.getMessage());
-        System.exit(0);
+    for (File subFile : files) {
+      if (subFile.isFile()) {
+        if (subFile.getName().endsWith(FILE_SUFFIX)) {
+          loadDataFromCSV(subFile);
+        } else {
+          System.out.println(
+              "File " + file.getName() + " should ends with '.csv' if you want to import");
+        }
       }
     }
-    if (!failedRecords.isEmpty()) {
-      writeCsvFile(headerNames, failedRecords, failedFilePath);
-    }
-    System.out.println("Import completely!");
   }
 
-  /**
-   * if the data is aligned by device, the data will be written by this method.
-   *
-   * @param headerNames the header names of CSV file
-   * @param records the records of CSV file
-   * @param failedFilePath the directory to save the failed files
-   */
-  private static void writeDataAlignedByDevice(
-      List<String> headerNames, List<CSVRecord> records, String failedFilePath) {
-    HashMap<String, TSDataType> headerTypeMap = new HashMap<>();
-    HashMap<String, String> headerNameMap = new HashMap<>();
-    parseHeaders(headerNames, null, headerTypeMap, headerNameMap);
-    Set<String> devices =
-        records.stream().map(record -> record.get("Device")).collect(Collectors.toSet());
-    String devicesStr = StringUtils.join(devices, ",");
-    try {
-      queryType(devicesStr, headerTypeMap, "Device");
-    } catch (StatementExecutionException | IoTDBConnectionException e) {
-      e.printStackTrace();
-    }
-
-    SimpleDateFormat timeFormatter = formatterInit(records.get(0).get("Time"));
-    Set<String> measurementNames = headerNameMap.keySet();
-    ArrayList<List<Object>> failedRecords = new ArrayList<>();
-
-    devices.stream()
-        .forEach(
-            device -> {
-              List<Long> times = new ArrayList<>();
-
-              List<List<TSDataType>> typesList = new ArrayList<>();
-              List<List<Object>> valuesList = new ArrayList<>();
-              List<List<String>> measurementsList = new ArrayList<>();
-
-              records.stream()
-                  .filter(record -> record.get("Device").equals(device))
-                  .forEach(
-                      record -> {
-                        ArrayList<TSDataType> types = new ArrayList<>();
-                        ArrayList<Object> values = new ArrayList<>();
-                        ArrayList<String> measurements = new ArrayList<>();
-
-                        AtomicReference<Boolean> isFail = new AtomicReference<>(false);
-
-                        measurementNames.stream()
-                            .forEach(
-                                measurement -> {
-                                  String value = record.get(measurement);
-                                  if (!value.equals("")) {
-                                    TSDataType type;
-                                    if (!headerTypeMap.containsKey(
-                                        headerNameMap.get(measurement))) {
-                                      type = typeInfer(value);
-                                      if (type != null) {
-                                        headerTypeMap.put(measurement, type);
-                                      } else {
-                                        System.out.println(
-                                            String.format(
-                                                "Line '%s', column '%s': '%s' unknown type",
-                                                (records.indexOf(record) + 1), measurement, value));
-                                        isFail.set(true);
-                                      }
-                                    }
-                                    type = headerTypeMap.get(headerNameMap.get(measurement));
-                                    if (type != null) {
-                                      Object valueTransed = typeTrans(value, type);
-                                      if (valueTransed == null) {
-                                        isFail.set(true);
-                                        System.out.println(
-                                            String.format(
-                                                "Line '%s', column '%s': '%s' can't convert to '%s'",
-                                                (records.indexOf(record) + 1),
-                                                measurement,
-                                                value,
-                                                type));
-                                      } else {
-                                        values.add(valueTransed);
-                                        measurements.add(headerNameMap.get(measurement));
-                                        types.add(type);
-                                      }
-                                    }
-                                  }
-                                });
-                        if (isFail.get()) {
-                          failedRecords.add(record.stream().collect(Collectors.toList()));
-                        }
-                        if (!measurements.isEmpty()) {
-                          try {
-                            if (timeFormatter == null) {
-                              try {
-                                times.add(Long.valueOf(record.get("Time")));
-                              } catch (Exception e) {
-                                System.out.println(
-                                    "Meet error when insert csv because the format of time is not supported");
-                                System.exit(0);
-                              }
-                            } else {
-                              times.add(timeFormatter.parse(record.get("Time")).getTime());
-                            }
-                          } catch (ParseException e) {
-                            e.printStackTrace();
-                          }
-                          typesList.add(types);
-                          valuesList.add(values);
-                          measurementsList.add(measurements);
-                        }
-                      });
-              try {
-                session.insertRecordsOfOneDevice(
-                    device, times, measurementsList, typesList, valuesList);
-              } catch (StatementExecutionException | IoTDBConnectionException e) {
-                System.out.println("Meet error when insert csv because " + e.getMessage());
-                System.exit(0);
-              }
-            });
-    if (!failedRecords.isEmpty()) {
-      writeCsvFile(headerNames, failedRecords, failedFilePath);
+  private static int getFileLineCount(File file) throws IOException {
+    int line;
+    try (LineNumberReader count =
+        new LineNumberReader(
+            new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
+      while (count.skip(Long.MAX_VALUE) > 0) {
+        // Loop just in case the file is > Long.MAX_VALUE or skip() decides to not read the entire
+        // file
+      }
+      // +1 because line index starts at 0
+      line = count.getLineNumber() + 1;
     }
-    System.out.println("Import completely!");
+    return line;
   }
 
-  /**
-   * read data from the CSV file
-   *
-   * @param path
-   * @return
-   * @throws IOException
-   */
-  private static CSVParser readCsvFile(String path) throws IOException {
-    return CSVFormat.EXCEL
-        .withFirstRecordAsHeader()
-        .withQuote('`')
-        .withEscape('\\')
-        .withIgnoreEmptyLines()
-        .parse(new InputStreamReader(new FileInputStream(path)));
-  }
-
-  /**
-   * parse deviceNames, measurementNames(aligned by time), headerType from headers
-   *
-   * @param headerNames
-   * @param deviceAndMeasurementNames
-   * @param headerTypeMap
-   * @param headerNameMap
-   */
-  private static void parseHeaders(
-      List<String> headerNames,
-      @Nullable HashMap<String, List<String>> deviceAndMeasurementNames,
-      HashMap<String, TSDataType> headerTypeMap,
-      HashMap<String, String> headerNameMap) {
-    String regex = "(?<=\\()\\S+(?=\\))";
-    Pattern pattern = Pattern.compile(regex);
-    for (String headerName : headerNames) {
-      if (headerName.equals("Time") || headerName.equals("Device")) continue;
-      Matcher matcher = pattern.matcher(headerName);
-      String type;
-      if (matcher.find()) {
-        type = matcher.group();
-        String headerNameWithoutType =
-            headerName.replace("(" + type + ")", "").replaceAll("\\s+", "");
-        headerNameMap.put(headerName, headerNameWithoutType);
-        headerTypeMap.put(headerNameWithoutType, getType(type));
-      } else {
-        headerNameMap.put(headerName, headerName);
-      }
-      String[] split = headerName.split("\\.");
-      String measurementName = split[split.length - 1];
-      String deviceName = headerName.replace("." + measurementName, "");
-      if (deviceAndMeasurementNames != null) {
-        if (!deviceAndMeasurementNames.containsKey(deviceName)) {
-          deviceAndMeasurementNames.put(deviceName, new ArrayList<>());
+  private static void splitColToDeviceAndMeasurement(
+      String col,
+      Map<String, List<Integer>> devicesToPositions,
+      Map<String, List<String>> devicesToMeasurements,
+      int position) {
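+    // A header cell is a full path such as device.measurement. The final node
+    // may be double-quoted (and may contain escaped quotes), so the split point
+    // is either the quote opening the final node or the last path separator.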
+    if (col.length() > 0) {
+      if (col.charAt(col.length() - 1) == TsFileConstant.DOUBLE_QUOTE) {
+        int endIndex = col.lastIndexOf('"', col.length() - 2);
+        // skip closing-quote candidates that are escaped with a backslash
+        while (endIndex != -1 && col.charAt(endIndex - 1) == '\\') {
+          endIndex = col.lastIndexOf('"', endIndex - 2);
+        }
+        if (endIndex != -1 && (endIndex == 0 || col.charAt(endIndex - 1) == '.')) {
+          putDeviceAndMeasurement(
+              col.substring(0, endIndex - 1),
+              col.substring(endIndex),
+              devicesToPositions,
+              devicesToMeasurements,
+              position);
+        } else {
+          throw new IllegalArgumentException(ILLEGAL_PATH_ARGUMENT);
+        }
+      } else if (col.charAt(col.length() - 1) != TsFileConstant.DOUBLE_QUOTE
+          && col.charAt(col.length() - 1) != TsFileConstant.PATH_SEPARATOR_CHAR) {
+        int endIndex = col.lastIndexOf(TsFileConstant.PATH_SEPARATOR_CHAR);
+        if (endIndex < 0) {
+          putDeviceAndMeasurement("", col, devicesToPositions, devicesToMeasurements, position);
+        } else {
+          putDeviceAndMeasurement(
+              col.substring(0, endIndex),
+              col.substring(endIndex + 1),
+              devicesToPositions,
+              devicesToMeasurements,
+              position);
         }
-        deviceAndMeasurementNames.get(deviceName).add(measurementName);
+      } else {
+        throw new IllegalArgumentException(ILLEGAL_PATH_ARGUMENT);
       }
+    } else {
+      putDeviceAndMeasurement("", col, devicesToPositions, devicesToMeasurements, position);
     }
   }
 
-  /**
-   * query data type of timeseries from IoTDB
-   *
-   * @param deviceNames
-   * @param headerTypeMap
-   * @param alignedType
-   * @throws IoTDBConnectionException
-   * @throws StatementExecutionException
-   */
-  private static void queryType(
-      String deviceNames, HashMap<String, TSDataType> headerTypeMap, String alignedType)
-      throws IoTDBConnectionException, StatementExecutionException {
-    String sql = "select * from " + deviceNames + " limit 1";
-    SessionDataSet sessionDataSet = session.executeQueryStatement(sql);
-    List<String> columnNames = sessionDataSet.getColumnNames();
-    List<String> columnTypes = sessionDataSet.getColumnTypes();
-    for (int i = 1; i < columnNames.size(); i++) {
-      if (alignedType == "Time") {
-        headerTypeMap.put(columnNames.get(i), getType(columnTypes.get(i)));
-      } else if (alignedType == "Device") {
-        String[] split = columnNames.get(i).split("\\.");
-        String measurement = split[split.length - 1];
-        headerTypeMap.put(measurement, getType(columnTypes.get(i)));
-      }
+  private static void putDeviceAndMeasurement(
+      String device,
+      String measurement,
+      Map<String, List<Integer>> devicesToPositions,
+      Map<String, List<String>> devicesToMeasurements,
+      int position) {
+    if (devicesToMeasurements.get(device) == null && devicesToPositions.get(device) == null) {
+      List<String> measurements = new ArrayList<>();
+      measurements.add(measurement);
+      devicesToMeasurements.put(device, measurements);
+      List<Integer> positions = new ArrayList<>();
+      positions.add(position);
+      devicesToPositions.put(device, positions);
+    } else {
+      devicesToMeasurements.get(device).add(measurement);
+      devicesToPositions.get(device).add(position);
     }
   }
 
-  /**
-   * return a suit time formatter
-   *
-   * @param time
-   * @return
-   */
-  private static SimpleDateFormat formatterInit(String time) {
-    try {
-      Long.parseLong(time);
-      return null;
-    } catch (Exception ignored) {
-      // do nothing
-    }
-
-    for (String timeFormat : STRING_TIME_FORMAT) {
-      SimpleDateFormat format = new SimpleDateFormat(timeFormat);
-      try {
-        format.parse(time).getTime();
-        System.out.println(timeFormat);
-        return format;
-      } catch (java.text.ParseException ignored) {
-        // do nothing
+  public static String[] splitCsvLine(String path) {
+    List<String> nodes = new ArrayList<>();
+    startIndex = 0;
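+    // Split on commas that sit outside quotes; quoted fields are delegated to
+    // nextNode, which advances the shared cursor past the closing quote.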
+    for (i = 0; i < path.length(); i++) {
+      if (path.charAt(i) == ',') {
+        nodes.add(path.substring(startIndex, i));
+        startIndex = i + 1;
+      } else if (path.charAt(i) == '"') {
+        nextNode(path, nodes, '"');
+      } else if (path.charAt(i) == '\'') {
+        nextNode(path, nodes, '\'');
       }
     }
-    return null;
-  }
-
-  /**
-   * return the TSDataType
-   *
-   * @param typeStr
-   * @return
-   */
-  private static TSDataType getType(String typeStr) {
-    switch (typeStr) {
-      case "TEXT":
-        return TEXT;
-      case "BOOLEAN":
-        return BOOLEAN;
-      case "INT32":
-        return INT32;
-      case "INT64":
-        return INT64;
-      case "FLOAT":
-        return FLOAT;
-      case "DOUBLE":
-        return DOUBLE;
-      default:
-        return null;
+    if (startIndex <= path.length() - 1) {
+      nodes.add(path.substring(startIndex));
     }
+    return nodes.toArray(new String[0]);
   }
 
-  /**
-   * if data type of timeseries is not defined in headers of schema, this method will be called to
-   * do type inference
-   *
-   * @param value
-   * @return
-   */
-  private static TSDataType typeInfer(String value) {
-    if (value.contains("\"")) return TEXT;
-    else if (value.equals("true") || value.equals("false")) return BOOLEAN;
-    else if (!value.contains(".")) {
-      try {
-        Integer.valueOf(value);
-        return INT32;
-      } catch (Exception e) {
-        try {
-          Long.valueOf(value);
-          return INT64;
-        } catch (Exception exception) {
-          return null;
-        }
-      }
-    } else {
-      if (Float.valueOf(value).toString().length() == Double.valueOf(value).toString().length())
-        return FLOAT;
-      else return DOUBLE;
+  public static void nextNode(String path, List<String> nodes, char enclose) {
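+    // Consume one quoted field: locate the matching unescaped closing quote and
+    // move the shared indices (i, startIndex) past it.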
+    int endIndex = path.indexOf(enclose, i + 1);
+    // skip closing-quote candidates that are escaped with a backslash
+    while (endIndex != -1 && path.charAt(endIndex - 1) == '\\') {
+      endIndex = path.indexOf(enclose, endIndex + 1);
     }
-  }
-
-  /**
-   * @param value
-   * @param type
-   * @return
-   */
-  private static Object typeTrans(String value, TSDataType type) {
-    try {
-      switch (type) {
-        case TEXT:
-          return value.substring(1, value.length() - 1);
-        case BOOLEAN:
-          if (!value.equals("true") && !value.equals("false")) {
-            return null;
-          }
-          return Boolean.valueOf(value);
-        case INT32:
-          return Integer.valueOf(value);
-        case INT64:
-          return Long.valueOf(value);
-        case FLOAT:
-          return Float.valueOf(value);
-        case DOUBLE:
-          return Double.valueOf(value);
-        default:
-          return null;
-      }
-    } catch (NumberFormatException e) {
-      return null;
+    if (endIndex != -1 && (endIndex == path.length() - 1 || path.charAt(endIndex + 1) == ',')) {
+      nodes.add(path.substring(startIndex + 1, endIndex));
+      i = endIndex + 1;
+      startIndex = endIndex + 2;
+    } else {
+      throw new IllegalArgumentException("Illegal csv line" + path);
     }
   }
 }
diff --git a/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java b/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
index 12eaae8..0be9b55 100644
--- a/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
+++ b/cli/src/test/java/org/apache/iotdb/cli/AbstractScript.java
@@ -18,8 +18,6 @@
  */
 package org.apache.iotdb.cli;
 
-import org.apache.thrift.annotation.Nullable;
-
 import java.io.*;
 import java.util.ArrayList;
 import java.util.List;
@@ -29,7 +27,7 @@ import static org.junit.Assert.assertEquals;
 
 public abstract class AbstractScript {
 
-  protected void testOutput(ProcessBuilder builder, @Nullable String[] output) throws IOException {
+  protected void testOutput(ProcessBuilder builder, String[] output) throws IOException {
     builder.redirectErrorStream(true);
     Process p = builder.start();
     BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()));
@@ -56,10 +54,8 @@ public abstract class AbstractScript {
       System.out.println(s);
     }
 
-    if (output != null) {
-      for (int i = 0; i < output.length; i++) {
-        assertEquals(output[output.length - 1 - i], outputList.get(outputList.size() - 1 - i));
-      }
+    for (int i = 0; i < output.length; i++) {
+      assertEquals(output[output.length - 1 - i], outputList.get(outputList.size() - 1 - i));
     }
   }
 
diff --git a/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java b/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
index 7fa89bb..829cd9a 100644
--- a/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
+++ b/cli/src/test/java/org/apache/iotdb/cli/StartClientScriptIT.java
@@ -75,23 +75,9 @@ public class StartClientScriptIT extends AbstractScript {
             "cmd.exe",
             "/c",
             dir + File.separator + "sbin" + File.separator + "start-cli.bat",
-            "-maxPRC",
-            "0",
             "-e",
             "\"flush\"");
     testOutput(builder2, output2);
-
-    final String[] output3 = {
-      "IoTDB> error format of max print row count, it should be an integer number"
-    };
-    ProcessBuilder builder3 =
-        new ProcessBuilder(
-            "cmd.exe",
-            "/c",
-            dir + File.separator + "sbin" + File.separator + "start-cli.bat",
-            "-maxPRC",
-            "-1111111111111111111111111111");
-    testOutput(builder3, output3);
   }
 
   @Override
@@ -119,21 +105,8 @@ public class StartClientScriptIT extends AbstractScript {
         new ProcessBuilder(
             "sh",
             dir + File.separator + "sbin" + File.separator + "start-cli.sh",
-            "-maxPRC",
-            "0",
             "-e",
             "\"flush\"");
     testOutput(builder2, output2);
-
-    final String[] output3 = {
-      "IoTDB> error format of max print row count, it should be an integer number"
-    };
-    ProcessBuilder builder3 =
-        new ProcessBuilder(
-            "sh",
-            dir + File.separator + "sbin" + File.separator + "start-cli.sh",
-            "-maxPRC",
-            "-1111111111111111111111111111");
-    testOutput(builder3, output3);
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/exception/metadata/DuplicatedTemplateException.java b/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
similarity index 67%
rename from server/src/main/java/org/apache/iotdb/db/exception/metadata/DuplicatedTemplateException.java
rename to cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
index 0ffee81..fd1c9ba 100644
--- a/server/src/main/java/org/apache/iotdb/db/exception/metadata/DuplicatedTemplateException.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/CsvLineSplitTest.java
@@ -16,16 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.iotdb.tool;
 
-package org.apache.iotdb.db.exception.metadata;
+import org.junit.Assert;
+import org.junit.Test;
 
-import org.apache.iotdb.rpc.TSStatusCode;
+public class CsvLineSplitTest {
 
-public class DuplicatedTemplateException extends MetadataException {
-  public DuplicatedTemplateException(String path) {
-    super(
-        String.format("Failed to create duplicated template for path %s", path),
-        TSStatusCode.DUPLICATED_TEMPLATE.getStatusCode(),
-        true);
+  @Test
+  public void testSplit() {
+    Assert.assertArrayEquals(
+        new String[] {"", "a", "b", "c", "\\\""}, ImportCsv.splitCsvLine(",a,b,c,\"\\\"\""));
+    Assert.assertArrayEquals(
+        new String[] {"", "a", "b", "\\'"}, ImportCsv.splitCsvLine(",a,b,\"\\'\""));
   }
 }
diff --git a/cli/src/test/java/org/apache/iotdb/tool/integration/ExportCsvTestIT.java b/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
similarity index 98%
rename from cli/src/test/java/org/apache/iotdb/tool/integration/ExportCsvTestIT.java
rename to cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
index b745e86..23bc812 100644
--- a/cli/src/test/java/org/apache/iotdb/tool/integration/ExportCsvTestIT.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/ExportCsvTestIT.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tool.integration;
+package org.apache.iotdb.tool;
 
 import org.apache.iotdb.cli.AbstractScript;
 
diff --git a/cli/src/test/java/org/apache/iotdb/tool/integration/ImportCsvTestIT.java b/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
similarity index 98%
rename from cli/src/test/java/org/apache/iotdb/tool/integration/ImportCsvTestIT.java
rename to cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
index f5f8f86..6516b0e 100644
--- a/cli/src/test/java/org/apache/iotdb/tool/integration/ImportCsvTestIT.java
+++ b/cli/src/test/java/org/apache/iotdb/tool/ImportCsvTestIT.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tool.integration;
+package org.apache.iotdb.tool;
 
 import org.apache.iotdb.cli.AbstractScript;
 
diff --git a/cli/src/test/java/org/apache/iotdb/tool/unit/WriteCsvFileTestUT.java b/cli/src/test/java/org/apache/iotdb/tool/unit/WriteCsvFileTestUT.java
deleted file mode 100644
index 7c7bd5e..0000000
--- a/cli/src/test/java/org/apache/iotdb/tool/unit/WriteCsvFileTestUT.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.tool.unit;
-
-import org.apache.iotdb.tool.AbstractCsvTool;
-
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.junit.Assert.assertTrue;
-
-public class WriteCsvFileTestUT {
-  @Test
-  public void writeCsvFileTest() {
-    List<String> headerNames =
-        new ArrayList<>(Arrays.asList("Time", "column1", "column2", "column3"));
-
-    List<Object> row1 = new ArrayList<>(Arrays.asList(1, null, "hello,world", true));
-    List<Object> row2 = new ArrayList<>(Arrays.asList(2, "", "hello,world", false));
-    List<Object> row3 = new ArrayList<>(Arrays.asList(3, "100", "hello world!!!", false));
-    ArrayList<List<Object>> records = new ArrayList<>(Arrays.asList(row1, row2, row3));
-
-    assertTrue(AbstractCsvTool.writeCsvFile(headerNames, records, "./test0.csv"));
-    assertTrue(AbstractCsvTool.writeCsvFile(null, records, "./test1.csv"));
-  }
-}
diff --git a/client-cpp/pom.xml b/client-cpp/pom.xml
index bb942f9..4d205f4 100644
--- a/client-cpp/pom.xml
+++ b/client-cpp/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <artifactId>iotdb-parent</artifactId>
         <groupId>org.apache.iotdb</groupId>
-        <version>0.12.4</version>
+        <version>0.12.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>client-cpp</artifactId>
@@ -132,7 +132,7 @@
                     <plugin>
                         <groupId>com.coderplus.maven.plugins</groupId>
                         <artifactId>copy-rename-maven-plugin</artifactId>
-                        <version>1.0.1</version>
+                        <version>1.0</version>
                         <executions>
                             <execution>
                                 <id>copy-thrift-source</id>
diff --git a/client-cpp/src/main/CMakeLists.txt b/client-cpp/src/main/CMakeLists.txt
index 44c5aa4..6e4e468 100644
--- a/client-cpp/src/main/CMakeLists.txt
+++ b/client-cpp/src/main/CMakeLists.txt
@@ -38,7 +38,7 @@ ENDIF()
 # Add Boost include path for MacOS
 INCLUDE_DIRECTORIES(/usr/local/include)
 # Add Thrift include directory
-INCLUDE_DIRECTORIES(${TOOLS_DIR}/thrift/target/thrift-0.14.1/lib/cpp/src)
+INCLUDE_DIRECTORIES(${TOOLS_DIR}/thrift/target/thrift-0.13.0/lib/cpp/src)
 
 # Add ./generated-sources-cpp as a Cmake subdirectory
 AUX_SOURCE_DIRECTORY(./generated-sources-cpp SESSION_SRCS)
diff --git a/client-cpp/src/main/Session.cpp b/client-cpp/src/main/Session.cpp
index cd26f29..48cf484 100644
--- a/client-cpp/src/main/Session.cpp
+++ b/client-cpp/src/main/Session.cpp
@@ -23,30 +23,33 @@ using namespace std;
 
 TSDataType::TSDataType getTSDataTypeFromString(string str) {
     // BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT, NULLTYPE
-    if (str == "BOOLEAN") return TSDataType::BOOLEAN;
-    else if (str == "INT32") return TSDataType::INT32;
-    else if (str == "INT64") return TSDataType::INT64;
-    else if (str == "FLOAT") return TSDataType::FLOAT;
-    else if (str == "DOUBLE") return TSDataType::DOUBLE;
-    else if (str == "TEXT") return TSDataType::TEXT;
-    else if (str == "NULLTYPE") return TSDataType::NULLTYPE;
+    if (str == "BOOLEAN")   return TSDataType::BOOLEAN;
+    else if(str == "INT32") return TSDataType::INT32;
+    else if(str == "INT64") return TSDataType::INT64;
+    else if(str == "FLOAT") return TSDataType::FLOAT;
+    else if(str == "DOUBLE") return TSDataType::DOUBLE;
+    else if(str == "TEXT") return TSDataType::TEXT;
+    else if(str == "NULLTYPE") return TSDataType::NULLTYPE;
     return TSDataType::TEXT;
 }
 
-void RpcUtils::verifySuccess(TSStatus &status) {
+void RpcUtils::verifySuccess(TSStatus& status) {
     if (status.code == TSStatusCode::MULTIPLE_ERROR) {
         verifySuccess(status.subStatus);
         return;
     }
     if (status.code != TSStatusCode::SUCCESS_STATUS) {
-        throw IoTDBConnectionException(to_string(status.code) + ": " + status.message.c_str());
+        char buf[111];
+        sprintf(buf, "%d: %s", status.code, status.message.c_str());
+        throw IoTDBConnectionException(buf);
     }
 }
-
-void RpcUtils::verifySuccess(vector <TSStatus> &statuses) {
+void RpcUtils::verifySuccess(vector<TSStatus>& statuses) {
     for (TSStatus status : statuses) {
         if (status.code != TSStatusCode::SUCCESS_STATUS) {
-            throw BatchExecutionException(statuses, status.message);
+            char buf[111];
+            sprintf(buf, "%s", status.message.c_str());
+            throw BatchExecutionException(statuses, buf);
         }
     }
 }
@@ -56,45 +59,36 @@ TSStatus RpcUtils::getStatus(TSStatusCode::TSStatusCode tsStatusCode) {
     tmpTSStatus.__set_code(tsStatusCode);
     return tmpTSStatus;
 }
-
 TSStatus RpcUtils::getStatus(int code, string message) {
     TSStatus status = TSStatus();
     status.__set_code(code);
     status.__set_message(message);
     return status;
 }
-
-shared_ptr <TSExecuteStatementResp> RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode) {
+shared_ptr<TSExecuteStatementResp> RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode) {
     TSStatus status = getStatus(tsStatusCode);
     return getTSExecuteStatementResp(status);
 }
-
-shared_ptr <TSExecuteStatementResp>
-RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, string message) {
+shared_ptr<TSExecuteStatementResp> RpcUtils::getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, string message) {
     TSStatus status = getStatus(tsStatusCode, message);
     return getTSExecuteStatementResp(status);
 }
-
-shared_ptr <TSExecuteStatementResp> RpcUtils::getTSExecuteStatementResp(TSStatus &status) {
-    shared_ptr <TSExecuteStatementResp> resp(new TSExecuteStatementResp());
+shared_ptr<TSExecuteStatementResp> RpcUtils::getTSExecuteStatementResp(TSStatus& status) {
+    shared_ptr<TSExecuteStatementResp> resp(new TSExecuteStatementResp());
     TSStatus tsStatus(status);
     resp->status = status;
     return resp;
 }
-
-shared_ptr <TSFetchResultsResp> RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode) {
+shared_ptr<TSFetchResultsResp> RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode) {
     TSStatus status = getStatus(tsStatusCode);
     return getTSFetchResultsResp(status);
 }
-
-shared_ptr <TSFetchResultsResp>
-RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, string appendMessage) {
+shared_ptr<TSFetchResultsResp> RpcUtils::getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, string appendMessage) {
     TSStatus status = getStatus(tsStatusCode, appendMessage);
     return getTSFetchResultsResp(status);
 }
-
-shared_ptr <TSFetchResultsResp> RpcUtils::getTSFetchResultsResp(TSStatus &status) {
-    shared_ptr <TSFetchResultsResp> resp(new TSFetchResultsResp());
+shared_ptr<TSFetchResultsResp> RpcUtils::getTSFetchResultsResp(TSStatus& status) {
+    shared_ptr<TSFetchResultsResp> resp(new TSFetchResultsResp());
     TSStatus tsStatus(status);
     resp->__set_status(tsStatus);
     return resp;
@@ -122,36 +116,37 @@ int Tablet::getValueByteSize() {
     int valueOccupation = 0;
     for (int i = 0; i < schemas.size(); i++) {
         switch (schemas[i].second) {
-            case TSDataType::BOOLEAN:
-                valueOccupation += rowSize;
-                break;
-            case TSDataType::INT32:
-                valueOccupation += rowSize * 4;
-                break;
-            case TSDataType::INT64:
-                valueOccupation += rowSize * 8;
-                break;
-            case TSDataType::FLOAT:
-                valueOccupation += rowSize * 4;
-                break;
-            case TSDataType::DOUBLE:
-                valueOccupation += rowSize * 8;
-                break;
-            case TSDataType::TEXT:
-                valueOccupation += rowSize * 4;
-                for (string value : values[i]) {
-                    valueOccupation += value.size();
-                }
-                break;
-            default:
-                throw UnSupportedDataTypeException(
-                        string("Data type ") + to_string(schemas[i].second) + " is not supported.");
+        case TSDataType::BOOLEAN:
+            valueOccupation += rowSize;
+            break;
+        case TSDataType::INT32:
+            valueOccupation += rowSize * 4;
+            break;
+        case TSDataType::INT64:
+            valueOccupation += rowSize * 8;
+            break;
+        case TSDataType::FLOAT:
+            valueOccupation += rowSize * 4;
+            break;
+        case TSDataType::DOUBLE:
+            valueOccupation += rowSize * 8;
+            break;
+        case TSDataType::TEXT:
+            valueOccupation += rowSize * 4;
+            for (const string &value : values[i]) {
+                valueOccupation += value.size();
+            }
+            break;
+        default:
+            char buf[111];
+            snprintf(buf, sizeof(buf), "Data type %d is not supported.", schemas[i].second);
+            throw UnSupportedDataTypeException(buf);
         }
     }
     return valueOccupation;
 }
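+// Worked example (illustrative only): a tablet with 100 rows and schemas
+// {INT32, TEXT} is estimated at 100*4 bytes for the ints plus 100*4 bytes of
+// TEXT length prefixes plus the total byte length of the strings themselves.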
 
-string SessionUtils::getTime(Tablet &tablet) {
+string SessionUtils::getTime(Tablet& tablet) {
     MyStringBuffer timeBuffer;
     for (int i = 0; i < tablet.rowSize; i++) {
         timeBuffer.putLong(tablet.timestamps[i]);
@@ -159,71 +154,78 @@ string SessionUtils::getTime(Tablet &tablet) {
     return timeBuffer.str;
 }
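+// getTime and getValue (below) serialize a Tablet into the raw big-endian
+// byte strings that the tablet insert requests ship in the Thrift payload.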
 
-string SessionUtils::getValue(Tablet &tablet) {
+string SessionUtils::getValue(Tablet& tablet) {
     MyStringBuffer valueBuffer;
     for (int i = 0; i < tablet.schemas.size(); i++) {
         TSDataType::TSDataType dataType = tablet.schemas[i].second;
-        switch (dataType) {
-            case TSDataType::BOOLEAN:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putBool(tablet.values[i][index] == "true");
-                }
-                break;
-            case TSDataType::INT32:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putInt(stoi(tablet.values[i][index]));
-                }
-                break;
-            case TSDataType::INT64:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putLong(stol(tablet.values[i][index]));
-                }
-                break;
-            case TSDataType::FLOAT:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putFloat(stof(tablet.values[i][index]));
-                }
-                break;
-            case TSDataType::DOUBLE:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putDouble(stod(tablet.values[i][index]));
-                }
-                break;
-            case TSDataType::TEXT:
-                for (int index = 0; index < tablet.rowSize; index++) {
-                    valueBuffer.putString(tablet.values[i][index]);
-                }
-                break;
-            default:
-                throw UnSupportedDataTypeException(string("Data type ") + to_string(dataType) + " is not supported.");
+        switch (dataType) {
+        case TSDataType::BOOLEAN:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putBool(tablet.values[i][index] == "true");
+            }
+            break;
+        case TSDataType::INT32:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putInt(stoi(tablet.values[i][index]));
+            }
+            break;
+        case TSDataType::INT64:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putLong(stol(tablet.values[i][index]));
+            }
+            break;
+        case TSDataType::FLOAT:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putFloat(stof(tablet.values[i][index]));
+            }
+            break;
+        case TSDataType::DOUBLE:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putDouble(stod(tablet.values[i][index]));
+            }
+            break;
+        case TSDataType::TEXT:
+            for (int index = 0; index < tablet.rowSize; index++) {
+                valueBuffer.putString(tablet.values[i][index]);
+            }
+            break;
+        default:
+            char buf[111];
+            snprintf(buf, sizeof(buf), "Data type %d is not supported.", dataType);
+            throw UnSupportedDataTypeException(buf);
         }
     }
     return valueBuffer.str;
 }
 
-int SessionDataSet::getBatchSize() {
+int SessionDataSet::getBatchSize()
+{
     return batchSize;
 }
 
-void SessionDataSet::setBatchSize(int batchSize) {
+void SessionDataSet::setBatchSize(int batchSize)
+{
     this->batchSize = batchSize;
 }
 
-vector <string> SessionDataSet::getColumnNames() { return this->columnNameList; }
+vector<string> SessionDataSet::getColumnNames() { return this->columnNameList; }
 
-bool SessionDataSet::hasNext() {
+bool SessionDataSet::hasNext()
+{
     if (hasCachedRecord) {
         return true;
     }
     if (!tsQueryDataSetTimeBuffer.hasRemaining()) {
-        shared_ptr <TSFetchResultsReq> req(new TSFetchResultsReq());
+        shared_ptr<TSFetchResultsReq> req(new TSFetchResultsReq());
         req->__set_sessionId(sessionId);
         req->__set_statement(sql);
         req->__set_fetchSize(batchSize);
         req->__set_queryId(queryId);
         req->__set_isAlign(true);
         try {
-            shared_ptr <TSFetchResultsResp> resp(new TSFetchResultsResp());
+            shared_ptr<TSFetchResultsResp> resp(new TSFetchResultsResp());
             client->fetchResults(*resp, *req);
             RpcUtils::verifySuccess(resp->status);
 
@@ -235,9 +237,11 @@ bool SessionDataSet::hasNext() {
                 rowsIndex = 0;
             }
         }
-        catch (IoTDBConnectionException e) {
-            throw IoTDBConnectionException(
-                    string("Cannot fetch result from server, because of network connection: ") + e.what());
+        catch (const IoTDBConnectionException &e)
+        {
+            char buf[256];
+            snprintf(buf, sizeof(buf), "Cannot fetch result from server, because of network connection: %s", e.what());
+            throw IoTDBConnectionException(buf);
         }
     }
 
@@ -247,7 +251,7 @@ bool SessionDataSet::hasNext() {
 }
 
 void SessionDataSet::constructOneRow() {
-    vector <Field> outFields;
+    vector<Field> outFields;
     int loc = 0;
     for (int i = 0; i < columnSize; i++) {
         Field field;
@@ -265,40 +269,41 @@ void SessionDataSet::constructOneRow() {
                 TSDataType::TSDataType dataType = getTSDataTypeFromString(columnTypeDeduplicatedList[loc]);
                 field.dataType = dataType;
                 switch (dataType) {
-                    case TSDataType::BOOLEAN: {
-                        bool booleanValue = valueBuffer->getBool();
-                        field.boolV = booleanValue;
-                        break;
-                    }
-                    case TSDataType::INT32: {
-                        int intValue = valueBuffer->getInt();
-                        field.intV = intValue;
-                        break;
-                    }
-                    case TSDataType::INT64: {
-                        int64_t longValue = valueBuffer->getLong();
-                        field.longV = longValue;
-                        break;
-                    }
-                    case TSDataType::FLOAT: {
-                        float floatValue = valueBuffer->getFloat();
-                        field.floatV = floatValue;
-                        break;
-                    }
-                    case TSDataType::DOUBLE: {
-                        double doubleValue = valueBuffer->getDouble();
-                        field.doubleV = doubleValue;
-                        break;
-                    }
-                    case TSDataType::TEXT: {
-                        string stringValue = valueBuffer->getString();
-                        field.stringV = stringValue;
-                        break;
-                    }
-                    default: {
-                        throw UnSupportedDataTypeException(
-                                string("Data type ") + columnTypeDeduplicatedList[i].c_str() + " is not supported.");
-                    }
+                case TSDataType::BOOLEAN: {
+                    bool booleanValue = valueBuffer->getBool();
+                    field.boolV = booleanValue;
+                    break;
+                }
+                case TSDataType::INT32: {
+                    int intValue = valueBuffer->getInt();
+                    field.intV = intValue;
+                    break;
+                }
+                case TSDataType::INT64: {
+                    int64_t longValue = valueBuffer->getLong();
+                    field.longV = longValue;
+                    break;
+                }
+                case TSDataType::FLOAT: {
+                    float floatValue = valueBuffer->getFloat();
+                    field.floatV = floatValue;
+                    break;
+                }
+                case TSDataType::DOUBLE: {
+                    double doubleValue = valueBuffer->getDouble();
+                    field.doubleV = doubleValue;
+                    break;
+                }
+                case TSDataType::TEXT: {
+                    string stringValue = valueBuffer->getString();
+                    field.stringV = stringValue;
+                    break;
+                }
+                default: {
+                    char buf[111];
+                    snprintf(buf, sizeof(buf), "Data type %s is not supported.", columnTypeDeduplicatedList[loc].c_str());
+                    throw UnSupportedDataTypeException(buf);
+                }
                 }
             } else {
                 field.dataType = TSDataType::NULLTYPE;
@@ -318,7 +323,8 @@ bool SessionDataSet::isNull(int index, int rowNum) {
     return ((flag >> shift) & bitmap) == 0;
 }
 
-RowRecord *SessionDataSet::next() {
+RowRecord* SessionDataSet::next()
+{
     if (!hasCachedRecord) {
         if (!hasNext()) {
             return NULL;
@@ -329,35 +335,32 @@ RowRecord *SessionDataSet::next() {
     return &rowRecord;
 }
 
-void SessionDataSet::closeOperationHandle() {
-    shared_ptr <TSCloseOperationReq> closeReq(new TSCloseOperationReq());
+void SessionDataSet::closeOperationHandle()
+{
+    shared_ptr<TSCloseOperationReq> closeReq(new TSCloseOperationReq());
     closeReq->__set_sessionId(sessionId);
-    closeReq->__set_statementId(statementId);
     closeReq->__set_queryId(queryId);
-    shared_ptr <TSStatus> closeResp(new TSStatus());
-    try {
-        client->closeOperation(*closeResp, *closeReq);
+    shared_ptr<TSStatus> closeResp(new TSStatus());
+    try
+    {
+        client->closeOperation(*closeResp, *closeReq);
         RpcUtils::verifySuccess(*closeResp);
     }
-    catch (IoTDBConnectionException e) {
-        throw IoTDBConnectionException(
-                string("Error occurs when connecting to server for close operation, because: ") + e.what());
+    catch (const IoTDBConnectionException &e)
+    {
+        char buf[256];
+        snprintf(buf, sizeof(buf), "Error occurs when connecting to server for close operation, because: %s", e.what());
+        throw IoTDBConnectionException(buf);
     }
 }
 
-/**
- * When delete variable, make sure release all resource.
- */
-Session::~Session() {
-    close();
-}
 
 /**
    * check whether the batch has been sorted
    *
    * @return whether the batch has been sorted
    */
-bool Session::checkSorted(Tablet &tablet) {
+bool Session::checkSorted(Tablet& tablet) {
     for (int i = 1; i < tablet.rowSize; i++) {
         if (tablet.timestamps[i] < tablet.timestamps[i - 1]) {
             return false;
@@ -366,7 +369,7 @@ bool Session::checkSorted(Tablet &tablet) {
     return true;
 }
 
-bool Session::checkSorted(vector <int64_t> &times) {
+bool Session::checkSorted(vector<int64_t>& times) {
     for (int i = 1; i < times.size(); i++) {
         if (times[i] < times[i - 1]) {
             return false;
@@ -375,18 +378,19 @@ bool Session::checkSorted(vector <int64_t> &times) {
     return true;
 }
 
-void Session::sortTablet(Tablet &tablet) {
+void Session::sortTablet(Tablet& tablet) {
     /*
      * following part of code sort the batch data by time,
      * so we can insert continuous data in value list to get a better performance
      */
-    // sort to get index, and use index to sort value list
-    int *index = new int[tablet.rowSize];
+    // sort to get index, and use index to sort value list
+    int* index = new int[tablet.rowSize];
     for (int i = 0; i < tablet.rowSize; i++) {
         index[i] = i;
     }
 
     this->sortIndexByTimestamp(index, tablet.timestamps, tablet.rowSize);
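+    // sortIndexByTimestamp reorders only the index permutation; the timestamps
+    // themselves are sorted separately below so they match the permuted values.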
+    sort(tablet.timestamps.begin(), tablet.timestamps.begin() + tablet.rowSize);
     for (int i = 0; i < tablet.schemas.size(); i++) {
         tablet.values[i] = sortList(tablet.values[i], index, tablet.rowSize);
     }
@@ -394,7 +398,7 @@ void Session::sortTablet(Tablet &tablet) {
     delete[] index;
 }
 
-void Session::sortIndexByTimestamp(int *index, std::vector <int64_t> &timestamps, int length) {
+void Session::sortIndexByTimestamp(int* index, std::vector<int64_t>& timestamps, int length) {
     // Use Insert Sort Algorithm
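+    // Insertion sort is O(n) on already-sorted input, which is the common
+    // append-only case for time series batches.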
     if (length >= 2) {
         for (int i = 1; i < length; i++) {
@@ -417,16 +421,16 @@ void Session::sortIndexByTimestamp(int *index, std::vector <int64_t> &timestamps
 /**
  * Append value into buffer in Big Endian order to comply with IoTDB server
  */
-void Session::appendValues(string &buffer, char *value, int size) {
+void Session::appendValues(string &buffer, char* value, int size) {
     for (int i = size - 1; i >= 0; i--) {
         buffer.append(value + i, 1);
     }
 }
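+// Example (illustrative): on a little-endian host an int32 0x01020304 is
+// stored as bytes 04 03 02 01; appending them in reverse yields 01 02 03 04,
+// i.e. the big-endian order the server expects.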
 
-void Session::putValuesIntoBuffer(vector <TSDataType::TSDataType> &types, vector<char *> &values, string &buf) {
+void Session::putValuesIntoBuffer(vector<TSDataType::TSDataType>& types, vector<char*>& values, string& buf) {
     for (int i = 0; i < values.size(); i++) {
         int8_t typeNum = getDataTypeNumber(types[i]);
-        buf.append((char *) (&typeNum), sizeof(int8_t));
+        buf.append((char*)(&typeNum), sizeof(int8_t));
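+        // wire format per value: one type byte, then the payload in big-endian
+        // order; TEXT carries a big-endian length prefix followed by raw bytes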
         switch (types[i]) {
             case TSDataType::BOOLEAN:
                 buf.append(values[i], 1);
@@ -446,7 +450,7 @@ void Session::putValuesIntoBuffer(vector <TSDataType::TSDataType> &types, vector
             case TSDataType::TEXT:
                 string str(values[i]);
                 int len = str.length();
-                appendValues(buf, (char *) (&len), sizeof(int));
+                appendValues(buf, (char*)(&len), sizeof(int));
                 // no need to change the byte order of string value
                 buf.append(values[i], len);
                 break;
@@ -473,73 +477,89 @@ int8_t Session::getDataTypeNumber(TSDataType::TSDataType type) {
     }
 }
 
-void Session::open() {
-    try {
+void Session::open()
+{
+    try
+    {
         open(false, DEFAULT_TIMEOUT_MS);
     }
-    catch (IoTDBConnectionException e) {
+    catch (const IoTDBConnectionException &e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
 void Session::open(bool enableRPCCompression) {
-    try {
+    try
+    {
         open(enableRPCCompression, DEFAULT_TIMEOUT_MS);
     }
-    catch (IoTDBConnectionException e) {
+    catch (const IoTDBConnectionException &e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::open(bool enableRPCCompression, int connectionTimeoutInMs) {
-    if (!isClosed) {
+void Session::open(bool enableRPCCompression, int connectionTimeoutInMs)
+{
+    if (!isClosed)
+    {
         return;
     }
-    shared_ptr <TSocket> socket(new TSocket(host, rpcPort));
-    transport = std::make_shared<TFramedTransport> (socket);
+    shared_ptr<TSocket> socket(new TSocket(host, rpcPort));
+    transport = shared_ptr<TTransport>(new TFramedTransport(socket));
     socket->setConnTimeout(connectionTimeoutInMs);
-    if (!transport->isOpen()) {
-        try {
+    if (!transport->isOpen())
+    {
+        try
+        {
             transport->open();
-        }
-        catch (TTransportException e) {
+        }
+        catch (const TTransportException &e)
+        {
             throw IoTDBConnectionException(e.what());
         }
     }
-    if (enableRPCCompression) {
-        shared_ptr <TCompactProtocol> protocol(new TCompactProtocol(transport));
-        shared_ptr <TSIServiceIf> client_instance(new TSIServiceClient(protocol));
+    if (enableRPCCompression)
+    {
+        shared_ptr<TCompactProtocol> protocol(new TCompactProtocol(transport));
+        shared_ptr<TSIServiceIf> client_instance(new TSIServiceClient(protocol));
         client = client_instance;
     } else {
-        shared_ptr <TBinaryProtocol> protocol(new TBinaryProtocol(transport));
-        shared_ptr <TSIServiceIf> client_instance(new TSIServiceClient(protocol));
+        shared_ptr<TBinaryProtocol> protocol(new TBinaryProtocol(transport));
+        shared_ptr<TSIServiceIf> client_instance(new TSIServiceClient(protocol));
         client = client_instance;
     }
-    shared_ptr <TSOpenSessionReq> openReq(new TSOpenSessionReq());
+    shared_ptr<TSOpenSessionReq> openReq(new TSOpenSessionReq());
     openReq->__set_username(username);
     openReq->__set_password(password);
     openReq->__set_zoneId(zoneId);
-    try {
-        shared_ptr <TSOpenSessionResp> openResp(new TSOpenSessionResp());
-        client->openSession(*openResp, *openReq);
+    try
+    {
+        shared_ptr<TSOpenSessionResp> openResp(new TSOpenSessionResp());
+        client->openSession(*openResp, *openReq);
         RpcUtils::verifySuccess(openResp->status);
-        if (protocolVersion != openResp->serverProtocolVersion) {
+        if (protocolVersion != openResp->serverProtocolVersion)
+        {
             if (openResp->serverProtocolVersion == 0) {// less than 0.10
-                throw logic_error(string("Protocol not supported, Client version is ") + to_string(protocolVersion) +
-                                  ", but Server version is " + to_string(openResp->serverProtocolVersion));
+                char buf[111];
+                snprintf(buf, sizeof(buf), "Protocol not supported, Client version is %d, but Server version is %d", protocolVersion, openResp->serverProtocolVersion);
+                throw logic_error(buf);
             }
         }
 
         sessionId = openResp->sessionId;
         statementId = client->requestStatementId(sessionId);
-
+
         if (zoneId != "") {
             setTimeZone(zoneId);
         } else {
             zoneId = getTimeZone();
         }
     }
-    catch (exception e) {
+    catch (const exception &e)
+    {
         transport->close();
         throw IoTDBConnectionException(e.what());
     }
@@ -547,47 +567,59 @@ void Session::open(bool enableRPCCompression, int connectionTimeoutInMs) {
 }
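+// Typical lifecycle sketch (illustrative; assumes the usual host/port/user/
+// password constructor and placeholder credentials):
+//   Session session("127.0.0.1", 6667, "root", "root");
+//   session.open(false, DEFAULT_TIMEOUT_MS);
+//   ... use the session ...
+//   session.close();   // no destructor closes it automatically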
 
 
-void Session::close() {
-    if (isClosed) {
+
+void Session::close()
+{
+    if (isClosed)
+    {
         return;
     }
-    shared_ptr <TSCloseSessionReq> req(new TSCloseSessionReq());
+    shared_ptr<TSCloseSessionReq> req(new TSCloseSessionReq());
     req->__set_sessionId(sessionId);
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
-        client->closeSession(*resp, *req);
-    }
-    catch (exception e) {
-        throw IoTDBConnectionException(
-                string("Error occurs when closing session at server. Maybe server is down. ") + e.what());
-    }
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
+        client->closeSession(*resp, *req);
+    }
+    catch (const exception &e)
+    {
+        char buf[256];
+        snprintf(buf, sizeof(buf), "Error occurs when closing session at server. Maybe server is down. %s", e.what());
+        throw IoTDBConnectionException(buf);
+    }
     isClosed = true;
-    if (transport != nullptr) {
+    if (transport != nullptr)
+    {
         transport->close();
     }
 }
 
 
-void Session::insertRecord(string deviceId, int64_t time, vector <string> &measurements, vector <string> &values) {
-    shared_ptr <TSInsertStringRecordReq> req(new TSInsertStringRecordReq());
+void Session::insertRecord(string deviceId, int64_t time, vector<string>& measurements, vector<string>& values)
+{
+    shared_ptr<TSInsertStringRecordReq> req(new TSInsertStringRecordReq());
     req->__set_sessionId(sessionId);
     req->__set_deviceId(deviceId);
     req->__set_timestamp(time);
     req->__set_measurements(measurements);
     req->__set_values(values);
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
-        client->insertStringRecord(*resp, *req);
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
+        client->insertStringRecord(*resp, *req);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::insertRecord(string deviceId, int64_t time, vector <string> &measurements,
-                           vector <TSDataType::TSDataType> &types, vector<char *> &values) {
-    shared_ptr <TSInsertRecordReq> req(new TSInsertRecordReq());
+void Session::insertRecord(string deviceId, int64_t time, vector<string>& measurements,
+    vector<TSDataType::TSDataType>& types, vector<char*>& values)
+{
+    shared_ptr<TSInsertRecordReq> req(new TSInsertRecordReq());
     req->__set_sessionId(sessionId);
     req->__set_deviceId(deviceId);
     req->__set_timestamp(time);
@@ -595,55 +627,54 @@ void Session::insertRecord(string deviceId, int64_t time, vector <string> &measu
     string buffer;
     putValuesIntoBuffer(types, values, buffer);
     req->__set_values(buffer);
-    shared_ptr <TSStatus> resp(new TSStatus());
+    shared_ptr<TSStatus> resp(new TSStatus());
     try {
-        client->insertRecord(*resp, *req);
+        client->insertRecord(*resp, *req);
         RpcUtils::verifySuccess(*resp);
-    } catch (IoTDBConnectionException &e) {
+    } catch (IoTDBConnectionException& e) {
         throw IoTDBConnectionException(e.what());
     }
 }
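+// Usage sketch (illustrative values): typed inserts pass each value as a raw
+// char* matching the declared TSDataType, e.g. for one INT64 measurement:
+//   vector<string> measurements{"s1"};
+//   vector<TSDataType::TSDataType> types{TSDataType::INT64};
+//   int64_t v = 42;
+//   vector<char*> values{(char*) &v};
+//   session.insertRecord("root.sg1.d1", 1646541649000LL, measurements, types, values);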
 
-void
-Session::insertRecords(vector <string> &deviceIds, vector <int64_t> &times, vector <vector<string>> &measurementsList,
-                       vector <vector<string>> &valuesList) {
+void Session::insertRecords(vector<string>& deviceIds, vector<int64_t>& times, vector<vector<string>>& measurementsList, vector<vector<string>>& valuesList) {
     int len = deviceIds.size();
     if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) {
         logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal");
         throw exception(e);
     }
-    shared_ptr <TSInsertStringRecordsReq> request(new TSInsertStringRecordsReq());
+    shared_ptr<TSInsertStringRecordsReq> request(new TSInsertStringRecordsReq());
     request->__set_sessionId(sessionId);
     request->__set_deviceIds(deviceIds);
     request->__set_timestamps(times);
     request->__set_measurementsList(measurementsList);
     request->__set_valuesList(valuesList);
 
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->insertStringRecords(*resp, *request);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::insertRecords(vector <string> &deviceIds, vector <int64_t> &times,
-                            vector <vector<string>> &measurementsList,
-                            vector <vector<TSDataType::TSDataType>> typesList,
-                            vector <vector<char *>> &valuesList) {
+void Session::insertRecords(vector<string>& deviceIds, vector<int64_t>& times,
+    vector<vector<string>>& measurementsList, vector<vector<TSDataType::TSDataType>> typesList,
+    vector<vector<char*>>& valuesList) {
     int len = deviceIds.size();
     if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) {
         logic_error e("deviceIds, times, measurementsList and valuesList's size should be equal");
         throw exception(e);
     }
-    shared_ptr <TSInsertRecordsReq> request(new TSInsertRecordsReq());
+    shared_ptr<TSInsertRecordsReq> request(new TSInsertRecordsReq());
     request->__set_sessionId(sessionId);
     request->__set_deviceIds(deviceIds);
     request->__set_timestamps(times);
     request->__set_measurementsList(measurementsList);
-    vector <string> bufferList;
+    vector<string> bufferList;
     for (int i = 0; i < valuesList.size(); i++) {
         string buffer;
         putValuesIntoBuffer(typesList[i], valuesList[i], buffer);
@@ -652,48 +683,47 @@ void Session::insertRecords(vector <string> &deviceIds, vector <int64_t> &times,
     request->__set_valuesList(bufferList);
 
     try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->insertRecords(*resp, *request);
         RpcUtils::verifySuccess(*resp);
-    } catch (IoTDBConnectionException &e) {
+    } catch (IoTDBConnectionException& e) {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::insertRecordsOfOneDevice(string deviceId, vector <int64_t> &times,
-                                       vector <vector<string>> measurementsList,
-                                       vector <vector<TSDataType::TSDataType>> typesList,
-                                       vector <vector<char *>> &valuesList) {
+void Session::insertRecordsOfOneDevice(string deviceId, vector<int64_t>& times,
+    vector<vector<string>> measurementsList, vector<vector<TSDataType::TSDataType>> typesList,
+    vector<vector<char*>>& valuesList) {
     insertRecordsOfOneDevice(deviceId, times, measurementsList, typesList, valuesList, false);
 }
 
-void Session::insertRecordsOfOneDevice(string deviceId, vector <int64_t> &times,
-                                       vector <vector<string>> measurementsList,
-                                       vector <vector<TSDataType::TSDataType>> typesList,
-                                       vector <vector<char *>> &valuesList, bool sorted) {
+void Session::insertRecordsOfOneDevice(string deviceId, vector<int64_t>& times,
+    vector<vector<string>> measurementsList, vector<vector<TSDataType::TSDataType>> typesList,
+    vector<vector<char*>>& valuesList, bool sorted) {
 
     if (sorted) {
         if (!checkSorted(times)) {
             throw BatchExecutionException("Times in InsertOneDeviceRecords are not in ascending order");
         }
     } else {
-        int *index = new int[times.size()];
+        int* index = new int[times.size()];
         for (int i = 0; i < times.size(); i++) {
             index[i] = i;
         }
 
         this->sortIndexByTimestamp(index, times, times.size());
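+        // as in sortTablet, sorting the index permutation does not reorder the
+        // timestamps, so sort them here as well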
+        sort(times.begin(), times.end());
         measurementsList = sortList(measurementsList, index, times.size());
         typesList = sortList(typesList, index, times.size());
         valuesList = sortList(valuesList, index, times.size());
         delete[] index;
     }
-    unique_ptr <TSInsertRecordsOfOneDeviceReq> request(new TSInsertRecordsOfOneDeviceReq());
+    unique_ptr<TSInsertRecordsOfOneDeviceReq> request(new TSInsertRecordsOfOneDeviceReq());
     request->__set_sessionId(sessionId);
     request->__set_deviceId(deviceId);
     request->__set_timestamps(times);
     request->__set_measurementsList(measurementsList);
-    vector <string> bufferList;
+    vector<string> bufferList;
     for (int i = 0; i < valuesList.size(); i++) {
         string buffer;
         putValuesIntoBuffer(typesList[i], valuesList[i], buffer);
@@ -702,25 +732,27 @@ void Session::insertRecordsOfOneDevice(string deviceId, vector <int64_t> &times,
     request->__set_valuesList(bufferList);
 
     try {
-        unique_ptr <TSStatus> resp(new TSStatus());
+        unique_ptr<TSStatus> resp(new TSStatus());
         client->insertRecordsOfOneDevice(*resp, *request);
         RpcUtils::verifySuccess(*resp);
-    } catch (const exception &e) {
+    } catch (const exception& e) {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::insertTablet(Tablet &tablet) {
-    try {
+void Session::insertTablet(Tablet& tablet) {
+    try
+    {
         insertTablet(tablet, false);
     }
-    catch (const exception &e) {
+    catch (const exception& e)
+    {
         logic_error error(e.what());
         throw exception(error);
     }
 }
 
-void Session::insertTablet(Tablet &tablet, bool sorted) {
+void Session::insertTablet(Tablet& tablet, bool sorted) {
     if (sorted) {
         if (!checkSorted(tablet)) {
             throw BatchExecutionException("Times in Tablet are not in ascending order");
@@ -729,10 +761,10 @@ void Session::insertTablet(Tablet &tablet, bool sorted) {
         sortTablet(tablet);
     }
 
-    shared_ptr <TSInsertTabletReq> request(new TSInsertTabletReq());
+    shared_ptr<TSInsertTabletReq> request(new TSInsertTabletReq());
     request->__set_sessionId(sessionId);
     request->deviceId = tablet.deviceId;
-    for (pair <string, TSDataType::TSDataType> schema : tablet.schemas) {
+    for (pair<string, TSDataType::TSDataType> schema : tablet.schemas) {
         request->measurements.push_back(schema.first);
         request->types.push_back(schema.second);
     }
@@ -740,28 +772,32 @@ void Session::insertTablet(Tablet &tablet, bool sorted) {
     request->__set_values(SessionUtils::getValue(tablet));
     request->__set_size(tablet.rowSize);
 
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->insertTablet(*resp, *request);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw new IoTDBConnectionException(e.what());
     }
 }
 
-void Session::insertTablets(map<string, Tablet *> &tablets) {
-    try {
+void Session::insertTablets(map<string, Tablet*>& tablets) {
+    try
+    {
         insertTablets(tablets, false);
     }
-    catch (const exception &e) {
+    catch (const exception& e)
+    {
         logic_error error(e.what());
         throw exception(error);
     }
 }
 
-void Session::insertTablets(map<string, Tablet *> &tablets, bool sorted) {
-    shared_ptr <TSInsertTabletsReq> request(new TSInsertTabletsReq());
+void Session::insertTablets(map<string, Tablet*>& tablets, bool sorted) {
+    shared_ptr<TSInsertTabletsReq> request(new TSInsertTabletsReq());
     request->__set_sessionId(sessionId);
 
     for (auto &item : tablets) {
@@ -774,9 +810,9 @@ void Session::insertTablets(map<string, Tablet *> &tablets, bool sorted) {
         }
 
         request->deviceIds.push_back(item.second->deviceId);
-        vector <string> measurements;
+        vector<string> measurements;
         vector<int> dataTypes;
-        for (pair <string, TSDataType::TSDataType> schema : item.second->schemas) {
+        for (pair<string, TSDataType::TSDataType> schema : item.second->schemas) {
             measurements.push_back(schema.first);
             dataTypes.push_back(schema.second);
         }
@@ -786,39 +822,43 @@ void Session::insertTablets(map<string, Tablet *> &tablets, bool sorted) {
         request->valuesList.push_back(SessionUtils::getValue(*(item.second)));
         request->sizeList.push_back(item.second->rowSize);
 
-        try {
-            shared_ptr <TSStatus> resp(new TSStatus());
+        try
+        {
+            shared_ptr<TSStatus> resp(new TSStatus());
             client->insertTablets(*resp, *request);
             RpcUtils::verifySuccess(*resp);
         }
-        catch (const exception &e) {
+        catch (const exception& e)
+        {
             throw IoTDBConnectionException(e.what());
         }
     }
 }
 
-void Session::testInsertRecord(string deviceId, int64_t time, vector <string> &measurements, vector <string> &values) {
-    shared_ptr <TSInsertStringRecordReq> req(new TSInsertStringRecordReq());
+void Session::testInsertRecord(string deviceId, int64_t time, vector<string>& measurements, vector<string>& values) {
+    shared_ptr<TSInsertStringRecordReq> req(new TSInsertStringRecordReq());
     req->__set_sessionId(sessionId);
     req->__set_deviceId(deviceId);
     req->__set_timestamp(time);
     req->__set_measurements(measurements);
     req->__set_values(values);
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
         client->insertStringRecord(*resp, *req);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException e) {
+    catch (const IoTDBConnectionException &e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::testInsertTablet(Tablet &tablet) {
-    shared_ptr <TSInsertTabletReq> request(new TSInsertTabletReq());
+void Session::testInsertTablet(Tablet& tablet) {
+    shared_ptr<TSInsertTabletReq> request(new TSInsertTabletReq());
     request->__set_sessionId(sessionId);
     request->deviceId = tablet.deviceId;
-    for (pair <string, TSDataType::TSDataType> schema : tablet.schemas) {
+    for (pair<string, TSDataType::TSDataType> schema : tablet.schemas) {
         request->measurements.push_back(schema.first);
         request->types.push_back(schema.second);
     }
@@ -826,121 +866,139 @@ void Session::testInsertTablet(Tablet &tablet) {
     request->__set_values(SessionUtils::getValue(tablet));
     request->__set_size(tablet.rowSize);
 
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->testInsertTablet(*resp, *request);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw new IoTDBConnectionException(e.what());
     }
 }
 
-void Session::testInsertRecords(vector <string> &deviceIds, vector <int64_t> &times,
-                                vector <vector<string>> &measurementsList, vector <vector<string>> &valuesList) {
+void Session::testInsertRecords(vector<string>& deviceIds, vector<int64_t>& times, vector<vector<string>>& measurementsList, vector<vector<string>>& valuesList) {
     int len = deviceIds.size();
     if (len != times.size() || len != measurementsList.size() || len != valuesList.size()) {
         logic_error error("deviceIds, times, measurementsList and valuesList's size should be equal");
         throw exception(error);
     }
-    shared_ptr <TSInsertStringRecordsReq> request(new TSInsertStringRecordsReq());
+    shared_ptr<TSInsertStringRecordsReq> request(new TSInsertStringRecordsReq());
     request->__set_sessionId(sessionId);
     request->__set_deviceIds(deviceIds);
     request->__set_timestamps(times);
     request->__set_measurementsList(measurementsList);
     request->__set_valuesList(valuesList);
 
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->insertStringRecords(*resp, *request);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::deleteTimeseries(string path) {
-    vector <string> paths;
+void Session::deleteTimeseries(string path)
+{
+    vector<string> paths;
     paths.push_back(path);
     deleteTimeseries(paths);
 }
 
-void Session::deleteTimeseries(vector <string> &paths) {
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
+void Session::deleteTimeseries(vector<string>& paths)
+{
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
         client->deleteTimeseries(*resp, sessionId, paths);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::deleteData(string path, int64_t time) {
-    vector <string> paths;
+void Session::deleteData(string path, int64_t time)
+{
+    vector<string> paths;
     paths.push_back(path);
     deleteData(paths, time);
 }
 
-void Session::deleteData(vector <string> &deviceId, int64_t time) {
-    shared_ptr <TSDeleteDataReq> req(new TSDeleteDataReq());
+void Session::deleteData(vector<string>& deviceId, int64_t time)
+{
+    shared_ptr<TSDeleteDataReq> req(new TSDeleteDataReq());
     req->__set_sessionId(sessionId);
     req->__set_paths(deviceId);
     req->__set_endTime(time);
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
-        client->deleteData(*resp, *req);
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
+        client->deleteData(*resp, *req);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (exception &e) {
+    catch (const exception& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::setStorageGroup(string storageGroupId) {
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
-        client->setStorageGroup(*resp, sessionId, storageGroupId);
+void Session::setStorageGroup(string storageGroupId)
+{
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
+        client->setStorageGroup(*resp, sessionId, storageGroupId);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::deleteStorageGroup(string storageGroup) {
-    vector <string> storageGroups;
+void Session::deleteStorageGroup(string storageGroup)
+{
+    vector<string> storageGroups;
     storageGroups.push_back(storageGroup);
     deleteStorageGroups(storageGroups);
 }
 
-void Session::deleteStorageGroups(vector <string> &storageGroups) {
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
+void Session::deleteStorageGroups(vector<string>& storageGroups)
+{
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
         client->deleteStorageGroups(*resp, sessionId, storageGroups);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::createTimeseries(string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding,
-                               CompressionType::CompressionType compressor) {
-    try {
+void Session::createTimeseries(string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor) {
+    try
+    {
         createTimeseries(path, dataType, encoding, compressor, NULL, NULL, NULL, "");
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::createTimeseries(string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding,
-                               CompressionType::CompressionType compressor,
-                               map <string, string> *props, map <string, string> *tags,
-                               map <string, string> *attributes, string measurementAlias) {
-    shared_ptr <TSCreateTimeseriesReq> req(new TSCreateTimeseriesReq());
+void Session::createTimeseries(string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor,
+    map<string, string>* props, map<string, string>* tags, map<string, string>* attributes, string measurementAlias)
+{
+    shared_ptr<TSCreateTimeseriesReq> req(new TSCreateTimeseriesReq());
     req->__set_sessionId(sessionId);
     req->__set_path(path);
     req->__set_dataType(dataType);
@@ -960,23 +1018,21 @@ void Session::createTimeseries(string path, TSDataType::TSDataType dataType, TSE
         req->__set_measurementAlias(measurementAlias);
     }
 
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
-        client->createTimeseries(*resp, *req);
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
+        client->createTimeseries(*resp, *req);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-void Session::createMultiTimeseries(vector <string> paths, vector <TSDataType::TSDataType> dataTypes,
-                                    vector <TSEncoding::TSEncoding> encodings,
-                                    vector <CompressionType::CompressionType> compressors,
-                                    vector <map<string, string>> *propsList, vector <map<string, string>> *tagsList,
-                                    vector <map<string, string>> *attributesList,
-                                    vector <string> *measurementAliasList) {
-    shared_ptr <TSCreateMultiTimeseriesReq> request(new TSCreateMultiTimeseriesReq());
+void Session::createMultiTimeseries(vector<string> paths, vector<TSDataType::TSDataType> dataTypes, vector<TSEncoding::TSEncoding> encodings, vector<CompressionType::CompressionType> compressors,
+    vector<map<string, string>>* propsList, vector<map<string, string>>* tagsList, vector<map<string, string>>* attributesList, vector<string>* measurementAliasList) {
+    shared_ptr<TSCreateMultiTimeseriesReq> request(new TSCreateMultiTimeseriesReq());
     request->__set_sessionId(sessionId);
     request->__set_paths(paths);
 
@@ -1012,88 +1068,101 @@ void Session::createMultiTimeseries(vector <string> paths, vector <TSDataType::T
         request->__set_measurementAliasList(*measurementAliasList);
     }
 
-    try {
-        shared_ptr <TSStatus> resp(new TSStatus());
+    try
+    {
+        shared_ptr<TSStatus> resp(new TSStatus());
         client->createMultiTimeseries(*resp, *request);
         RpcUtils::verifySuccess(*resp);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
 
 bool Session::checkTimeseriesExists(string path) {
     try {
-        std::unique_ptr <SessionDataSet> dataset = executeQueryStatement("SHOW TIMESERIES " + path);
-        bool isExisted = dataset->hasNext();
-        dataset->closeOperationHandle();
-        return isExisted;
+        string sql = "SHOW TIMESERIES " + path;
+        return executeQueryStatement(sql)->hasNext();
     }
     catch (exception e) {
         throw IoTDBConnectionException(e.what());
     }
 }
 
-string Session::getTimeZone() {
-    if (zoneId != "") {
+string Session::getTimeZone()
+{
+    if (zoneId != "")
+    {
         return zoneId;
     }
-    shared_ptr <TSGetTimeZoneResp> resp(new TSGetTimeZoneResp());
-    try {
+    shared_ptr<TSGetTimeZoneResp> resp(new TSGetTimeZoneResp());
+    try
+    {
         client->getTimeZone(*resp, sessionId);
         RpcUtils::verifySuccess(resp->status);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
     return resp->timeZone;
 }
 
-void Session::setTimeZone(string zoneId) {
-    shared_ptr <TSSetTimeZoneReq> req(new TSSetTimeZoneReq());
+void Session::setTimeZone(string zoneId)
+{
+    shared_ptr<TSSetTimeZoneReq> req(new TSSetTimeZoneReq());
     req->__set_sessionId(sessionId);
     req->__set_timeZone(zoneId);
-    shared_ptr <TSStatus> resp(new TSStatus());
-    try {
-        client->setTimeZone(*resp, *req);
+    shared_ptr<TSStatus> resp(new TSStatus());
+    try
+    {
+        client->setTimeZone(*resp, *req);
     }
-    catch (IoTDBConnectionException &e) {
+    catch (IoTDBConnectionException& e)
+    {
         throw IoTDBConnectionException(e.what());
     }
     RpcUtils::verifySuccess(*resp);
     this->zoneId = zoneId;
 }
 
-unique_ptr <SessionDataSet> Session::executeQueryStatement(string sql) {
-    shared_ptr <TSExecuteStatementReq> req(new TSExecuteStatementReq());
+unique_ptr<SessionDataSet> Session::executeQueryStatement(string sql)
+{
+    shared_ptr<TSExecuteStatementReq> req(new TSExecuteStatementReq());
     req->__set_sessionId(sessionId);
     req->__set_statementId(statementId);
     req->__set_statement(sql);
     req->__set_fetchSize(fetchSize);
-    shared_ptr <TSExecuteStatementResp> resp(new TSExecuteStatementResp());
-    try {
-        client->executeStatement(*resp, *req);
+    shared_ptr<TSExecuteStatementResp> resp(new TSExecuteStatementResp());
+    try
+    {
+        client->executeStatement(*resp, *req);
         RpcUtils::verifySuccess(resp->status);
     }
-    catch (IoTDBConnectionException e) {
+    catch (const IoTDBConnectionException &e)
+    {
         throw IoTDBConnectionException(e.what());
     }
-    shared_ptr <TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp->queryDataSet));
+    shared_ptr<TSQueryDataSet> queryDataSet(new TSQueryDataSet(resp->queryDataSet));
     return unique_ptr<SessionDataSet>(new SessionDataSet(
-            sql, resp->columns, resp->dataTypeList, resp->queryId, statementId, client, sessionId, queryDataSet));
+        sql, resp->columns, resp->dataTypeList, resp->queryId, client, sessionId, queryDataSet));
 }
 
-void Session::executeNonQueryStatement(string sql) {
-    shared_ptr <TSExecuteStatementReq> req(new TSExecuteStatementReq());
+void Session::executeNonQueryStatement(string sql)
+{
+    shared_ptr<TSExecuteStatementReq> req(new TSExecuteStatementReq());
     req->__set_sessionId(sessionId);
     req->__set_statementId(statementId);
     req->__set_statement(sql);
-    shared_ptr <TSExecuteStatementResp> resp(new TSExecuteStatementResp());
-    try {
-        client->executeUpdateStatement(*resp, *req);
+    shared_ptr<TSExecuteStatementResp> resp(new TSExecuteStatementResp());
+    try
+    {
+        client->executeUpdateStatement(*resp, *req);
         RpcUtils::verifySuccess(resp->status);
     }
-    catch (IoTDBConnectionException e) {
+    catch (const IoTDBConnectionException &e)
+    {
         throw IoTDBConnectionException(e.what());
     }
 }
diff --git a/client-cpp/src/main/Session.h b/client-cpp/src/main/Session.h
index b83e0d6..a702229 100644
--- a/client-cpp/src/main/Session.h
+++ b/client-cpp/src/main/Session.h
@@ -16,8 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-#ifndef __IOTDB_SESSION
-#define __IOTDB_SESSION
+#pragma once
 
 #include <string>
 #include <vector>
@@ -48,63 +46,59 @@ using ::apache::thrift::transport::TBufferedTransport;
 using ::apache::thrift::transport::TFramedTransport;
 using ::apache::thrift::TException;
 
-class IoTDBConnectionException : public std::exception {
-public:
-    IoTDBConnectionException() : message() {}
-
-    IoTDBConnectionException(const char *m) : message(m) {}
-
-    IoTDBConnectionException(std::string m) : message(m) {}
-
-    virtual const char *what() const throw() {
-        return message.c_str();
-    }
+class IoTDBConnectionException : public std::exception
+{
+public:
+    IoTDBConnectionException() : message() {}
+    IoTDBConnectionException(const char* m) : message(m) {}
+    IoTDBConnectionException(std::string m) : message(m) {}
+    virtual const char* what() const throw ()
+    {
+        return message.c_str();
+    }
 
-private:
-    std::string message;
+private:
+    std::string message;
 };
 
-class BatchExecutionException : public std::exception {
+class BatchExecutionException : public std::exception
+{
 public:
     BatchExecutionException() : message() {}
-
-    BatchExecutionException(const char *m) : message(m) {}
-
+    BatchExecutionException(const char* m) : message(m) {}
     BatchExecutionException(std::string m) : message(m) {}
-
-    BatchExecutionException(std::vector <TSStatus> statusList) : message(), statusList(statusList) {}
-
-    BatchExecutionException(std::vector <TSStatus> statusList, std::string m) : message(m), statusList(statusList) {}
-
-    virtual const char *what() const throw() {
+    BatchExecutionException(std::vector<TSStatus> statusList) : message(), statusList(statusList) {}
+    BatchExecutionException(std::vector<TSStatus> statusList, std::string m) : message(m), statusList(statusList) {}
+    virtual const char* what() const throw ()
+    {
         return message.c_str();
     }
-
-    std::vector <TSStatus> statusList;
+    std::vector<TSStatus> statusList;
 private:
     std::string message;
 
 };
 
-class UnSupportedDataTypeException : public std::exception {
+class UnSupportedDataTypeException : public std::exception
+{
 private:
     std::string message;
 public:
     UnSupportedDataTypeException() : message() {}
-
-    UnSupportedDataTypeException(const char *m) : message(m) {}
-
+    UnSupportedDataTypeException(const char* m) : message(m) {}
     UnSupportedDataTypeException(std::string m) : message("UnSupported dataType: " + m) {}
 };
 
-namespace CompressionType {
+namespace CompressionType {
 
-    enum CompressionType {
+    enum CompressionType
+    {
         UNCOMPRESSED, SNAPPY, GZIP, LZO, SDT, PAA, PLA, LZ4
     };
 }
-namespace TSDataType {
-    enum TSDataType {
+namespace TSDataType {
+    enum TSDataType
+    {
         BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT, NULLTYPE
     };
 }
@@ -176,59 +170,48 @@ namespace TSStatusCode {
         TIME_OUT = 701,
         NO_LEADER = 702,
         UNSUPPORTED_OPERATION = 703,
-        NODE_READ_ONLY = 704,
+        NODE_READ_ONLY = 704,
         INCOMPATIBLE_VERSION = 203,
     };
 }
 
-class RpcUtils {
+class RpcUtils
+{
 public:
-    std::shared_ptr <TSStatus> SUCCESS_STATUS;
-
+    std::shared_ptr<TSStatus> SUCCESS_STATUS;
     RpcUtils() {
         SUCCESS_STATUS = std::make_shared<TSStatus>();
         SUCCESS_STATUS->__set_code(TSStatusCode::SUCCESS_STATUS);
     }
-
-    static void verifySuccess(TSStatus &status);
-
-    static void verifySuccess(std::vector <TSStatus> &statuses);
-
+    static void verifySuccess(TSStatus& status);
+    static void verifySuccess(std::vector<TSStatus>& statuses);
     static TSStatus getStatus(TSStatusCode::TSStatusCode tsStatusCode);
-
     static TSStatus getStatus(int code, std::string message);
-
-    static std::shared_ptr <TSExecuteStatementResp> getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode);
-
-    static std::shared_ptr <TSExecuteStatementResp>
-    getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, std::string message);
-
-    static std::shared_ptr <TSExecuteStatementResp> getTSExecuteStatementResp(TSStatus &status);
-
-    static std::shared_ptr <TSFetchResultsResp> getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode);
-
-    static std::shared_ptr <TSFetchResultsResp>
-    getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, std::string appendMessage);
-
-    static std::shared_ptr <TSFetchResultsResp> getTSFetchResultsResp(TSStatus &status);
+    static std::shared_ptr<TSExecuteStatementResp> getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode);
+    static std::shared_ptr<TSExecuteStatementResp> getTSExecuteStatementResp(TSStatusCode::TSStatusCode tsStatusCode, std::string message);
+    static std::shared_ptr<TSExecuteStatementResp> getTSExecuteStatementResp(TSStatus& status);
+    static std::shared_ptr<TSFetchResultsResp> getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode);
+    static std::shared_ptr<TSFetchResultsResp> getTSFetchResultsResp(TSStatusCode::TSStatusCode tsStatusCode, std::string appendMessage);
+    static std::shared_ptr<TSFetchResultsResp> getTSFetchResultsResp(TSStatus& status);
 };
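+// Callers use verifySuccess as a guard after every RPC, e.g.
+//   RpcUtils::verifySuccess(resp->status);
+// which throws when the returned status code is not SUCCESS_STATUS.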
 
 // Simulate the ByteBuffer class in Java
 class MyStringBuffer {
 private:
-    char *getchar(int len) {
-        char *ret = new char[len];
+    char* getchar(int len)
+    {
+        char* ret = new char[len];
         for (int i = pos; i < pos + len; i++)
             ret[pos + len - 1 - i] = str[i];
         pos += len;
         return ret;
     }
 
-    void putchar(int len, char *ins) {
+    void putchar(int len, char* ins)
+    {
         for (int i = len - 1; i > -1; i--)
             str += ins[i];
     }
-
 public:
     std::string str;
     int pos;
@@ -244,91 +227,112 @@ public:
         this->pos = 0;
     }
 
-    int getInt() {
-        char *data = getchar(4);
-        int ret = *(int *) data;
+    int getInt()
+    {
+        char* data = getchar(4);
+        int ret = *(int*)data;
         delete[]data;
         return ret;
     }
 
-    int64_t getLong() {
-        char *data = getchar(8);
-        int64_t ret = *(int64_t *) data;
+    int64_t getLong()
+    {
+        char* data = getchar(8);
+        int64_t ret = *(int64_t*)data;
         delete[]data;
         return ret;
     }
 
-    float getFloat() {
-        char *data = getchar(4);
-        float ret = *(float *) data;
+    float getFloat()
+    {
+        char* data = getchar(4);
+        float ret = *(float*)data;
         delete[]data;
         return ret;
     }
 
-    double getDouble() {
-        char *data = getchar(8);
-        double ret = *(double *) data;
+    double getDouble()
+    {
+        char* data = getchar(8);
+        double ret = *(double*)data;
         delete[]data;
         return ret;
     }
 
-    char getChar() {
-        char *data = getchar(1);
-        char ret = *(char *) data;
+    char getChar()
+    {
+        char* data = getchar(1);
+        char ret = *(char*)data;
         delete[]data;
         return ret;
     }
 
-    bool getBool() {
-        return getChar() == 1;
+    bool getBool()
+    {
+        return getChar() == 1;
     }
 
-    std::string getString() {
+    std::string getString()
+    {
         int len = getInt();
         std::string ret;
         for (int i = 0; i < len; i++) ret.append(1, getChar());
         return ret;
     }
 
-    void putInt(int ins) {
-        char *data = (char *) &ins;
+    void putInt(int ins)
+    {
+        char* data = (char*)&ins;
         putchar(4, data);
     }
 
-    void putLong(int64_t ins) {
-        char *data = (char *) &ins;
+    void putLong(int64_t ins)
+    {
+        char* data = (char*)&ins;
         putchar(8, data);
     }
 
-    void putFloat(float ins) {
-        char *data = (char *) &ins;
+    void putFloat(float ins)
+    {
+        char* data = (char*)&ins;
         putchar(4, data);
     }
 
-    void putDouble(double ins) {
-        char *data = (char *) &ins;
+    void putDouble(double ins)
+    {
+        char* data = (char*)&ins;
         putchar(8, data);
     }
 
-    void putChar(char ins) {
-        char *data = (char *) &ins;
+    void putChar(char ins)
+    {
+        char* data = (char*)&ins;
         putchar(1, data);
     }
 
-    void putBool(bool ins) {
+    void putBool(bool ins)
+    {
         char tmp = 0;
         if (ins) tmp = 1;
         putChar(tmp);
     }
 
-    void putString(std::string ins) {
+    void putString(std::string ins)
+    {
         int len = ins.size();
         putInt(len);
         for (int i = 0; i < len; i++) putChar(ins[i]);
     }
 };
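+// Round-trip sketch (illustrative): writes and reads must occur in the same
+// order, since reads advance an internal cursor over the shared string:
+//   MyStringBuffer buf;
+//   buf.putInt(7);
+//   buf.putString("abc");
+//   int i = buf.getInt();            // 7
+//   std::string s = buf.getString(); // "abc"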
 
-class Field {
+class Field
+{
 public:
     TSDataType::TSDataType dataType;
     bool boolV;
@@ -337,12 +341,11 @@ public:
     float floatV;
     double doubleV;
     std::string stringV;
-
-    Field(TSDataType::TSDataType a) {
+    Field(TSDataType::TSDataType a)
+    {
         dataType = a;
     }
-
-    Field() {}
+    Field() {}
 };
 
 /*
@@ -364,14 +367,13 @@ private:
     static const int DEFAULT_SIZE = 1024;
 public:
     std::string deviceId; // deviceId of this tablet
-    std::vector <std::pair<std::string, TSDataType::TSDataType>> schemas; // the list of measurement schemas for creating the tablet
+    std::vector<std::pair<std::string, TSDataType::TSDataType>> schemas; // the list of measurement schemas for creating the tablet
     std::vector <int64_t> timestamps;   //timestamps in this tablet
-    std::vector <std::vector<std::string>> values;
+    std::vector<std::vector<std::string>> values;
     int rowSize;    //the number of rows to include in this tablet
     int maxRowNumber;   // the maximum number of rows for this tablet
 
-    Tablet() {}
-
+    Tablet() {}
     /**
    * Return a tablet with default specified row number. This is the standard
    * constructor (all Tablet should be the same size).
@@ -379,7 +381,7 @@ public:
    * @param deviceId   the name of the device specified to be written in
    * @param timeseries the list of measurement schemas for creating the tablet
    */
-    Tablet(std::string deviceId, std::vector <std::pair<std::string, TSDataType::TSDataType>> &timeseries) {
+    Tablet(std::string deviceId, std::vector<std::pair<std::string, TSDataType::TSDataType>>& timeseries) {
         Tablet(deviceId, timeseries, DEFAULT_SIZE);
     }
 
@@ -393,8 +395,7 @@ public:
      *                     batch
      * @param maxRowNumber the maximum number of rows for this tablet
      */
-    Tablet(std::string deviceId, std::vector <std::pair<std::string, TSDataType::TSDataType>> &schemas,
-           int maxRowNumber) {
+    Tablet(std::string deviceId, std::vector<std::pair<std::string, TSDataType::TSDataType>>& schemas, int maxRowNumber) {
         this->deviceId = deviceId;
         this->schemas = schemas;
         this->maxRowNumber = maxRowNumber;
@@ -412,34 +413,31 @@ public:
 
     void reset(); // Reset Tablet to the default state - set the rowSize to 0
     void createColumns();
-
     int getTimeBytesSize();
-
     int getValueByteSize(); // total byte size that values occupies
 };
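
For orientation, a sketch of constructing a Tablet against the declaration above (device and measurement names are illustrative). Note a pre-existing quirk in the two-argument constructor: the statement `Tablet(deviceId, timeseries, DEFAULT_SIZE);` in its body creates a discarded temporary rather than delegating (C++ delegation requires initializer-list syntax), so the three-argument form is the safe one:

    std::vector<std::pair<std::string, TSDataType::TSDataType>> schemas;
    schemas.push_back(std::make_pair("temperature", TSDataType::FLOAT));
    schemas.push_back(std::make_pair("status", TSDataType::BOOLEAN));

    Tablet tablet("root.sg1.d1", schemas, 100); // room for up to 100 rows
    // timestamps and values (string-encoded cells, per the declaration
    // above) are then filled row by row before the tablet is handed to
    // Session::insertTablet.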
 
 class SessionUtils {
 public:
-    static std::string getTime(Tablet &tablet);
-
-    static std::string getValue(Tablet &tablet);
+    static std::string getTime(Tablet& tablet);
+    static std::string getValue(Tablet& tablet);
 };
 
-class RowRecord {
+class RowRecord
+{
 public:
     int64_t timestamp;
-    std::vector <Field> fields;
-
-    RowRecord(int64_t timestamp) {
+    std::vector<Field> fields;
+    RowRecord(int64_t timestamp)
+    {
         this->timestamp = timestamp;
     }
-
-    RowRecord(int64_t timestamp, std::vector <Field> &fields) {
+    RowRecord(int64_t timestamp, std::vector<Field> &fields) {
         this->timestamp = timestamp;
         this->fields = fields;
     }
-
-    RowRecord() {
+    RowRecord()
+    {
         this->timestamp = -1;
     }
 
@@ -447,38 +445,51 @@ public:
         this->fields.push_back(f);
     }
 
-    std::string toString() {
-        std::string ret = std::to_string(timestamp);
-        for (int i = 0; i < fields.size(); i++) {
+    std::string toString()
+    {
+        char buf[111];
+        sprintf(buf, "%lld", (long long) timestamp);
+        std::string ret = buf;
+        for (int i = 0; i < fields.size(); i++)
+        {
             ret.append("\t");
             TSDataType::TSDataType dataType = fields[i].dataType;
-            switch (dataType) {
-                case TSDataType::BOOLEAN: {
-                    std::string field = fields[i].boolV ? "true" : "false";
-                    ret.append(field);
+            switch (dataType)
+            {
+                case TSDataType::BOOLEAN:{
+                    if (fields[i].boolV) ret.append("true");
+                    else ret.append("false");
                     break;
                 }
-                case TSDataType::INT32: {
-                    ret.append(std::to_string(fields[i].intV));
+                case TSDataType::INT32:{
+                    char buf[111];
+                    sprintf(buf,"%d",fields[i].intV);
+                    ret.append(buf);
                     break;
                 }
                 case TSDataType::INT64: {
-                    ret.append(std::to_string(fields[i].longV));
+                    char buf[111];
+                    sprintf(buf, "%lld", (long long) fields[i].longV);
+                    ret.append(buf);
                     break;
                 }
-                case TSDataType::FLOAT: {
-                    ret.append(std::to_string(fields[i].floatV));
+                case TSDataType::FLOAT:{
+                    char buf[111];
+                    sprintf(buf,"%f",fields[i].floatV);
+                    ret.append(buf);
                     break;
                 }
-                case TSDataType::DOUBLE: {
-                    ret.append(std::to_string(fields[i].doubleV));
+                case TSDataType::DOUBLE:{
+                    char buf[111];
+                    sprintf(buf,"%lf",fields[i].doubleV);
+                    ret.append(buf);
                     break;
                 }
                 case TSDataType::TEXT: {
                     ret.append(fields[i].stringV);
                     break;
                 }
-                case TSDataType::NULLTYPE: {
+                case TSDataType::NULLTYPE:{
                     ret.append("NULL");
                 }
             }
@@ -488,17 +499,17 @@ public:
     }
 };
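
A quick illustration of the record container above; the timestamp and value are arbitrary:

    Field temperature(TSDataType::FLOAT);
    temperature.floatV = 25.3f;

    RowRecord row(1614649200000LL);
    row.fields.push_back(temperature);
    // row.toString() renders the timestamp and each field tab-separated,
    // using the sprintf formatting shown above.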
 
-class SessionDataSet {
+class SessionDataSet
+{
 private:
     bool hasCachedRecord = false;
     std::string sql;
     int64_t queryId;
-    int64_t statementId;
     int64_t sessionId;
-    std::shared_ptr <TSIServiceIf> client;
+    std::shared_ptr<TSIServiceIf> client;
     int batchSize = 1024;
-    std::vector <std::string> columnNameList;
-    std::vector <std::string> columnTypeDeduplicatedList;
+    std::vector<std::string> columnNameList;
+    std::vector<std::string> columnTypeDeduplicatedList;
     // duplicated column index -> origin index
     std::map<int, int> duplicateLocation;
     // column name -> column location
@@ -507,25 +518,22 @@ private:
     int columnSize = 0;
 
     int rowsIndex = 0; // used to record the row index in current TSQueryDataSet
-    std::shared_ptr <TSQueryDataSet> tsQueryDataSet;
+    std::shared_ptr<TSQueryDataSet> tsQueryDataSet;
     MyStringBuffer tsQueryDataSetTimeBuffer;
-    std::vector <std::unique_ptr<MyStringBuffer>> valueBuffers;
-    std::vector <std::unique_ptr<MyStringBuffer>> bitmapBuffers;
+    std::vector<std::unique_ptr<MyStringBuffer>> valueBuffers;
+    std::vector<std::unique_ptr<MyStringBuffer>> bitmapBuffers;
     RowRecord rowRecord;
-    char *currentBitmap = NULL; // used to cache the current bitmap for every column
+    char* currentBitmap; // used to cache the current bitmap for every column
     static const int flag = 0x80; // used to do `or` operation with bitmap to judge whether the value is null
 
 public:
-    SessionDataSet() {}
-
-    SessionDataSet(std::string sql, std::vector <std::string> &columnNameList,
-                   std::vector <std::string> &columnTypeList, int64_t queryId, int64_t statementId,
-                   std::shared_ptr <TSIServiceIf> client, int64_t sessionId,
-                   std::shared_ptr <TSQueryDataSet> queryDataSet) : tsQueryDataSetTimeBuffer(queryDataSet->time) {
+    SessionDataSet(){}
+    SessionDataSet(std::string sql, std::vector<std::string>& columnNameList, std::vector<std::string>& columnTypeList, int64_t queryId,
+        std::shared_ptr<TSIServiceIf> client, int64_t sessionId, std::shared_ptr<TSQueryDataSet> queryDataSet) : tsQueryDataSetTimeBuffer(queryDataSet->time)
+    {
         this->sessionId = sessionId;
         this->sql = sql;
         this->queryId = queryId;
-        this->statementId = statementId;
         this->client = client;
         this->columnNameList = columnNameList;
         this->currentBitmap = new char[columnNameList.size()];
@@ -540,204 +548,122 @@ public:
                 this->columnMap[name] = i;
                 this->columnTypeDeduplicatedList.push_back(columnTypeList[i]);
             }
-            this->valueBuffers.push_back(
-                    std::unique_ptr<MyStringBuffer>(new MyStringBuffer(queryDataSet->valueList[i])));
-            this->bitmapBuffers.push_back(
-                    std::unique_ptr<MyStringBuffer>(new MyStringBuffer(queryDataSet->bitmapList[i])));
+            this->valueBuffers.push_back(std::unique_ptr<MyStringBuffer>(new MyStringBuffer(queryDataSet->valueList[i])));
+            this->bitmapBuffers.push_back(std::unique_ptr<MyStringBuffer>(new MyStringBuffer(queryDataSet->bitmapList[i])));
         }
         this->tsQueryDataSet = queryDataSet;
     }
 
-    ~SessionDataSet() {
-        if (currentBitmap != NULL) {
-            delete[] currentBitmap;
-            currentBitmap = NULL;
-        }
-    }
-
     int getBatchSize();
-
     void setBatchSize(int batchSize);
-
-    std::vector <std::string> getColumnNames();
-
+    std::vector<std::string> getColumnNames();
     bool hasNext();
-
     void constructOneRow();
-
     bool isNull(int index, int rowNum);
-
-    RowRecord *next();
-
+    RowRecord* next();
     void closeOperationHandle();
 };
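
The `flag = 0x80` constant drives the per-column null test: each bitmap byte tracks eight rows, most-significant bit first, and a cleared bit marks a null cell. A hedged sketch of that test (the real bookkeeping lives in the isNull implementation, which is not part of this header; the function name here is illustrative):

    // Assumption: bit (7 - rowInByte) of a bitmap byte is 1 when the row
    // holds a value. The test is a bitwise AND against the shifted flag.
    bool cellIsNull(char bitmapByte, int rowInByte) {
        const int flag = 0x80;
        return ((flag >> rowInByte) & bitmapByte) == 0;
    }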
 
 template<typename T>
-std::vector <T> sortList(std::vector <T> &valueList, int *index, int indexLength) {
-    std::vector <T> sortedValues(valueList.size());
+std::vector<T> sortList(std::vector<T>& valueList, int* index, int indexLength) {
+    std::vector<T> sortedValues(valueList.size());
     for (int i = 0; i < indexLength; i++) {
         sortedValues[i] = valueList[index[i]];
     }
     return sortedValues;
 }
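
sortList reorders one column by a precomputed permutation; this is presumably how sortTablet (declared below) keeps every value column aligned with the sorted timestamps. For example:

    std::vector<std::string> column{"a", "b", "c"};
    int index[] = {2, 0, 1}; // e.g. produced by sorting timestamps
    std::vector<std::string> sorted = sortList(column, index, 3);
    // sorted == {"c", "a", "b"}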
 
-class Session {
-private:
-    std::string host;
-    int rpcPort;
-    std::string username;
-    std::string password;
-    TSProtocolVersion::type protocolVersion = TSProtocolVersion::IOTDB_SERVICE_PROTOCOL_V3;
-    std::shared_ptr <TSIServiceIf> client;
-    std::shared_ptr <TTransport> transport;
-    bool isClosed = true;
-    int64_t sessionId;
-    int64_t statementId;
-    std::string zoneId;
-    int fetchSize;
-    const static int DEFAULT_FETCH_SIZE = 10000;
-    const static int DEFAULT_TIMEOUT_MS = 0;
-
-    bool checkSorted(Tablet &tablet);
-
-    bool checkSorted(std::vector <int64_t> &times);
-
-    void sortTablet(Tablet &tablet);
-
-    void sortIndexByTimestamp(int *index, std::vector <int64_t> &timestamps, int length);
-
-    std::string getTimeZone();
-
-    void setTimeZone(std::string zoneId);
-
-    void appendValues(std::string &buffer, char *value, int size);
-
-    void
-    putValuesIntoBuffer(std::vector <TSDataType::TSDataType> &types, std::vector<char *> &values, std::string &buf);
-
-    int8_t getDataTypeNumber(TSDataType::TSDataType type);
-
-public:
-    Session(std::string host, int rpcPort) : username("user"), password("password") {
-        this->host = host;
-        this->rpcPort = rpcPort;
-    }
-
-    Session(std::string host, int rpcPort, std::string username, std::string password)
-            : fetchSize(10000) {
-        this->host = host;
-        this->rpcPort = rpcPort;
-        this->username = username;
-        this->password = password;
-        this->zoneId = "UTC+08:00";
-    }
-
-    Session(std::string host, int rpcPort, std::string username, std::string password, int fetchSize) {
-        this->host = host;
-        this->rpcPort = rpcPort;
-        this->username = username;
-        this->password = password;
-        this->fetchSize = fetchSize;
-        this->zoneId = "UTC+08:00";
-    }
-
-    Session(std::string host, std::string rpcPort, std::string username = "user",
-            std::string password = "password", int fetchSize = 10000) {
-        this->host = host;
-        this->rpcPort = stoi(rpcPort);
-        this->username = username;
-        this->password = password;
-        this->fetchSize = fetchSize;
-        this->zoneId = "UTC+08:00";
-    }
-
-    ~Session();
-
-    void open();
-
-    void open(bool enableRPCCompression);
-
-    void open(bool enableRPCCompression, int connectionTimeoutInMs);
-
-    void close();
-
-    void insertRecord(std::string deviceId, int64_t time, std::vector <std::string> &measurements,
-                      std::vector <std::string> &values);
-
-    void insertRecord(std::string deviceId, int64_t time, std::vector <std::string> &measurements,
-                      std::vector <TSDataType::TSDataType> &types, std::vector<char *> &values);
-
-    void insertRecords(std::vector <std::string> &deviceIds, std::vector <int64_t> &times,
-                       std::vector <std::vector<std::string>> &measurementsList,
-                       std::vector <std::vector<std::string>> &valuesList);
-
-    void insertRecords(std::vector <std::string> &deviceIds, std::vector <int64_t> &times,
-                       std::vector <std::vector<std::string>> &measurementsList,
-                       std::vector <std::vector<TSDataType::TSDataType>> typesList,
-                       std::vector <std::vector<char *>> &valuesList);
-
-    void insertRecordsOfOneDevice(std::string deviceId, std::vector <int64_t> &times,
-                                  std::vector <std::vector<std::string>> measurementsList,
-                                  std::vector <std::vector<TSDataType::TSDataType>> typesList,
-                                  std::vector <std::vector<char *>> &valuesList);
-
-    void insertRecordsOfOneDevice(std::string deviceId, std::vector <int64_t> &times,
-                                  std::vector <std::vector<std::string>> measurementsList,
-                                  std::vector <std::vector<TSDataType::TSDataType>> typesList,
-                                  std::vector <std::vector<char *>> &valuesList, bool sorted);
-
-    void insertTablet(Tablet &tablet);
-
-    void insertTablet(Tablet &tablet, bool sorted);
-
-    void insertTablets(std::map<std::string, Tablet *> &tablets);
-
-    void insertTablets(std::map<std::string, Tablet *> &tablets, bool sorted);
-
-    void testInsertRecord(std::string deviceId, int64_t time, std::vector <std::string> &measurements,
-                          std::vector <std::string> &values);
-
-    void testInsertTablet(Tablet &tablet);
-
-    void testInsertRecords(std::vector <std::string> &deviceIds, std::vector <int64_t> &times,
-                           std::vector <std::vector<std::string>> &measurementsList,
-                           std::vector <std::vector<std::string>> &valuesList);
-
-    void deleteTimeseries(std::string path);
-
-    void deleteTimeseries(std::vector <std::string> &paths);
-
-    void deleteData(std::string path, int64_t time);
-
-    void deleteData(std::vector <std::string> &deviceId, int64_t time);
-
-    void setStorageGroup(std::string storageGroupId);
-
-    void deleteStorageGroup(std::string storageGroup);
-
-    void deleteStorageGroups(std::vector <std::string> &storageGroups);
-
-    void createTimeseries(std::string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding,
-                          CompressionType::CompressionType compressor);
-
-    void createTimeseries(std::string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding,
-                          CompressionType::CompressionType compressor,
-                          std::map <std::string, std::string> *props, std::map <std::string, std::string> *tags,
-                          std::map <std::string, std::string> *attributes, std::string measurementAlias);
+class Session
+{
+    private:
+        std::string host;
+        int rpcPort;
+        std::string username;
+        std::string password;
+        TSProtocolVersion::type protocolVersion = TSProtocolVersion::IOTDB_SERVICE_PROTOCOL_V3;
+        std::shared_ptr<TSIServiceIf> client;
+        std::shared_ptr<apache::thrift::transport::TSocket> transport;
+        bool isClosed = true;
+        int64_t sessionId;
+        int64_t statementId;
+        std::string zoneId;
+        int fetchSize;
+        const static int DEFAULT_FETCH_SIZE = 10000;
+        const static int DEFAULT_TIMEOUT_MS = 0;
+
+        bool checkSorted(Tablet& tablet);
+        bool checkSorted(std::vector<int64_t>& times);
+        void sortTablet(Tablet& tablet);
+        void sortIndexByTimestamp(int *index, std::vector<int64_t>& timestamps, int length);
+        std::string getTimeZone();
+        void setTimeZone(std::string zoneId);
+        void appendValues(std::string &buffer, char* value, int size);
+        void putValuesIntoBuffer(std::vector<TSDataType::TSDataType>& types, std::vector<char*>& values, std::string& buf);
+        int8_t getDataTypeNumber(TSDataType::TSDataType type);
+    public:
+        Session(std::string host, int rpcPort) : username("user"), password("password") {
+            this->host = host;
+            this->rpcPort = rpcPort;
+        }
 
-    void createMultiTimeseries(std::vector <std::string> paths, std::vector <TSDataType::TSDataType> dataTypes,
-                               std::vector <TSEncoding::TSEncoding> encodings,
-                               std::vector <CompressionType::CompressionType> compressors,
-                               std::vector <std::map<std::string, std::string>> *propsList,
-                               std::vector <std::map<std::string, std::string>> *tagsList,
-                               std::vector <std::map<std::string, std::string>> *attributesList,
-                               std::vector <std::string> *measurementAliasList);
+        Session(std::string host, int rpcPort, std::string username, std::string password)
+                : fetchSize(10000) {
+            this->host = host;
+            this->rpcPort = rpcPort;
+            this->username = username;
+            this->password = password;
+            this->zoneId = "UTC+08:00";
+        }
 
-    bool checkTimeseriesExists(std::string path);
+        Session(std::string host, int rpcPort, std::string username, std::string password, int fetchSize) {
+            this->host = host;
+            this->rpcPort = rpcPort;
+            this->username = username;
+            this->password = password;
+            this->fetchSize = fetchSize;
+            this->zoneId = "UTC+08:00";
+        }
 
-    std::unique_ptr <SessionDataSet> executeQueryStatement(std::string sql);
+        Session(std::string host, std::string rpcPort, std::string username = "user",
+                std::string password = "password", int fetchSize = 10000) {
+            this->host = host;
+            this->rpcPort = stoi(rpcPort);
+            this->username = username;
+            this->password = password;
+            this->fetchSize = fetchSize;
+            this->zoneId = "UTC+08:00";
+        }
 
-    void executeNonQueryStatement(std::string sql);
+        void open();
+        void open(bool enableRPCCompression);
+        void open(bool enableRPCCompression, int connectionTimeoutInMs);
+        void close();
+        void insertRecord(std::string deviceId, int64_t time, std::vector<std::string>& measurements, std::vector<std::string>& values);
+        void insertRecord(std::string deviceId, int64_t time, std::vector<std::string>& measurements, std::vector<TSDataType::TSDataType>& types, std::vector<char*>& values);
+        void insertRecords(std::vector<std::string>& deviceIds, std::vector<int64_t>& times, std::vector<std::vector<std::string>>& measurementsList, std::vector<std::vector<std::string>>& valuesList);
+        void insertRecords(std::vector<std::string>& deviceIds, std::vector<int64_t>& times, std::vector<std::vector<std::string>>& measurementsList, std::vector<std::vector<TSDataType::TSDataType>> typesList, std::vector<std::vector<char*>>& valuesList);
+        void insertRecordsOfOneDevice(std::string deviceId, std::vector<int64_t>& times, std::vector<std::vector<std::string>> measurementsList, std::vector<std::vector<TSDataType::TSDataType>> typesList, std::vector<std::vector<char*>>& valuesList);
+        void insertRecordsOfOneDevice(std::string deviceId, std::vector<int64_t>& times, std::vector<std::vector<std::string>> measurementsList, std::vector<std::vector<TSDataType::TSDataType>> typesList, std::vector<std::vector<char*>>& valuesList, bool sorted);
+        void insertTablet(Tablet& tablet);
+        void insertTablet(Tablet& tablet, bool sorted);
+        void insertTablets(std::map<std::string, Tablet*>& tablets);
+        void insertTablets(std::map<std::string, Tablet*>& tablets, bool sorted);
+        void testInsertRecord(std::string deviceId, int64_t time, std::vector<std::string>& measurements, std::vector<std::string>& values);
+        void testInsertTablet(Tablet& tablet);
+        void testInsertRecords(std::vector<std::string>& deviceIds, std::vector<int64_t>& times, std::vector<std::vector<std::string>>& measurementsList, std::vector<std::vector<std::string>>& valuesList);
+        void deleteTimeseries(std::string path);
+        void deleteTimeseries(std::vector<std::string>& paths);
+        void deleteData(std::string path, int64_t time);
+        void deleteData(std::vector<std::string>& deviceId, int64_t time);
+        void setStorageGroup(std::string storageGroupId);
+        void deleteStorageGroup(std::string storageGroup);
+        void deleteStorageGroups(std::vector<std::string>& storageGroups);
+        void createTimeseries(std::string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor);
+        void createTimeseries(std::string path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor,
+            std::map<std::string, std::string>* props, std::map<std::string, std::string>* tags, std::map<std::string, std::string>* attributes, std::string measurementAlias);
+        void createMultiTimeseries(std::vector<std::string> paths, std::vector<TSDataType::TSDataType> dataTypes, std::vector<TSEncoding::TSEncoding> encodings, std::vector<CompressionType::CompressionType> compressors,
+            std::vector<std::map<std::string, std::string>>* propsList, std::vector<std::map<std::string, std::string>>* tagsList, std::vector<std::map<std::string, std::string>>* attributesList, std::vector<std::string>* measurementAliasList);
+        bool checkTimeseriesExists(std::string path);
+        std::unique_ptr<SessionDataSet> executeQueryStatement(std::string sql);
+        void executeNonQueryStatement(std::string sql);
 };
-
-#endif // __IOTDB_SESSION
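
Taken together, the public surface above supports the usual open/write/query/close flow. A hedged end-to-end sketch; the host, credentials, and paths are placeholders, and the TSEncoding/CompressionType members are assumed names (check the generated thrift enums for the exact spelling):

    Session session("127.0.0.1", 6667, "root", "root");
    session.open(false); // without RPC compression

    session.setStorageGroup("root.sg1");
    session.createTimeseries("root.sg1.d1.temperature", TSDataType::FLOAT,
                             TSEncoding::RLE, CompressionType::SNAPPY);

    std::vector<std::string> measurements{"temperature"};
    std::vector<std::string> values{"25.3"};
    session.insertRecord("root.sg1.d1", 1LL, measurements, values);

    std::unique_ptr<SessionDataSet> dataSet =
        session.executeQueryStatement("select temperature from root.sg1.d1");
    while (dataSet->hasNext()) {
        RowRecord* row = dataSet->next();
        // consume row->timestamp / row->fields, or row->toString()
    }
    dataSet->closeOperationHandle();
    session.close();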
diff --git a/client-cpp/src/test/CMakeLists.txt b/client-cpp/src/test/CMakeLists.txt
index 2ace790..ba4ddfc 100644
--- a/client-cpp/src/test/CMakeLists.txt
+++ b/client-cpp/src/test/CMakeLists.txt
@@ -29,7 +29,7 @@ INCLUDE_DIRECTORIES(/usr/local/include)
 # Add Session related include files
 INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/../main/generated-sources-cpp)
 # Add Thrift include directory
-INCLUDE_DIRECTORIES(${TOOLS_DIR}/thrift/target/thrift-0.14.1/lib/cpp/src)
+INCLUDE_DIRECTORIES(${TOOLS_DIR}/thrift/target/thrift-0.13.0/lib/cpp/src)
 
 find_package(Boost REQUIRED)
 IF (DEFINED BOOST_INCLUDEDIR)
diff --git a/client-cpp/src/test/main.cpp b/client-cpp/src/test/main.cpp
index 9476343..2256526 100644
--- a/client-cpp/src/test/main.cpp
+++ b/client-cpp/src/test/main.cpp
@@ -18,7 +18,6 @@
  */
 
 #define CATCH_CONFIG_MAIN
-
 #include <catch.hpp>
 #include "Session.h"
 
@@ -28,15 +27,14 @@ struct SessionListener : Catch::TestEventListenerBase {
 
     using TestEventListenerBase::TestEventListenerBase;
 
-    void testCaseStarting(Catch::TestCaseInfo const &testInfo) override {
+    void testCaseStarting( Catch::TestCaseInfo const& testInfo ) override {
         // Perform some setup before a test case is run
         session->open(false);
     }
 
-    void testCaseEnded(Catch::TestCaseStats const &testCaseStats) override {
+    void testCaseEnded( Catch::TestCaseStats const& testCaseStats ) override {
         // Tear-down after a test case is run
         session->close();
     }
 };
-
 CATCH_REGISTER_LISTENER( SessionListener )
\ No newline at end of file
diff --git a/client-py/iotdb/Session.py b/client-py/iotdb/Session.py
index 5a44312..5db100a 100644
--- a/client-py/iotdb/Session.py
+++ b/client-py/iotdb/Session.py
@@ -649,7 +649,6 @@ class Session(object):
             resp.columnNameIndexMap,
             resp.queryId,
             self.__client,
-            self.__statement_id,
             self.__session_id,
             resp.queryDataSet,
             resp.ignoreTimeStamp,
diff --git a/client-py/iotdb/utils/IoTDBConstants.py b/client-py/iotdb/utils/IoTDBConstants.py
index eb2d89c..f053af7 100644
--- a/client-py/iotdb/utils/IoTDBConstants.py
+++ b/client-py/iotdb/utils/IoTDBConstants.py
@@ -28,11 +28,6 @@ class TSDataType(Enum):
     DOUBLE = 4
     TEXT = 5
 
-    # this method is implemented to avoid the issue reported by:
-    # https://bugs.python.org/issue30545
-    def __eq__(self, other) -> bool:
-        return self.value == other.value
-
 
 @unique
 class TSEncoding(Enum):
@@ -46,11 +41,6 @@ class TSEncoding(Enum):
     REGULAR = 7
     GORILLA = 8
 
-    # this method is implemented to avoid the issue reported by:
-    # https://bugs.python.org/issue30545
-    def __eq__(self, other) -> bool:
-        return self.value == other.value
-
 
 @unique
 class Compressor(Enum):
@@ -62,8 +52,3 @@ class Compressor(Enum):
     PAA = 5
     PLA = 6
     LZ4 = 7
-
-    # this method is implemented to avoid the issue reported by:
-    # https://bugs.python.org/issue30545
-    def __eq__(self, other) -> bool:
-        return self.value == other.value
diff --git a/client-py/iotdb/utils/IoTDBRpcDataSet.py b/client-py/iotdb/utils/IoTDBRpcDataSet.py
index 6520a04..f71b5d9 100644
--- a/client-py/iotdb/utils/IoTDBRpcDataSet.py
+++ b/client-py/iotdb/utils/IoTDBRpcDataSet.py
@@ -41,12 +41,10 @@ class IoTDBRpcDataSet(object):
         ignore_timestamp,
         query_id,
         client,
-        statement_id,
         session_id,
         query_data_set,
         fetch_size,
     ):
-        self.__statement_id = statement_id
         self.__session_id = session_id
         self.__ignore_timestamp = ignore_timestamp
         self.__sql = sql
@@ -111,7 +109,7 @@ class IoTDBRpcDataSet(object):
         if self.__client is not None:
             try:
                 status = self.__client.closeOperation(
-                    TSCloseOperationReq(self.__session_id, self.__query_id, self.__statement_id)
+                    TSCloseOperationReq(self.__session_id, self.__query_id)
                 )
                 logger.debug(
                     "close session {}, message: {}".format(
diff --git a/client-py/iotdb/utils/SessionDataSet.py b/client-py/iotdb/utils/SessionDataSet.py
index 7d49c5e..f0f7266 100644
--- a/client-py/iotdb/utils/SessionDataSet.py
+++ b/client-py/iotdb/utils/SessionDataSet.py
@@ -39,7 +39,6 @@ class SessionDataSet(object):
         column_name_index,
         query_id,
         client,
-        statement_id,
         session_id,
         query_data_set,
         ignore_timestamp,
@@ -52,7 +51,6 @@ class SessionDataSet(object):
             ignore_timestamp,
             query_id,
             client,
-            statement_id,
             session_id,
             query_data_set,
             1024,
diff --git a/client-py/pom.xml b/client-py/pom.xml
index a52e0a5..c677dea 100644
--- a/client-py/pom.xml
+++ b/client-py/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.iotdb</groupId>
         <artifactId>iotdb-parent</artifactId>
-        <version>0.12.4</version>
+        <version>0.12.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>iotdb-python-api</artifactId>
diff --git a/client-py/setup.py b/client-py/setup.py
index b6aa5a1..43cb845 100644
--- a/client-py/setup.py
+++ b/client-py/setup.py
@@ -31,7 +31,7 @@ print(long_description)
 
 setuptools.setup(
     name="apache-iotdb",  # Replace with your own username
-    version="0.12.4",
+    version="0.12.0",
     author=" Apache Software Foundation",
     author_email="dev@iotdb.apache.org",
     description="Apache IoTDB client API",
diff --git a/cluster/pom.xml b/cluster/pom.xml
index b270fc1..5a4c9bd 100644
--- a/cluster/pom.xml
+++ b/cluster/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.iotdb</groupId>
         <artifactId>iotdb-parent</artifactId>
-        <version>0.12.4</version>
+        <version>0.12.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>iotdb-cluster</artifactId>
@@ -34,13 +34,26 @@
         <cluster.it.skip>${cluster.test.skip}</cluster.it.skip>
         <cluster.ut.skip>${cluster.test.skip}</cluster.ut.skip>
     </properties>
+    <repositories>
+        <!--   repository for moquette    -->
+        <repository>
+            <id>bintray</id>
+            <url>https://jcenter.bintray.com</url>
+            <releases>
+                <enabled>true</enabled>
+            </releases>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+        </repository>
+    </repositories>
     <dependencies>
         <!-- The version of thrift is overridden because using 0.13.0 in the cluster module
         will cause unclear bugs -->
         <dependency>
             <groupId>org.apache.thrift</groupId>
             <artifactId>libthrift</artifactId>
-            <version>0.14.1</version>
+            <version>0.12.0</version>
         </dependency>
         <dependency>
             <groupId>org.apache.iotdb</groupId>
@@ -106,7 +119,7 @@
         <dependency>
             <groupId>org.awaitility</groupId>
             <artifactId>awaitility</artifactId>
-            <version>4.1.0</version>
+            <version>4.0.2</version>
             <scope>test</scope>
         </dependency>
         <!-- for mocked test-->
diff --git a/cluster/src/assembly/resources/sbin/start-node.bat b/cluster/src/assembly/resources/sbin/start-node.bat
index 0629850..c324bb7 100755
--- a/cluster/src/assembly/resources/sbin/start-node.bat
+++ b/cluster/src/assembly/resources/sbin/start-node.bat
@@ -41,17 +41,13 @@ for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
 
 set JAVA_VERSION=%MAJOR_VERSION%
 
-@REM we do not check jdk that version less than 1.8 because they are too stale...
-IF "%JAVA_VERSION%" == "6" (
-	echo IoTDB only supports jdk >= 8, please check your java version.
-	goto finally
-)
-IF "%JAVA_VERSION%" == "7" (
-	echo IoTDB only supports jdk >= 8, please check your java version.
-	goto finally
+IF NOT %JAVA_VERSION% == 8 (
+	IF NOT %JAVA_VERSION% == 11 (
+		echo IoTDB only supports jdk8 or jdk11, please check your java version.
+		goto finally
+	)
 )
 
-
 if "%OS%" == "Windows_NT" setlocal
 
 pushd %~dp0..
@@ -105,9 +101,10 @@ set JAVA_OPTS=-ea^
 
 @REM ***** CLASSPATH library setting *****
 @REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH="%IOTDB_HOME%\lib\*"
+set CLASSPATH="%IOTDB_HOME%\lib"
 
-@REM this special suffix 'iotdb.ClusterMain' is mandatory as stop-node.bat uses it to filter the process id.
+@REM Append every jar under "%IOTDB_HOME%\lib" to the CLASSPATH via the wildcard entry below.
+set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*"
 set CLASSPATH=%CLASSPATH%;iotdb.ClusterMain
 goto okClasspath
 
@@ -133,4 +130,4 @@ pause
 
 pause
 
-ENDLOCAL
+ENDLOCAL
\ No newline at end of file
diff --git a/cluster/src/assembly/resources/sbin/stop-node.sh b/cluster/src/assembly/resources/sbin/stop-node.sh
index c5dba79..55961c0 100644
--- a/cluster/src/assembly/resources/sbin/stop-node.sh
+++ b/cluster/src/assembly/resources/sbin/stop-node.sh
@@ -21,12 +21,12 @@
 
 PIDS=$(ps ax | grep -i 'ClusterMain' | grep java | grep -v grep | awk '{print $1}')
 sig=0
-for every_pid in ${PIDS}
+for evry_pid in ${PIDS}
 do
-  cwd_path=$(ls -l /proc/$every_pid | grep "cwd ->" | grep -v grep | awk '{print $NF}')
+  cwd_path=$(ls -l /proc/$evry_pid | grep "cwd ->" | grep -v grep | awk '{print $NF}')
   pwd_path=$(/bin/pwd)
   if [[ $pwd_path =~ $cwd_path ]]; then
-    kill -s TERM $every_pid
+    kill -s TERM $evry_pid
     echo "close IoTDB"
     sig=1
   fi
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
index 73a375e..27aa61b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterMain.java
@@ -96,13 +96,6 @@ public class ClusterMain {
       return;
     }
 
-    // if client ip is the default address, set it same with internal ip
-    if (IoTDBDescriptor.getInstance().getConfig().getRpcAddress().equals("0.0.0.0")) {
-      IoTDBDescriptor.getInstance()
-          .getConfig()
-          .setRpcAddress(ClusterDescriptor.getInstance().getConfig().getInternalIp());
-    }
-
     String mode = args[0];
 
     logger.info("Running mode {}", mode);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
index 0950958..8b954ec 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/DataClientProvider.java
@@ -28,6 +28,7 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client;
 
+import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TProtocolFactory;
 
 import java.io.IOException;
@@ -42,8 +43,6 @@ public class DataClientProvider {
 
   private SyncClientPool dataSyncClientPool;
 
-  private static final String GET_CLIENT_FAILED_MSG = "can not get client for node=";
-
   public DataClientProvider(TProtocolFactory factory) {
     if (!ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) {
       dataSyncClientPool = new SyncClientPool(new SyncDataClient.FactorySync(factory));
@@ -61,7 +60,7 @@ public class DataClientProvider {
   }
 
   /**
-   * Get a thrift client from the head of deque that will connect to "node" using the data port.
+   * Get a thrift client that will connect to "node" using the data port.
    *
    * @param node the node to be connected
    * @param timeout timeout threshold of connection
@@ -69,23 +68,7 @@ public class DataClientProvider {
   public AsyncDataClient getAsyncDataClient(Node node, int timeout) throws IOException {
     AsyncDataClient client = (AsyncDataClient) getDataAsyncClientPool().getClient(node);
     if (client == null) {
-      throw new IOException(GET_CLIENT_FAILED_MSG + node);
-    }
-    client.setTimeout(timeout);
-    return client;
-  }
-
-  /**
-   * Get a thrift client from the tail of deque that will connect to "node" using the data port for
-   * refresh.
-   *
-   * @param node the node to be connected
-   * @param timeout timeout threshold of connection
-   */
-  public AsyncDataClient getAsyncDataClientForRefresh(Node node, int timeout) throws IOException {
-    AsyncDataClient client = (AsyncDataClient) getDataAsyncClientPool().getClientForRefresh(node);
-    if (client == null) {
-      throw new IOException(GET_CLIENT_FAILED_MSG + node);
+      throw new IOException("can not get client for node=" + node);
     }
     client.setTimeout(timeout);
     return client;
@@ -96,34 +79,15 @@ public class DataClientProvider {
    * org.apache.iotdb.cluster.utils.ClientUtils#putBackSyncClient(Client)} to put the client back
    * into the client pool, otherwise there is a risk of client leakage.
    *
-   * <p>Get a thrift client from the head of deque that will connect to "node" using the data port.
+   * <p>Get a thrift client that will connect to "node" using the data port.
    *
    * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
-  public SyncDataClient getSyncDataClient(Node node, int timeout) throws IOException {
+  public SyncDataClient getSyncDataClient(Node node, int timeout) throws TException {
     SyncDataClient client = (SyncDataClient) getDataSyncClientPool().getClient(node);
     if (client == null) {
-      throw new IOException(GET_CLIENT_FAILED_MSG + node);
-    }
-    client.setTimeout(timeout);
-    return client;
-  }
-
-  /**
-   * IMPORTANT!!! After calling this function, the caller should make sure to call {@link
-   * org.apache.iotdb.cluster.utils.ClientUtils#putBackSyncClient(Client)} to put the client back
-   * into the client pool, otherwise there is a risk of client leakage.
-   *
-   * <p>Get a thrift client from the tail of deque that will connect to "node" using the data port.
-   *
-   * @param node the node to be connected
-   * @param timeout timeout threshold of connection
-   */
-  public SyncDataClient getSyncDataClientForRefresh(Node node, int timeout) throws IOException {
-    SyncDataClient client = (SyncDataClient) getDataSyncClientPool().getClientForRefresh(node);
-    if (client == null) {
-      throw new IOException(GET_CLIENT_FAILED_MSG + node);
+      throw new TException("can not get client for node=" + node);
     }
     client.setTimeout(timeout);
     return client;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
index ddabff7..a7441e4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncClientPool.java
@@ -53,28 +53,6 @@ public class AsyncClientPool {
   }
 
   /**
-   * Get a client of the given node from the cache if one is available, or null.
-   *
-   * <p>IMPORTANT!!! The caller should check whether the return value is null or not!
-   *
-   * @param node the node want to connect
-   * @return if the node can connect, return the client, otherwise null
-   */
-  public AsyncClient getClientForRefresh(Node node) {
-    ClusterNode clusterNode = new ClusterNode(node);
-    // As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
-    Deque<AsyncClient> clientStack =
-        clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
-      if (clientStack.isEmpty()) {
-        return null;
-      } else {
-        return clientStack.pollLast();
-      }
-    }
-  }
-
-  /**
    * See getClient(Node node, boolean activatedOnly)
    *
    * @param node
@@ -107,7 +85,7 @@ public class AsyncClientPool {
     // As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
     Deque<AsyncClient> clientStack =
         clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
       if (clientStack.isEmpty()) {
         int nodeClientNum = nodeClientNumMap.getOrDefault(clusterNode, 0);
         if (nodeClientNum >= maxConnectionForEachNode) {
@@ -145,12 +123,12 @@ public class AsyncClientPool {
     long waitStart = System.currentTimeMillis();
     while (clientStack.isEmpty()) {
       try {
-        clientStack.wait(waitClientTimeutMS);
+        this.wait(waitClientTimeutMS);
         if (clientStack.isEmpty() && System.currentTimeMillis() - waitStart >= waitClientTimeutMS) {
           logger.warn(
-              "{} Cannot get an available client after {}ms, create a new one.",
-              asyncClientFactory,
-              waitClientTimeutMS);
+              "Cannot get an available client after {}ms, create a new one.",
+              waitClientTimeutMS);
           AsyncClient asyncClient = asyncClientFactory.getAsyncClient(clusterNode, this);
           nodeClientNumMap.computeIfPresent(clusterNode, (n, oldValue) -> oldValue + 1);
           return asyncClient;
@@ -181,22 +159,21 @@ public class AsyncClientPool {
     if (call != null) {
       logger.warn("A using client {} is put back while running {}", client.hashCode(), call);
     }
-
-    Deque<AsyncClient> clientStack =
-        clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
+      // As clientCaches is a ConcurrentHashMap, computeIfAbsent is thread-safe.
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       clientStack.push(client);
-      clientStack.notifyAll();
+      this.notifyAll();
     }
   }
 
   void onError(Node node) {
     ClusterNode clusterNode = new ClusterNode(node);
     // clean all cached clients when network fails
-    Deque<AsyncClient> clientStack =
-        clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       while (!clientStack.isEmpty()) {
         AsyncClient client = clientStack.pop();
         if (client instanceof AsyncDataClient) {
@@ -206,7 +183,7 @@ public class AsyncClientPool {
         }
       }
       nodeClientNumMap.put(clusterNode, 0);
-      clientStack.notifyAll();
+      this.notifyAll();
       NodeStatusManager.getINSTANCE().deactivate(node);
     }
   }
@@ -218,9 +195,9 @@ public class AsyncClientPool {
 
   void recreateClient(Node node) {
     ClusterNode clusterNode = new ClusterNode(node);
-    Deque<AsyncClient> clientStack =
-        clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
+      Deque<AsyncClient> clientStack =
+          clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
       try {
         AsyncClient asyncClient = asyncClientFactory.getAsyncClient(node, this);
         clientStack.push(asyncClient);
@@ -228,7 +205,7 @@ public class AsyncClientPool {
         logger.error("Cannot create a new client for {}", node, e);
         nodeClientNumMap.computeIfPresent(clusterNode, (n, cnt) -> cnt - 1);
       }
-      clientStack.notifyAll();
+      this.notifyAll();
     }
   }
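
This hunk collapses the per-deque monitors into a single pool-wide monitor: every wait, push, and notifyAll now pairs with `this`, so a client returned for any node can wake any waiter. For readers following along outside Java, a condensed C++ analogue of that discipline (names are illustrative; the real pool additionally tracks per-node client counts and creates a fresh client on wait timeout):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    class SingleMonitorPool {
        std::mutex monitor;                 // plays the role of `this`
        std::condition_variable available;
        std::deque<int> idle;               // stand-in for cached clients
    public:
        int borrow() {
            std::unique_lock<std::mutex> lock(monitor);
            available.wait(lock, [this] { return !idle.empty(); });
            int client = idle.front();
            idle.pop_front();
            return client;
        }
        void putBack(int client) {
            std::lock_guard<std::mutex> lock(monitor);
            idle.push_back(client);
            available.notify_all();         // mirrors this.notifyAll()
        }
    };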
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
index cf0cd1c..f1c56bf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
@@ -24,11 +24,11 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.rpc.thrift.TSDataService.AsyncClient;
 import org.apache.iotdb.cluster.server.RaftServer;
-import org.apache.iotdb.rpc.TNonblockingSocketWrapper;
 
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.async.TAsyncMethodCall;
 import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.transport.TNonblockingSocket;
 import org.apache.thrift.transport.TNonblockingTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,7 +66,7 @@ public class AsyncDataClient extends AsyncClient {
     super(
         protocolFactory,
         clientManager,
-        TNonblockingSocketWrapper.wrap(
+        new TNonblockingSocket(
             node.getInternalIp(), node.getDataPort(), RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
index 146c8b7..4d539a2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClient.java
@@ -23,10 +23,10 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.utils.ClusterUtils;
-import org.apache.iotdb.rpc.TNonblockingSocketWrapper;
 
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.transport.TNonblockingSocket;
 
 import java.io.IOException;
 
@@ -45,7 +45,7 @@ public class AsyncDataHeartbeatClient extends AsyncDataClient {
     super(
         protocolFactory,
         clientManager,
-        TNonblockingSocketWrapper.wrap(
+        new TNonblockingSocket(
             node.getInternalIp(),
             node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
             RaftServer.getConnectionTimeoutInMS()));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
index c615df0..d0bdde0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java
@@ -23,11 +23,11 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.rpc.thrift.TSMetaService.AsyncClient;
 import org.apache.iotdb.cluster.server.RaftServer;
-import org.apache.iotdb.rpc.TNonblockingSocketWrapper;
 
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.async.TAsyncMethodCall;
 import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.transport.TNonblockingSocket;
 import org.apache.thrift.transport.TNonblockingTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,7 +64,7 @@ public class AsyncMetaClient extends AsyncClient {
     super(
         protocolFactory,
         clientManager,
-        TNonblockingSocketWrapper.wrap(
+        new TNonblockingSocket(
             node.getInternalIp(), node.getMetaPort(), RaftServer.getConnectionTimeoutInMS()));
     this.node = node;
     this.pool = pool;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
index babeae4..0a05ec3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClient.java
@@ -23,10 +23,10 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.utils.ClusterUtils;
-import org.apache.iotdb.rpc.TNonblockingSocketWrapper;
 
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.transport.TNonblockingSocket;
 
 import java.io.IOException;
 
@@ -45,7 +45,7 @@ public class AsyncMetaHeartbeatClient extends AsyncMetaClient {
     super(
         protocolFactory,
         clientManager,
-        TNonblockingSocketWrapper.wrap(
+        new TNonblockingSocket(
             node.getInternalIp(),
             node.getMetaPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
             RaftServer.getConnectionTimeoutInMS()));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
index 7b11ccc..6d73265 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java
@@ -368,16 +368,6 @@ public class SyncClientAdaptor {
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
 
-  public static Integer getDeviceCount(
-      AsyncDataClient client, Node header, List<String> pathsToQuery)
-      throws InterruptedException, TException {
-    AtomicReference<Integer> remoteResult = new AtomicReference<>(null);
-    GenericHandler<Integer> handler = new GenericHandler<>(client.getNode(), remoteResult);
-
-    client.getDeviceCount(header, pathsToQuery, handler);
-    return handler.getResult(RaftServer.getReadOperationTimeoutMS());
-  }
-
   public static Set<String> getAllDevices(
       AsyncDataClient client, Node header, List<String> pathsToQuery)
       throws InterruptedException, TException {
@@ -496,7 +486,6 @@ public class SyncClientAdaptor {
       AsyncDataClient client,
       List<PartialPath> seriesPaths,
       List<Integer> dataTypeOrdinals,
-      Filter timeFilter,
       QueryContext context,
       Map<String, Set<String>> deviceMeasurements,
       Node header)
@@ -511,9 +500,7 @@ public class SyncClientAdaptor {
             deviceMeasurements,
             header,
             client.getNode());
-    if (timeFilter != null) {
-      request.setFilterBytes(SerializeUtils.serializeFilter(timeFilter));
-    }
+
     client.last(request, handler);
     return handler.getResult(RaftServer.getReadOperationTimeoutMS());
   }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
index f607fa3..758296f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientPool.java
@@ -52,27 +52,6 @@ public class SyncClientPool {
   }
 
   /**
-   * Get a client of the given node from the cache if one is available, or null.
-   *
-   * <p>IMPORTANT!!! The caller should check whether the return value is null or not!
-   *
-   * @param node the node want to connect
-   * @return if the node can connect, return the client, otherwise null
-   */
-  public Client getClientForRefresh(Node node) {
-    ClusterNode clusterNode = new ClusterNode(node);
-    // As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
-    Deque<Client> clientStack = clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
-      if (clientStack.isEmpty()) {
-        return null;
-      } else {
-        return clientStack.poll();
-      }
-    }
-  }
-
-  /**
    * See getClient(Node node, boolean activatedOnly)
    *
    * @param node the node want to connect
@@ -101,7 +80,7 @@ public class SyncClientPool {
 
     // As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
     Deque<Client> clientStack = clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
       if (clientStack.isEmpty()) {
         int nodeClientNum = nodeClientNumMap.getOrDefault(clusterNode, 0);
         if (nodeClientNum >= maxConnectionForEachNode) {
@@ -134,7 +113,7 @@ public class SyncClientPool {
     long waitStart = System.currentTimeMillis();
     while (clientStack.isEmpty()) {
       try {
-        clientStack.wait(waitClientTimeoutMS);
+        this.wait(waitClientTimeoutMS);
         if (clientStack.isEmpty()
             && System.currentTimeMillis() - waitStart >= waitClientTimeoutMS) {
           logger.warn(
@@ -165,7 +144,7 @@ public class SyncClientPool {
     ClusterNode clusterNode = new ClusterNode(node);
     // As clientCaches is ConcurrentHashMap, computeIfAbsent is thread safety.
     Deque<Client> clientStack = clientCaches.computeIfAbsent(clusterNode, n -> new ArrayDeque<>());
-    synchronized (clientStack) {
+    synchronized (this) {
       if (client.getInputProtocol() != null && client.getInputProtocol().getTransport().isOpen()) {
         clientStack.push(client);
         NodeStatusManager.getINSTANCE().activate(node);
@@ -179,7 +158,7 @@ public class SyncClientPool {
           NodeStatusManager.getINSTANCE().deactivate(node);
         }
       }
-      clientStack.notifyAll();
+      this.notifyAll();
     }
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
index cf92d14..56bae98 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java
@@ -24,7 +24,6 @@ import org.apache.iotdb.cluster.rpc.thrift.TSDataService.Client;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.db.utils.TestOnly;
 import org.apache.iotdb.rpc.RpcTransportFactory;
-import org.apache.iotdb.rpc.TConfigurationConst;
 import org.apache.iotdb.rpc.TimeoutChangeableTransport;
 
 import org.apache.thrift.protocol.TProtocol;
@@ -57,7 +56,6 @@ public class SyncDataClient extends Client implements Closeable {
         protocolFactory.getProtocol(
             RpcTransportFactory.INSTANCE.getTransport(
                 new TSocket(
-                    TConfigurationConst.defaultTConfiguration,
                     node.getInternalIp(),
                     node.getDataPort(),
                     RaftServer.getConnectionTimeoutInMS()))));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
index 83603d4..7b7cbdd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataHeartbeatClient.java
@@ -23,7 +23,6 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.utils.ClusterUtils;
 import org.apache.iotdb.rpc.RpcTransportFactory;
-import org.apache.iotdb.rpc.TConfigurationConst;
 
 import org.apache.thrift.protocol.TProtocolFactory;
 import org.apache.thrift.transport.TSocket;
@@ -42,7 +41,6 @@ public class SyncDataHeartbeatClient extends SyncDataClient {
         protocolFactory.getProtocol(
             RpcTransportFactory.INSTANCE.getTransport(
                 new TSocket(
-                    TConfigurationConst.defaultTConfiguration,
                     node.getInternalIp(),
                     node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET,
                     RaftServer.getConnectionTimeoutInMS()))));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
index d29e438..13b023c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java
@@ -23,7 +23,6 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.TSMetaService.Client;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.rpc.RpcTransportFactory;
-import org.apache.iotdb.rpc.TConfigurationConst;
 
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.protocol.TProtocolFactory;
@@ -53,7 +52,6 @@ public class SyncMetaClient extends Client implements Closeable {
         protocolFactory.getProtocol(
             RpcTransportFactory.INSTANCE.getTransport(
                 new TSocket(
-                    TConfigurationConst.defaultTConfiguration,
                     node.getInternalIp(),
                     node.getMetaPort(),
                     RaftServer.getConnectionTimeoutInMS()))));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
index 7a06668..f496491 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaHeartbeatClient.java
@@ -23,7 +23,6 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.utils.ClusterUtils;
 import org.apache.iotdb.rpc.RpcTransportFactory;
-import org.apache.iotdb.rpc.TConfigurationConst;
 
 import org.apache.thrift.protocol.TProtocolFactory;
 import org.apache.thrift.transport.TSocket;
@@ -42,7 +41,6 @@ public class SyncMetaHeartbeatClient extends SyncMetaClient {
         protocolFactory.getProtocol(
             RpcTransportFactory.INSTANCE.getTransport(
                 new TSocket(
-                    TConfigurationConst.defaultTConfiguration,
                     node.getInternalIp(),
                     node.getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET,
                     RaftServer.getConnectionTimeoutInMS()))));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java b/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
index 4acc8bb..3c305a8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java
@@ -40,15 +40,12 @@ import org.apache.iotdb.db.exception.metadata.MetadataException;
 import org.apache.iotdb.db.exception.metadata.PathNotExistException;
 import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.PartialPath;
-import org.apache.iotdb.db.qp.physical.BatchPlan;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan;
-import org.apache.iotdb.db.qp.physical.crud.SetDeviceTemplatePlan;
 import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan;
 import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
 import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
@@ -58,13 +55,13 @@ import org.apache.iotdb.rpc.TSStatusCode;
 import org.apache.iotdb.service.rpc.thrift.EndPoint;
 import org.apache.iotdb.service.rpc.thrift.TSStatus;
 
+import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -184,31 +181,12 @@ public class Coordinator {
       metaGroupMember.waitLeader();
       return metaGroupMember.forwardPlan(plan, metaGroupMember.getLeader(), null);
     }
-    try {
-      createSchemaIfNecessary(plan);
-    } catch (MetadataException | CheckConsistencyException e) {
-      logger.error("{}: Cannot find storage groups for {}", name, plan);
-      return StatusUtils.NO_STORAGE_GROUP;
-    }
+
     List<PartitionGroup> globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups();
     logger.debug("Forwarding global data plan {} to {} groups", plan, globalGroups.size());
     return forwardPlan(globalGroups, plan);
   }
 
-  public void createSchemaIfNecessary(PhysicalPlan plan)
-      throws MetadataException, CheckConsistencyException {
-    if (plan instanceof SetDeviceTemplatePlan) {
-      try {
-        IoTDB.metaManager.getStorageGroupPath(
-            new PartialPath(((SetDeviceTemplatePlan) plan).getPrefixPath()));
-      } catch (IllegalPathException e) {
-        // the plan has been checked
-      } catch (StorageGroupNotSetException e) {
-        ((CMManager) IoTDB.metaManager).createSchema(plan);
-      }
-    }
-  }
-
   /**
    * A partitioned plan (like a batch insertion) will be split into several sub-plans, each belonging
    * to a data group. These sub-plans will be sent to and executed on the corresponding groups
@@ -221,11 +199,6 @@ public class Coordinator {
       return StatusUtils.PARTITION_TABLE_NOT_READY;
     }
 
-    if (!checkPrivilegeForBatchExecution(plan)) {
-      return concludeFinalStatus(
-          plan, plan.getPaths().size(), true, false, false, null, Collections.emptyList());
-    }
-
     // split the plan into sub-plans that each only involve one data group
     Map<PhysicalPlan, PartitionGroup> planGroupMap;
     try {
@@ -258,20 +231,6 @@ public class Coordinator {
   }
 
   /**
-   * check if batch execution plan has privilege on any sg
-   *
-   * @param plan
-   * @return
-   */
-  private boolean checkPrivilegeForBatchExecution(PhysicalPlan plan) {
-    if (plan instanceof BatchPlan) {
-      return ((BatchPlan) plan).getResults().size() != plan.getPaths().size();
-    } else {
-      return true;
-    }
-  }
-
-  /**
    * Forward a plan to all DataGroupMember groups. Only when all nodes time out, will a TIME_OUT be
    * returned. The error messages from each group (if any) will be compacted into one string.
    *
@@ -296,8 +255,6 @@ public class Coordinator {
         status = forwardPlan(plan, partitionGroup);
       }
       if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
-          && !(plan instanceof SetDeviceTemplatePlan
-              && status.getCode() == TSStatusCode.DUPLICATED_TEMPLATE.getStatusCode())
           && (!(plan instanceof DeleteTimeSeriesPlan)
               || status.getCode() != TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode())) {
         // execution failed, record the error message
@@ -347,24 +304,25 @@ public class Coordinator {
   private TSStatus forwardPlan(Map<PhysicalPlan, PartitionGroup> planGroupMap, PhysicalPlan plan) {
     // the error codes from the groups that cannot execute the plan
     TSStatus status;
-    // need to create substatus for multiPlan
-
-    // InsertTabletPlan, InsertMultiTabletPlan, InsertRowsPlan and CreateMultiTimeSeriesPlan
-    // contains many rows,
-    // each will correspond to a TSStatus as its execution result,
-    // as the plan is split and the sub-plans may have interleaving ranges,
-    // we must assure that each TSStatus is placed to the right position
-    // e.g., an InsertTabletPlan contains 3 rows, row1 and row3 belong to NodeA and row2
-    // belongs to NodeB, when NodeA returns a success while NodeB returns a failure, the
-    // failure and success should be placed into proper positions in TSStatus.subStatus
-    if (plan instanceof InsertMultiTabletPlan
-        || plan instanceof CreateMultiTimeSeriesPlan
-        || plan instanceof InsertRowsPlan) {
-      status = forwardMultiSubPlan(planGroupMap, plan);
-    } else if (planGroupMap.size() == 1) {
+    if (planGroupMap.size() == 1) {
       status = forwardToSingleGroup(planGroupMap.entrySet().iterator().next());
     } else {
-      status = forwardToMultipleGroup(planGroupMap);
+      if (plan instanceof InsertTabletPlan
+          || plan instanceof InsertMultiTabletPlan
+          || plan instanceof CreateMultiTimeSeriesPlan
+          || plan instanceof InsertRowsPlan) {
+        // An InsertTabletPlan, InsertMultiTabletPlan, InsertRowsPlan or CreateMultiTimeSeriesPlan
+        // contains many rows,
+        // and each row corresponds to a TSStatus as its execution result.
+        // As the plan is split and the sub-plans may have interleaving ranges,
+        // we must ensure that each TSStatus is placed at the right position.
+        // E.g., if an InsertTabletPlan contains 3 rows, row1 and row3 belong to NodeA and row2
+        // belongs to NodeB; when NodeA returns a success while NodeB returns a failure, the
+        // failure and success should be placed into the proper positions in TSStatus.subStatus.
+        status = forwardMultiSubPlan(planGroupMap, plan);
+      } else {
+        status = forwardToMultipleGroup(planGroupMap);
+      }
     }
     if (plan instanceof InsertPlan
         && status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()
@@ -384,10 +342,10 @@ public class Coordinator {
 
   private TSStatus createTimeseriesForFailedInsertion(
       Map<PhysicalPlan, PartitionGroup> planGroupMap, InsertPlan plan) {
-    for (PhysicalPlan subPlan : planGroupMap.keySet()) {
-      ((InsertPlan) subPlan).recoverFromFailure();
-    }
     // try to create timeseries
+    if (plan.getFailedMeasurements() != null) {
+      plan.getPlanFromFailed();
+    }
     boolean hasCreate;
     try {
       hasCreate = ((CMManager) IoTDB.metaManager).createTimeseries(plan);
@@ -484,8 +442,8 @@ public class Coordinator {
     TSStatus[] subStatus = null;
     boolean noFailure = true;
     boolean isBatchFailure = false;
-    boolean isBatchRedirect = false;
-    int totalRowNum = parentPlan.getPaths().size();
+    EndPoint endPoint = null;
+    int totalRowNum = 0;
     // send sub-plans to each belonging data group and collect results
     for (Map.Entry<PhysicalPlan, PartitionGroup> entry : planGroupMap.entrySet()) {
       tmpStatus = forwardToSingleGroup(entry);
@@ -493,9 +451,10 @@ public class Coordinator {
       noFailure = (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) && noFailure;
       isBatchFailure =
           (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) || isBatchFailure;
-      if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()
-          || tmpStatus.isSetRedirectNode() && !(parentPlan instanceof CreateMultiTimeSeriesPlan)) {
-        if (parentPlan instanceof InsertMultiTabletPlan) {
+      if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
+        if (parentPlan instanceof InsertTabletPlan) {
+          totalRowNum = ((InsertTabletPlan) parentPlan).getRowCount();
+        } else if (parentPlan instanceof InsertMultiTabletPlan) {
           // the subStatus is the two-dimensional array,
           // The first dimension is the number of InsertTabletPlans,
           // and the second dimension is the number of rows per InsertTabletPlan
@@ -517,46 +476,25 @@ public class Coordinator {
             InsertTabletPlan tmpInsertTabletPlan = tmpMultiTabletPlan.getInsertTabletPlan(i);
             int parentIndex = tmpMultiTabletPlan.getParentIndex(i);
             int parentPlanRowCount = ((InsertMultiTabletPlan) parentPlan).getRowCount(parentIndex);
-            if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
-              subStatus[parentIndex] = tmpStatus.subStatus.get(i);
-              if (tmpStatus.subStatus.get(i).getCode()
-                  == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
-                if (subStatus[parentIndex].subStatus == null) {
-                  TSStatus[] tmpSubTsStatus = new TSStatus[parentPlanRowCount];
-                  Arrays.fill(tmpSubTsStatus, RpcUtils.SUCCESS_STATUS);
-                  subStatus[parentIndex].subStatus = Arrays.asList(tmpSubTsStatus);
-                }
-                TSStatus[] reorderTsStatus =
-                    subStatus[parentIndex].subStatus.toArray(new TSStatus[] {});
-
-                PartitionUtils.reordering(
-                    tmpInsertTabletPlan,
-                    reorderTsStatus,
-                    tmpStatus.subStatus.get(i).subStatus.toArray(new TSStatus[] {}));
-                subStatus[parentIndex].subStatus = Arrays.asList(reorderTsStatus);
-              }
-              if (tmpStatus.isSetRedirectNode()) {
-                if (tmpStatus.isSetRedirectNode()
-                    && tmpInsertTabletPlan.getMaxTime()
-                        == ((InsertMultiTabletPlan) parentPlan)
-                            .getInsertTabletPlan(parentIndex)
-                            .getMaxTime()) {
-                  subStatus[parentIndex].setRedirectNode(tmpStatus.redirectNode);
-                  isBatchRedirect = true;
-                }
-              }
-            } else if (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
-              if (tmpStatus.isSetRedirectNode()
-                  && tmpInsertTabletPlan.getMaxTime()
-                      == ((InsertMultiTabletPlan) parentPlan)
-                          .getInsertTabletPlan(parentIndex)
-                          .getMaxTime()) {
-                subStatus[parentIndex] =
-                    StatusUtils.getStatus(RpcUtils.SUCCESS_STATUS, tmpStatus.redirectNode);
-                isBatchRedirect = true;
-              }
+            if (subStatus[parentIndex].subStatus == null) {
+              TSStatus[] tmpSubTsStatus = new TSStatus[parentPlanRowCount];
+              Arrays.fill(tmpSubTsStatus, RpcUtils.SUCCESS_STATUS);
+              subStatus[parentIndex].subStatus = Arrays.asList(tmpSubTsStatus);
             }
+            TSStatus[] reorderTsStatus =
+                subStatus[parentIndex].subStatus.toArray(new TSStatus[] {});
+
+            PartitionUtils.reordering(
+                tmpInsertTabletPlan,
+                reorderTsStatus,
+                tmpStatus.subStatus.toArray(new TSStatus[] {}));
+            subStatus[parentIndex].subStatus = Arrays.asList(reorderTsStatus);
           }
+        } else if (parentPlan instanceof InsertTabletPlan) {
+          PartitionUtils.reordering(
+              (InsertTabletPlan) entry.getKey(),
+              subStatus,
+              tmpStatus.subStatus.toArray(new TSStatus[] {}));
         } else if (parentPlan instanceof CreateMultiTimeSeriesPlan) {
           CreateMultiTimeSeriesPlan subPlan = (CreateMultiTimeSeriesPlan) entry.getKey();
           for (int i = 0; i < subPlan.getIndexes().size(); i++) {
@@ -564,24 +502,8 @@ public class Coordinator {
           }
         } else if (parentPlan instanceof InsertRowsPlan) {
           InsertRowsPlan subPlan = (InsertRowsPlan) entry.getKey();
-          if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
-            for (int i = 0; i < subPlan.getInsertRowPlanIndexList().size(); i++) {
-              subStatus[subPlan.getInsertRowPlanIndexList().get(i)] = tmpStatus.subStatus.get(i);
-              if (tmpStatus.isSetRedirectNode()) {
-                subStatus[subPlan.getInsertRowPlanIndexList().get(i)].setRedirectNode(
-                    tmpStatus.getRedirectNode());
-                isBatchRedirect = true;
-              }
-            }
-          } else if (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
-            if (tmpStatus.isSetRedirectNode()) {
-              isBatchRedirect = true;
-              TSStatus redirectStatus =
-                  StatusUtils.getStatus(RpcUtils.SUCCESS_STATUS, tmpStatus.getRedirectNode());
-              for (int i = 0; i < subPlan.getInsertRowPlanIndexList().size(); i++) {
-                subStatus[subPlan.getInsertRowPlanIndexList().get(i)] = redirectStatus;
-              }
-            }
+          for (int i = 0; i < subPlan.getInsertRowPlanIndexList().size(); i++) {
+            subStatus[subPlan.getInsertRowPlanIndexList().get(i)] = tmpStatus.subStatus.get(i);
           }
         }
       }
@@ -596,25 +518,24 @@ public class Coordinator {
                 tmpStatus.getMessage(),
                 tmpStatus.subStatus));
       }
+
+      if (tmpStatus.isSetRedirectNode()) {
+        boolean isLastInsertTabletPlan =
+            parentPlan instanceof InsertTabletPlan
+                && ((InsertTabletPlan) entry.getKey()).getMaxTime()
+                    == ((InsertTabletPlan) parentPlan).getMaxTime();
+
+        boolean isLastInsertMultiTabletPlan =
+            parentPlan instanceof InsertMultiTabletPlan
+                && ((InsertMultiTabletPlan) entry.getKey()).getMaxTime()
+                    == ((InsertMultiTabletPlan) parentPlan).getMaxTime();
+
+        if (isLastInsertTabletPlan || isLastInsertMultiTabletPlan) {
+          endPoint = tmpStatus.getRedirectNode();
+        }
+      }
     }
-    return concludeFinalStatus(
-        parentPlan,
-        totalRowNum,
-        noFailure,
-        isBatchRedirect,
-        isBatchFailure,
-        subStatus,
-        errorCodePartitionGroups);
-  }
 
-  private TSStatus concludeFinalStatus(
-      PhysicalPlan parentPlan,
-      int totalRowNum,
-      boolean noFailure,
-      boolean isBatchRedirect,
-      boolean isBatchFailure,
-      TSStatus[] subStatus,
-      List<String> errorCodePartitionGroups) {
     if (parentPlan instanceof InsertMultiTabletPlan
         && !((InsertMultiTabletPlan) parentPlan).getResults().isEmpty()) {
       if (subStatus == null) {
@@ -657,13 +578,21 @@ public class Coordinator {
       }
     }
 
+    return concludeFinalStatus(
+        noFailure, endPoint, isBatchFailure, subStatus, errorCodePartitionGroups);
+  }
+
+  private TSStatus concludeFinalStatus(
+      boolean noFailure,
+      EndPoint endPoint,
+      boolean isBatchFailure,
+      TSStatus[] subStatus,
+      List<String> errorCodePartitionGroups) {
     TSStatus status;
     if (noFailure) {
-      if (isBatchRedirect) {
-        status = RpcUtils.getStatus(Arrays.asList(subStatus));
-        status.setCode(TSStatusCode.NEED_REDIRECTION.getStatusCode());
-      } else {
-        status = StatusUtils.OK;
+      status = StatusUtils.OK;
+      if (endPoint != null) {
+        status = StatusUtils.getStatus(status, endPoint);
       }
     } else if (isBatchFailure) {
       status = RpcUtils.getStatus(Arrays.asList(subStatus));
@@ -724,11 +653,15 @@ public class Coordinator {
 
   private TSStatus forwardDataPlanSync(PhysicalPlan plan, Node receiver, Node header)
       throws IOException {
-    RaftService.Client client =
-        metaGroupMember
-            .getClientProvider()
-            .getSyncDataClient(receiver, RaftServer.getWriteOperationTimeoutMS());
-
+    RaftService.Client client = null;
+    try {
+      client =
+          metaGroupMember
+              .getClientProvider()
+              .getSyncDataClient(receiver, RaftServer.getWriteOperationTimeoutMS());
+    } catch (TException e) {
+      throw new IOException(e);
+    }
     return this.metaGroupMember.forwardPlanSync(plan, receiver, header, client);
   }
 
@@ -752,7 +685,7 @@ public class Coordinator {
    * @param node the node to be connected
    * @param timeout timeout threshold of connection
    */
-  public SyncDataClient getSyncDataClient(Node node, int timeout) throws IOException {
+  public SyncDataClient getSyncDataClient(Node node, int timeout) throws TException {
     return metaGroupMember.getClientProvider().getSyncDataClient(node, timeout);
   }
 }
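
The reordering logic above has to scatter each sub-plan's per-row results back to those rows' positions in the parent plan. A toy sketch of that scatter step, using plain Strings in place of TSStatus and a hypothetical rowIndexes mapping in the spirit of InsertRowsPlan#getInsertRowPlanIndexList:

    import java.util.Arrays;
    import java.util.List;

    public class SubStatusSketch {
      // rowIndexes maps sub-plan row i to its row index in the parent plan
      static void scatter(int[] rowIndexes, List<String> subResults, String[] parentStatus) {
        for (int i = 0; i < rowIndexes.length; i++) {
          parentStatus[rowIndexes[i]] = subResults.get(i);
        }
      }

      public static void main(String[] args) {
        String[] parent = new String[3];
        Arrays.fill(parent, "SUCCESS");
        // rows 0 and 2 went to one group, row 1 to another
        scatter(new int[] {0, 2}, Arrays.asList("SUCCESS", "FAIL"), parent);
        scatter(new int[] {1}, Arrays.asList("SUCCESS"), parent);
        System.out.println(Arrays.toString(parent)); // [SUCCESS, SUCCESS, FAIL]
      }
    }
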
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
index c82597e..62d34f3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java
@@ -19,8 +19,6 @@
 
 package org.apache.iotdb.cluster.log;
 
-import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta;
-
 import java.io.IOException;
 import java.util.List;
 
@@ -28,8 +26,6 @@ public interface StableEntryManager {
 
   List<Log> getAllEntriesAfterAppliedIndex();
 
-  List<Log> getAllEntriesAfterCommittedIndex();
-
   void append(List<Log> entries, long maxHaveAppliedCommitIndex) throws IOException;
 
   void flushLogBuffer();
@@ -42,8 +38,6 @@ public interface StableEntryManager {
 
   HardState getHardState();
 
-  LogManagerMeta getMeta();
-
   /**
    * @param startIndex (inclusive) the log start index
    * @param endIndex (inclusive) the log end index
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
index dd1d735..7b6c7e4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java
@@ -26,20 +26,17 @@ import org.apache.iotdb.cluster.query.ClusterPlanExecutor;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.member.DataGroupMember;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.exception.BatchProcessException;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.metadata.MetadataException;
 import org.apache.iotdb.db.exception.metadata.PathNotExistException;
 import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException;
-import org.apache.iotdb.db.exception.metadata.UndefinedTemplateException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.metadata.PartialPath;
 import org.apache.iotdb.db.qp.executor.PlanExecutor;
 import org.apache.iotdb.db.qp.physical.BatchPlan;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
-import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
 import org.apache.iotdb.db.service.IoTDB;
 import org.apache.iotdb.db.utils.SchemaUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -80,8 +77,7 @@ abstract class BaseApplier implements LogApplier {
       } catch (BatchProcessException e) {
         handleBatchProcessException(e, plan);
       } catch (QueryProcessException e) {
-        if (e.getCause() instanceof StorageGroupNotSetException
-            || e.getCause() instanceof UndefinedTemplateException) {
+        if (e.getCause() instanceof StorageGroupNotSetException) {
           executeAfterSync(plan);
         } else {
           throw e;
@@ -94,51 +90,9 @@ abstract class BaseApplier implements LogApplier {
     }
   }
 
-  private void handleBatchProcessException(
-      BatchProcessException e, InsertPlan plan, DataGroupMember dataGroupMember)
-      throws QueryProcessException, StorageGroupNotSetException, StorageEngineException {
-    if (IoTDBDescriptor.getInstance().getConfig().isEnablePartition()) {
-      TSStatus[] failingStatus = e.getFailingStatus();
-      for (int i = 0; i < failingStatus.length; i++) {
-        TSStatus status = failingStatus[i];
-        // skip succeeded plans in later execution
-        if (status != null
-            && status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()
-            && plan instanceof BatchPlan) {
-          ((BatchPlan) plan).setIsExecuted(i);
-        }
-      }
-
-      boolean needRetry = false, hasError = false;
-      for (int i = 0, failingStatusLength = failingStatus.length; i < failingStatusLength; i++) {
-        TSStatus status = failingStatus[i];
-        if (status != null) {
-          if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()
-              && plan instanceof BatchPlan) {
-            ((BatchPlan) plan).unsetIsExecuted(i);
-            needRetry = true;
-          } else if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
-            hasError = true;
-          }
-        }
-      }
-      if (hasError) {
-        throw e;
-      }
-      if (needRetry) {
-        pullTimeseriesSchema(plan, dataGroupMember.getHeader());
-        plan.recoverFromFailure();
-        getQueryExecutor().processNonQuery(plan);
-      }
-    } else {
-      throw e;
-    }
-  }
-
   private void handleBatchProcessException(BatchProcessException e, PhysicalPlan plan)
       throws QueryProcessException, StorageEngineException, StorageGroupNotSetException {
     TSStatus[] failingStatus = e.getFailingStatus();
-    boolean needThrow = false;
     for (int i = 0; i < failingStatus.length; i++) {
       TSStatus status = failingStatus[i];
       // skip succeeded plans in later execution
@@ -147,17 +101,6 @@ abstract class BaseApplier implements LogApplier {
           && plan instanceof BatchPlan) {
         ((BatchPlan) plan).setIsExecuted(i);
       }
-      // handle batch exception thrown by delete timeseries plan, skip timeseries not exist
-      // exception
-      if (plan instanceof DeleteTimeSeriesPlan) {
-        if (status != null && status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
-          if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()) {
-            logger.info("{} doesn't exist, it may has been deleted.", plan.getPaths().get(i));
-          } else {
-            needThrow = true;
-          }
-        }
-      }
     }
     boolean needRetry = false;
     for (int i = 0, failingStatusLength = failingStatus.length; i < failingStatusLength; i++) {
@@ -173,9 +116,7 @@ abstract class BaseApplier implements LogApplier {
       executeAfterSync(plan);
       return;
     }
-    if (!(plan instanceof DeleteTimeSeriesPlan) || needThrow) {
-      throw e;
-    }
+    throw e;
   }
 
   private void executeAfterSync(PhysicalPlan plan)
@@ -200,26 +141,24 @@ abstract class BaseApplier implements LogApplier {
       throws QueryProcessException, StorageGroupNotSetException, StorageEngineException {
     try {
       getQueryExecutor().processNonQuery(plan);
-    } catch (BatchProcessException e) {
-      handleBatchProcessException(e, plan, dataGroupMember);
     } catch (QueryProcessException | StorageGroupNotSetException | StorageEngineException e) {
-      if (IoTDBDescriptor.getInstance().getConfig().isEnablePartition()) {
-        // check if this is caused by metadata missing, if so, pull metadata and retry
-        Throwable metaMissingException = SchemaUtils.findMetaMissingException(e);
-        boolean causedByPathNotExist = metaMissingException instanceof PathNotExistException;
-
-        if (causedByPathNotExist) {
-          if (logger.isDebugEnabled()) {
-            logger.debug(
-                "Timeseries is not found locally[{}], try pulling it from another group: {}",
-                metaGroupMember.getName(),
-                e.getCause().getMessage());
-          }
-          pullTimeseriesSchema(plan, dataGroupMember.getHeader());
-          plan.recoverFromFailure();
-          getQueryExecutor().processNonQuery(plan);
-        } else throw e;
-      } else throw e;
+      // check if this is caused by missing metadata; if so, pull the metadata and retry
+      Throwable metaMissingException = SchemaUtils.findMetaMissingException(e);
+      boolean causedByPathNotExist = metaMissingException instanceof PathNotExistException;
+
+      if (causedByPathNotExist) {
+        if (logger.isDebugEnabled()) {
+          logger.debug(
+              "Timeseries is not found locally[{}], try pulling it from another group: {}",
+              metaGroupMember.getName(),
+              e.getCause().getMessage());
+        }
+        pullTimeseriesSchema(plan, dataGroupMember.getHeader());
+        plan.recoverFromFailure();
+        getQueryExecutor().processNonQuery(plan);
+      } else {
+        throw e;
+      }
     }
   }
 
@@ -231,14 +170,9 @@ abstract class BaseApplier implements LogApplier {
   private void pullTimeseriesSchema(InsertPlan plan, Node ignoredGroup)
       throws QueryProcessException {
     try {
-      if (plan instanceof BatchPlan) {
-        ((CMManager) IoTDB.metaManager)
-            .pullTimeSeriesSchemas(((BatchPlan) plan).getPrefixPaths(), ignoredGroup);
-      } else {
-        PartialPath path = plan.getDeviceId();
-        ((CMManager) IoTDB.metaManager)
-            .pullTimeSeriesSchemas(Collections.singletonList(path), ignoredGroup);
-      }
+      PartialPath path = plan.getDeviceId();
+      ((CMManager) IoTDB.metaManager)
+          .pullTimeSeriesSchemas(Collections.singletonList(path), ignoredGroup);
     } catch (MetadataException e1) {
       throw new QueryProcessException(e1);
     }
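
BaseApplier now retries a failed plan exactly once after pulling the missing schema from another group. A minimal sketch of that pull-then-retry shape, with hypothetical Executor and pullSchemas stand-ins for processNonQuery and pullTimeSeriesSchemas:

    public class RetrySketch {
      static class PathNotExistException extends Exception {}

      interface Executor {
        void execute() throws Exception;
      }

      // run the plan; if the failure is a locally missing schema, pull the
      // schema from another group and retry exactly once
      static void executeWithSchemaPull(Executor executor, Runnable pullSchemas) throws Exception {
        try {
          executor.execute();
        } catch (PathNotExistException e) {
          pullSchemas.run();
          executor.execute();
        }
      }
    }
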
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
index ba84ee6..7d11d3e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java
@@ -52,7 +52,7 @@ public class DataLogApplier extends BaseApplier {
 
   private static final Logger logger = LoggerFactory.getLogger(DataLogApplier.class);
 
-  protected DataGroupMember dataGroupMember;
+  private DataGroupMember dataGroupMember;
 
   public DataLogApplier(MetaGroupMember metaGroupMember, DataGroupMember dataGroupMember) {
     super(metaGroupMember);
@@ -67,7 +67,15 @@ public class DataLogApplier extends BaseApplier {
       if (log instanceof PhysicalPlanLog) {
         PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log;
         PhysicalPlan plan = physicalPlanLog.getPlan();
-        applyPhysicalPlan(plan);
+        if (plan instanceof InsertMultiTabletPlan) {
+          applyInsert((InsertMultiTabletPlan) plan);
+        } else if (plan instanceof InsertRowsPlan) {
+          applyInsert((InsertRowsPlan) plan);
+        } else if (plan instanceof InsertPlan) {
+          applyInsert((InsertPlan) plan);
+        } else {
+          applyPhysicalPlan(plan, dataGroupMember);
+        }
       } else if (log instanceof CloseFileLog) {
         CloseFileLog closeFileLog = ((CloseFileLog) log);
         StorageEngine.getInstance()
@@ -90,61 +98,18 @@ public class DataLogApplier extends BaseApplier {
     }
   }
 
-  public void applyPhysicalPlan(PhysicalPlan plan)
-      throws QueryProcessException, StorageGroupNotSetException, StorageEngineException {
-    if (plan instanceof InsertMultiTabletPlan) {
-      applyInsert((InsertMultiTabletPlan) plan);
-    } else if (plan instanceof InsertRowsPlan) {
-      applyInsert((InsertRowsPlan) plan);
-    } else if (plan instanceof InsertPlan) {
-      applyInsert((InsertPlan) plan);
-    } else {
-      applyPhysicalPlan(plan, dataGroupMember);
-    }
-  }
-
   private void applyInsert(InsertMultiTabletPlan plan)
       throws StorageGroupNotSetException, QueryProcessException, StorageEngineException {
-    boolean hasSync = false;
     for (InsertTabletPlan insertTabletPlan : plan.getInsertTabletPlanList()) {
-      try {
-        IoTDB.metaManager.getStorageGroupPath(insertTabletPlan.getDeviceId());
-      } catch (StorageGroupNotSetException e) {
-        try {
-          if (!hasSync) {
-            metaGroupMember.syncLeaderWithConsistencyCheck(true);
-            hasSync = true;
-          } else {
-            throw new StorageEngineException(e.getMessage());
-          }
-        } catch (CheckConsistencyException ce) {
-          throw new QueryProcessException(ce.getMessage());
-        }
-      }
+      applyInsert(insertTabletPlan);
     }
-    applyPhysicalPlan(plan, dataGroupMember);
   }
 
   private void applyInsert(InsertRowsPlan plan)
       throws StorageGroupNotSetException, QueryProcessException, StorageEngineException {
-    boolean hasSync = false;
     for (InsertRowPlan insertRowPlan : plan.getInsertRowPlanList()) {
-      try {
-        IoTDB.metaManager.getStorageGroupPath(insertRowPlan.getDeviceId());
-      } catch (StorageGroupNotSetException e) {
-        try {
-          if (!hasSync) {
-            metaGroupMember.syncLeaderWithConsistencyCheck(true);
-            hasSync = true;
-          } else {
-            throw new StorageEngineException(e.getMessage());
-          }
-        } catch (CheckConsistencyException ce) {
-          throw new QueryProcessException(ce.getMessage());
-        }
-      }
+      applyInsert(insertRowPlan);
     }
-    applyPhysicalPlan(plan, dataGroupMember);
   }
 
   private void applyInsert(InsertPlan plan)
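
applyInsert on the container plans is reduced to applying each contained plan, so the storage group check runs on the single-plan path only. A toy model of that dispatch; SinglePlan and MultiPlan are illustrative types, not the real plan hierarchy:

    import java.util.Arrays;
    import java.util.List;

    public class DispatchSketch {
      interface Plan {}

      static class SinglePlan implements Plan {
        final String device;
        SinglePlan(String d) { device = d; }
      }

      static class MultiPlan implements Plan {
        final List<SinglePlan> children;
        MultiPlan(List<SinglePlan> c) { children = c; }
      }

      static void apply(Plan plan) {
        if (plan instanceof MultiPlan) {
          // a container plan is applied by applying each contained plan,
          // so each child runs the full single-plan path
          for (SinglePlan child : ((MultiPlan) plan).children) {
            apply(child);
          }
        } else {
          System.out.println("applying " + ((SinglePlan) plan).device);
        }
      }

      public static void main(String[] args) {
        apply(new MultiPlan(Arrays.asList(new SinglePlan("root.sg.d1"), new SinglePlan("root.sg.d2"))));
      }
    }
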
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
index 43b35ce..e86df85 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java
@@ -25,7 +25,6 @@ import org.apache.iotdb.cluster.exception.TruncateCommittedEntryException;
 import org.apache.iotdb.cluster.log.Log;
 import org.apache.iotdb.cluster.log.Snapshot;
 import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog;
-import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta;
 import org.apache.iotdb.db.utils.TestOnly;
 
 import org.slf4j.Logger;
@@ -54,17 +53,6 @@ public class CommittedEntryManager {
     entryTotalMemSize = 0;
   }
 
-  CommittedEntryManager(int maxNumOfLogInMem, LogManagerMeta meta) {
-    entries = Collections.synchronizedList(new ArrayList<>(maxNumOfLogInMem));
-    entries.add(
-        new EmptyContentLog(
-            meta.getMaxHaveAppliedCommitIndex() == -1
-                ? -1
-                : meta.getMaxHaveAppliedCommitIndex() - 1,
-            meta.getLastLogTerm()));
-    entryTotalMemSize = 0;
-  }
-
   /**
    * Overwrite the contents of this object with those of the given snapshot. Note that this function
    * is only used if you want to override all the contents, otherwise please use
@@ -254,10 +242,6 @@ public class CommittedEntryManager {
       }
       entries.addAll(appendingEntries);
     } else if (entries.size() - offset > 0) {
-      logger.error(
-          "committed entries cannot be truncated: current entries:{}, appendingEntries {}",
-          entries,
-          appendingEntries);
       throw new TruncateCommittedEntryException(
           appendingEntries.get(0).getCurrLogIndex(), getLastIndex());
     } else {
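
The guarded append above now throws without logging when the appended entries would overwrite committed ones. A simplified sketch of that guard over plain index lists; the real check compares entry offsets rather than raw indexes:

    import java.util.List;

    public class AppendSketch {
      // committed holds the indexes of already-committed entries in order;
      // appending must start strictly after the last committed index
      static void append(List<Long> committed, List<Long> appending) {
        long lastCommitted = committed.get(committed.size() - 1);
        if (appending.get(0) <= lastCommitted) {
          throw new IllegalStateException(
              "cannot truncate committed entries: appending from " + appending.get(0)
                  + " but committed up to " + lastCommitted);
        }
        committed.addAll(appending);
      }
    }
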
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
index 3ae298c..446eefc 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java
@@ -30,7 +30,6 @@ import org.apache.iotdb.cluster.log.Log;
 import org.apache.iotdb.cluster.log.LogApplier;
 import org.apache.iotdb.cluster.log.Snapshot;
 import org.apache.iotdb.cluster.log.StableEntryManager;
-import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta;
 import org.apache.iotdb.cluster.server.monitor.Timer.Statistic;
 import org.apache.iotdb.db.utils.TestOnly;
 import org.apache.iotdb.tsfile.utils.RamUsageEstimator;
@@ -116,8 +115,7 @@ public abstract class RaftLogManager {
   protected RaftLogManager(StableEntryManager stableEntryManager, LogApplier applier, String name) {
     this.logApplier = applier;
     this.name = name;
-    LogManagerMeta meta = stableEntryManager.getMeta();
-    this.setCommittedEntryManager(new CommittedEntryManager(maxNumOfLogsInMem, meta));
+    this.setCommittedEntryManager(new CommittedEntryManager(maxNumOfLogsInMem));
     this.setStableEntryManager(stableEntryManager);
     try {
       this.getCommittedEntryManager().append(stableEntryManager.getAllEntriesAfterAppliedIndex());
@@ -127,8 +125,6 @@ public abstract class RaftLogManager {
     long first = getCommittedEntryManager().getDummyIndex();
     long last = getCommittedEntryManager().getLastIndex();
     this.setUnCommittedEntryManager(new UnCommittedEntryManager(last + 1));
-    this.getUnCommittedEntryManager()
-        .truncateAndAppend(stableEntryManager.getAllEntriesAfterCommittedIndex());
 
     /** must have applied entry [compactIndex,last] to state machine */
     this.commitIndex = last;
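
After this change, recovery restores only the entries after the applied index and derives the rest from the last restored position. A compressed sketch of that initialization, with hypothetical field names:

    public class RecoverySketch {
      long commitIndex;
      long firstUncommittedIndex;

      // restoredLastIndex is the index of the last entry recovered via
      // getAllEntriesAfterAppliedIndex(); everything restored counts as
      // committed, and the uncommitted manager starts right after it
      void recover(long restoredLastIndex) {
        this.firstUncommittedIndex = restoredLastIndex + 1;
        this.commitIndex = restoredLastIndex;
      }
    }
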
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
index 7579a51..c81e6d0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java
@@ -219,7 +219,6 @@ public class SyncLogDequeSerializer implements StableEntryManager {
   }
 
   /** for log tools */
-  @Override
   public LogManagerMeta getMeta() {
     return meta;
   }
@@ -237,30 +236,6 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     return getLogs(meta.getMaxHaveAppliedCommitIndex(), meta.getCommitLogIndex());
   }
 
-  /**
-   * When raft log files flushed,meta would not be flushed synchronously.So data has flushed to disk
-   * is uncommitted for persistent LogManagerMeta(meta's info is stale).We need to recover these
-   * already persistent logs.
-   *
-   * <p>For example,commitIndex is 5 in persistent LogManagerMeta,But the log file has actually been
-   * flushed to 7,when we restart cluster,we need to recover 6 and 7.
-   *
-   * <p>Maybe,we can extract getAllEntriesAfterAppliedIndex and getAllEntriesAfterCommittedIndex
-   * into getAllEntriesByIndex,but now there are too many test cases using it.
-   */
-  @Override
-  public List<Log> getAllEntriesAfterCommittedIndex() {
-    long lastIndex = firstLogIndex + logIndexOffsetList.size() - 1;
-    logger.debug(
-        "getAllEntriesAfterCommittedIndex, firstUnCommitIndex={}, lastIndexBeforeStart={}",
-        meta.getCommitLogIndex() + 1,
-        lastIndex);
-    if (meta.getCommitLogIndex() >= lastIndex) {
-      return Collections.emptyList();
-    }
-    return getLogs(meta.getCommitLogIndex() + 1, lastIndex);
-  }
-
   @Override
   public void append(List<Log> entries, long maxHaveAppliedCommitIndex) throws IOException {
     lock.lock();
@@ -475,6 +450,11 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     this.firstLogIndex = meta.getCommitLogIndex() + 1;
     try {
       recoverLogFiles();
+
+      logDataFileList.sort(this::comparePersistLogFileName);
+
+      logIndexFileList.sort(this::comparePersistLogFileName);
+
       // add init log file
       if (logDataFileList.isEmpty()) {
         createNewLogFile(metaFile.getParentFile().getPath(), meta.getCommitLogIndex() + 1);
@@ -493,10 +473,6 @@ public class SyncLogDequeSerializer implements StableEntryManager {
     // 2. recover the log data file
     recoverLogFiles(LOG_DATA_FILE_SUFFIX);
 
-    // sort by name before recover
-    logDataFileList.sort(this::comparePersistLogFileName);
-    logIndexFileList.sort(this::comparePersistLogFileName);
-
     // 3. recover the last log file in case of abnormal exit
     recoverTheLastLogFile();
   }
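
recoverLogFiles() no longer sorts internally; the caller sorts both file lists by name once, right after recovery. The sort must be numeric rather than lexical, as the sketch below makes explicit; the real comparePersistLogFileName parses the actual file name layout, and the "data-<index>-<version>" format here is assumed:

    import java.io.File;
    import java.util.Arrays;
    import java.util.Comparator;

    public class SortSketch {
      // extract the trailing numeric version from an assumed
      // "data-<index>-<version>" file name
      static long versionOf(File f) {
        String[] parts = f.getName().split("-");
        return Long.parseLong(parts[parts.length - 1]);
      }

      public static void main(String[] args) {
        File[] files = {new File("data-0-2"), new File("data-0-10"), new File("data-0-1")};
        Arrays.sort(files, Comparator.comparingLong(SortSketch::versionOf));
        // lexical order would wrongly put "data-0-10" before "data-0-2"
        System.out.println(Arrays.toString(files));
      }
    }
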
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
index 86032c7..145c7c2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java
@@ -43,8 +43,8 @@ import org.apache.iotdb.db.exception.LoadFileException;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.metadata.IllegalPathException;
 import org.apache.iotdb.db.metadata.PartialPath;
+import org.apache.iotdb.db.utils.FilePathUtils;
 import org.apache.iotdb.db.utils.SchemaUtils;
-import org.apache.iotdb.tsfile.utils.FilePathUtils;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema;
 
@@ -300,8 +300,7 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
      */
     private boolean isFileAlreadyPulled(RemoteTsFileResource resource) throws IllegalPathException {
       Pair<String, Long> sgNameAndTimePartitionIdPair =
-          FilePathUtils.getLogicalSgNameAndTimePartitionIdPair(
-              resource.getTsFile().getAbsolutePath());
+          FilePathUtils.getLogicalSgNameAndTimePartitionIdPair(resource);
       return StorageEngine.getInstance()
           .isFileAlreadyExist(
               resource,
@@ -393,8 +392,7 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       // remote/<nodeIdentifier>/<FilePathUtils.getTsFilePrefixPath(resource)>/<tsfile>
       // see the FilePathUtils.splitTsFilePath() method for details.
       PartialPath storageGroupName =
-          new PartialPath(
-              FilePathUtils.getLogicalStorageGroupName(resource.getTsFile().getAbsolutePath()));
+          new PartialPath(FilePathUtils.getLogicalStorageGroupName(resource));
       File remoteModFile =
           new File(resource.getTsFile().getAbsoluteFile() + ModificationFile.FILE_SUFFIX);
       try {
@@ -449,12 +447,11 @@ public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot {
       // the new file is stored at:
       // remote/<nodeIdentifier>/<FilePathUtils.getTsFilePrefixPath(resource)>/<newTsFile>
       // see the FilePathUtils.splitTsFilePath() method for details.
-      String tempFileName =
-          FilePathUtils.getTsFileNameWithoutHardLink(resource.getTsFile().getAbsolutePath());
+      String tempFileName = FilePathUtils.getTsFileNameWithoutHardLink(resource);
       String tempFilePath =
           node.getNodeIdentifier()
               + File.separator
-              + FilePathUtils.getTsFilePrefixPath(resource.getTsFile().getAbsolutePath())
+              + FilePathUtils.getTsFilePrefixPath(resource)
               + File.separator
               + tempFileName;
       File tempFile = new File(REMOTE_FILE_TEMP_DIR, tempFilePath);
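
The FilePathUtils helpers now take the RemoteTsFileResource directly instead of its absolute path, but the assembled location is still remote/<nodeIdentifier>/<prefixPath>/<fileName>. A small sketch of that composition, with the helper results passed in as plain strings:

    import java.io.File;

    public class RemotePathSketch {
      // prefixPath and fileName stand in for the results of
      // FilePathUtils.getTsFilePrefixPath(resource) and
      // FilePathUtils.getTsFileNameWithoutHardLink(resource)
      static File remoteTempFile(String remoteTempDir, int nodeIdentifier,
                                 String prefixPath, String fileName) {
        String tempFilePath = nodeIdentifier + File.separator + prefixPath
            + File.separator + fileName;
        return new File(remoteTempDir, tempFilePath);
      }
    }
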
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
index 4ef2e12..6e706cc 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CMManager.java
@@ -47,9 +47,7 @@ import org.apache.iotdb.db.metadata.MetaUtils;
 import org.apache.iotdb.db.metadata.PartialPath;
 import org.apache.iotdb.db.metadata.mnode.MNode;
 import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
-import org.apache.iotdb.db.metadata.template.Template;
 import org.apache.iotdb.db.qp.constant.SQLConstant;
-import org.apache.iotdb.db.qp.physical.BatchPlan;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
@@ -57,7 +55,6 @@ import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertRowsOfOneDevicePlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan;
-import org.apache.iotdb.db.qp.physical.crud.SetDeviceTemplatePlan;
 import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan;
 import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
 import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan;
@@ -334,8 +331,7 @@ public class CMManager extends MManager {
   }
 
   @Override
-  public MNode getSeriesSchemasAndReadLockDevice(InsertPlan plan)
-      throws MetadataException, IOException {
+  public MNode getSeriesSchemasAndReadLockDevice(InsertPlan plan) throws MetadataException {
     MeasurementMNode[] measurementMNodes = new MeasurementMNode[plan.getMeasurements().length];
     int nonExistSchemaIndex =
         getMNodesLocally(plan.getDeviceId(), plan.getMeasurements(), measurementMNodes);
@@ -349,47 +345,6 @@ public class CMManager extends MManager {
   }
 
   @Override
-  public MeasurementSchema getSeriesSchema(PartialPath fullPath) throws MetadataException {
-    return getMeasurementMNode(fullPath).getSchema();
-  }
-
-  private MeasurementMNode getMeasurementMNode(PartialPath fullPath) throws MetadataException {
-    MeasurementMNode node = null;
-    // try remote cache first
-    try {
-      cacheLock.readLock().lock();
-      MeasurementMNode measurementMNode = mRemoteMetaCache.get(fullPath);
-      if (measurementMNode != null) {
-        node = measurementMNode;
-      }
-    } finally {
-      cacheLock.readLock().unlock();
-    }
-
-    if (node == null) {
-      // try local MTree
-      try {
-        node = (MeasurementMNode) super.getNodeByPath(fullPath);
-      } catch (PathNotExistException e) {
-        // pull from remote node
-        List<MeasurementSchema> schemas =
-            metaPuller.pullMeasurementSchemas(Collections.singletonList(fullPath));
-        if (!schemas.isEmpty()) {
-          MeasurementSchema measurementSchema = schemas.get(0);
-          MeasurementMNode measurementMNode =
-              new MeasurementMNode(
-                  null, measurementSchema.getMeasurementId(), measurementSchema, null);
-          cacheMeta(fullPath, measurementMNode);
-          node = measurementMNode;
-        } else {
-          throw e;
-        }
-      }
-    }
-    return node;
-  }
-
-  @Override
   public MeasurementSchema getSeriesSchema(PartialPath device, String measurement)
       throws MetadataException {
     try {
@@ -449,16 +404,6 @@ public class CMManager extends MManager {
     }
   }
 
-  @Override
-  public Pair<MNode, Template> getDeviceNodeWithAutoCreate(PartialPath path)
-      throws MetadataException, IOException {
-    return getDeviceNodeWithAutoCreate(
-        path,
-        ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema(),
-        false,
-        config.getDefaultStorageGroupLevel());
-  }
-
   private static class RemoteMetaCache extends LRUCache<PartialPath, MeasurementMNode> {
 
     RemoteMetaCache(int cacheSize) {
@@ -501,19 +446,28 @@ public class CMManager extends MManager {
     // for CreateTimeSeriesPlan, use getPath() to get timeseries to get related storage group,
     // for CreateMultiTimeSeriesPlan, use getPaths() to get all timeseries to get related storage
     // groups.
-    if (plan instanceof BatchPlan) {
-      storageGroups.addAll(getStorageGroups(getValidStorageGroups((BatchPlan) plan)));
-    } else if (plan instanceof InsertRowPlan || plan instanceof InsertTabletPlan) {
+    if (plan instanceof InsertRowPlan
+        || plan instanceof InsertRowsOfOneDevicePlan
+        || plan instanceof InsertTabletPlan) {
       storageGroups.addAll(
           getStorageGroups(Collections.singletonList(((InsertPlan) plan).getDeviceId())));
-    } else if (plan instanceof CreateTimeSeriesPlan) {
+    } else if (plan instanceof InsertRowsPlan) {
       storageGroups.addAll(
-          getStorageGroups(Collections.singletonList(((CreateTimeSeriesPlan) plan).getPath())));
-    } else if (plan instanceof SetDeviceTemplatePlan) {
+          getStorageGroups(
+              ((InsertRowsPlan) plan)
+                  .getInsertRowPlanList().stream()
+                      .map(InsertPlan::getDeviceId)
+                      .collect(Collectors.toList())));
+    } else if (plan instanceof InsertMultiTabletPlan) {
       storageGroups.addAll(
           getStorageGroups(
-              Collections.singletonList(
-                  new PartialPath(((SetDeviceTemplatePlan) plan).getPrefixPath()))));
+              ((InsertMultiTabletPlan) plan)
+                  .getInsertTabletPlanList().stream()
+                      .map(InsertPlan::getDeviceId)
+                      .collect(Collectors.toList())));
+    } else if (plan instanceof CreateTimeSeriesPlan) {
+      storageGroups.addAll(
+          getStorageGroups(Collections.singletonList(((CreateTimeSeriesPlan) plan).getPath())));
     } else {
       storageGroups.addAll(getStorageGroups(plan.getPaths()));
     }
@@ -530,18 +484,6 @@ public class CMManager extends MManager {
     }
   }
 
-  private List<PartialPath> getValidStorageGroups(BatchPlan plan) {
-    List<PartialPath> paths = new ArrayList<>();
-    List<PartialPath> originalPaths = plan.getPrefixPaths();
-    for (int i = 0; i < originalPaths.size(); i++) {
-      // has permission to create sg
-      if (!plan.getResults().containsKey(i)) {
-        paths.add(originalPaths.get(i));
-      }
-    }
-    return paths;
-  }
-
   /** Return storage group paths for the given deviceIds or timeseries. */
   private List<PartialPath> getStorageGroups(List<PartialPath> paths) throws MetadataException {
     Set<PartialPath> storageGroups = new HashSet<>();
@@ -644,22 +586,6 @@ public class CMManager extends MManager {
     return allSuccess;
   }
 
-  public boolean createTimeseries(InsertRowsOfOneDevicePlan insertRowsOfOneDevicePlan)
-      throws CheckConsistencyException, IllegalPathException {
-    boolean allSuccess = true;
-    for (InsertRowPlan insertRowPlan : insertRowsOfOneDevicePlan.getRowPlans()) {
-      boolean success = createTimeseries(insertRowPlan);
-      allSuccess = allSuccess && success;
-      if (!success) {
-        logger.error(
-            "create timeseries for device={} failed, plan={}",
-            insertRowPlan.getDeviceId(),
-            insertRowPlan);
-      }
-    }
-    return allSuccess;
-  }
-
   /**
    * Create timeseries automatically for an InsertPlan.
    *
@@ -676,10 +602,6 @@ public class CMManager extends MManager {
       return createTimeseries((InsertRowsPlan) insertPlan);
     }
 
-    if (insertPlan instanceof InsertRowsOfOneDevicePlan) {
-      return createTimeseries((InsertRowsOfOneDevicePlan) insertPlan);
-    }
-
     List<String> seriesList = new ArrayList<>();
     PartialPath deviceId = insertPlan.getDeviceId();
     PartialPath storageGroupName;
@@ -754,11 +676,7 @@ public class CMManager extends MManager {
     }
     if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
         && result.getCode() != TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode()
-        && result.getCode() != TSStatusCode.NEED_REDIRECTION.getStatusCode()
-        && !(result.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()
-            && result.getSubStatus().stream()
-                .allMatch(
-                    s -> s.getCode() == TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode()))) {
+        && result.getCode() != TSStatusCode.NEED_REDIRECTION.getStatusCode()) {
       logger.error(
           "{} failed to execute create timeseries {}: {}",
           metaGroupMember.getThisNode(),
@@ -816,14 +734,8 @@ public class CMManager extends MManager {
               metaGroupMember
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              result =
-                  syncDataClient.getUnregisteredTimeseries(partitionGroup.getHeader(), seriesList);
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
+            result =
+                syncDataClient.getUnregisteredTimeseries(partitionGroup.getHeader(), seriesList);
           }
         }
         if (result != null) {
@@ -862,17 +774,14 @@ public class CMManager extends MManager {
    */
   public void pullTimeSeriesSchemas(List<PartialPath> prefixPaths, Node ignoredGroup)
       throws MetadataException {
-    // Remove duplicated prefix paths to optimize
-    Set<PartialPath> prefixPathSet = new HashSet<>(prefixPaths);
-    List<PartialPath> uniquePrefixPaths = new ArrayList<>(prefixPathSet);
     logger.debug(
         "{}: Pulling timeseries schemas of {}, ignored group {}",
         metaGroupMember.getName(),
-        uniquePrefixPaths,
+        prefixPaths,
         ignoredGroup);
     // split the paths by the data groups that should hold them
     Map<PartitionGroup, List<String>> partitionGroupPathMap = new HashMap<>();
-    for (PartialPath prefixPath : uniquePrefixPaths) {
+    for (PartialPath prefixPath : prefixPaths) {
       if (SQLConstant.RESERVED_TIME.equalsIgnoreCase(prefixPath.getFullPath())) {
         continue;
       }
@@ -890,8 +799,8 @@ public class CMManager extends MManager {
       logger.debug(
           "{}: pulling schemas of {} and other {} paths from {} groups",
           metaGroupMember.getName(),
-          uniquePrefixPaths.get(0),
-          uniquePrefixPaths.size() - 1,
+          prefixPaths.get(0),
+          prefixPaths.size() - 1,
           partitionGroupPathMap.size());
     }
     for (Entry<PartitionGroup, List<String>> partitionGroupListEntry :
@@ -1009,18 +918,13 @@ public class CMManager extends MManager {
           metaGroupMember
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-        try {
-          PullSchemaResp pullSchemaResp = syncDataClient.pullTimeSeriesSchema(request);
-          ByteBuffer buffer = pullSchemaResp.schemaBytes;
-          int size = buffer.getInt();
-          schemas = new ArrayList<>(size);
-          for (int i = 0; i < size; i++) {
-            schemas.add(TimeseriesSchema.deserializeFrom(buffer));
-          }
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
+
+        PullSchemaResp pullSchemaResp = syncDataClient.pullTimeSeriesSchema(request);
+        ByteBuffer buffer = pullSchemaResp.schemaBytes;
+        int size = buffer.getInt();
+        schemas = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+          schemas.add(TimeseriesSchema.deserializeFrom(buffer));
         }
       }
     }
@@ -1248,13 +1152,8 @@ public class CMManager extends MManager {
           metaGroupMember
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-        try {
-          result = syncDataClient.getAllPaths(header, pathsToQuery, withAlias);
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+
+        result = syncDataClient.getAllPaths(header, pathsToQuery, withAlias);
       }
     }
 
@@ -1379,13 +1278,8 @@ public class CMManager extends MManager {
           metaGroupMember
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-        try {
-          paths = syncDataClient.getAllDevices(header, pathsToQuery);
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+
+        paths = syncDataClient.getAllDevices(header, pathsToQuery);
       }
     }
     return paths;
@@ -1618,16 +1512,7 @@ public class CMManager extends MManager {
     ExecutorService pool =
         new ThreadPoolExecutor(
             THREAD_POOL_SIZE, THREAD_POOL_SIZE, 0, TimeUnit.SECONDS, new LinkedBlockingDeque<>());
-
-    List<PartitionGroup> globalGroups = new ArrayList<>();
-    try {
-      PartitionGroup partitionGroup =
-          metaGroupMember.getPartitionTable().partitionByPathTime(plan.getPath(), 0);
-      globalGroups.add(partitionGroup);
-    } catch (MetadataException e) {
-      // if the path location is not find, obtain the path location from all groups.
-      globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups();
-    }
+    List<PartitionGroup> globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups();
 
     int limit = plan.getLimit() == 0 ? Integer.MAX_VALUE : plan.getLimit();
     int offset = plan.getOffset();
@@ -1838,15 +1723,9 @@ public class CMManager extends MManager {
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
         plan.serialize(dataOutputStream);
-        try {
-          resultBinary =
-              syncDataClient.getAllMeasurementSchema(
-                  group.getHeader(), ByteBuffer.wrap(byteArrayOutputStream.toByteArray()));
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+        resultBinary =
+            syncDataClient.getAllMeasurementSchema(
+                group.getHeader(), ByteBuffer.wrap(byteArrayOutputStream.toByteArray()));
       }
     }
     return resultBinary;
@@ -1870,15 +1749,9 @@ public class CMManager extends MManager {
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
 
         plan.serialize(dataOutputStream);
-        try {
-          resultBinary =
-              syncDataClient.getDevices(
-                  group.getHeader(), ByteBuffer.wrap(byteArrayOutputStream.toByteArray()));
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+        resultBinary =
+            syncDataClient.getDevices(
+                group.getHeader(), ByteBuffer.wrap(byteArrayOutputStream.toByteArray()));
       }
     }
     return resultBinary;
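
Several hunks in CMManager drop the manual transport close inside a catch block and rely on the surrounding try-with-resources instead. A minimal sketch of the simplified shape, assuming a hypothetical AutoCloseable client whose close() is safe to run on both the success and failure paths:

    public class ClientSketch {
      interface SyncClient extends AutoCloseable {
        byte[] pull(String request) throws Exception;
      }

      // if pull() throws, close() still runs, so a broken connection is not
      // handed back for reuse; this replaces the manual
      // getInputProtocol().getTransport().close() in a catch block
      static byte[] pullOnce(SyncClient client, String request) throws Exception {
        try (SyncClient c = client) {
          return c.pull(request);
        }
      }
    }
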
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java
index 9991c5a..e524772 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java
@@ -231,18 +231,12 @@ public class MetaPuller {
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
 
         // only need measurement name
-        try {
-          PullSchemaResp pullSchemaResp = syncDataClient.pullMeasurementSchema(request);
-          ByteBuffer buffer = pullSchemaResp.schemaBytes;
-          int size = buffer.getInt();
-          schemas = new ArrayList<>(size);
-          for (int i = 0; i < size; i++) {
-            schemas.add(MeasurementSchema.deserializeFrom(buffer));
-          }
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
+        PullSchemaResp pullSchemaResp = syncDataClient.pullMeasurementSchema(request);
+        ByteBuffer buffer = pullSchemaResp.schemaBytes;
+        int size = buffer.getInt();
+        schemas = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+          schemas.add(MeasurementSchema.deserializeFrom(buffer));
         }
       }
     }
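
The pull response above is a length-prefixed buffer: an int count followed by that many serialized schemas. A sketch of the same decoding loop over length-prefixed strings instead of MeasurementSchema:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    public class BufferSketch {
      // read an int count, then that many length-prefixed UTF-8 strings
      static List<String> decode(ByteBuffer buffer) {
        int size = buffer.getInt();
        List<String> items = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
          int len = buffer.getInt();
          byte[] bytes = new byte[len];
          buffer.get(bytes);
          items.add(new String(bytes, StandardCharsets.UTF_8));
        }
        return items;
      }
    }
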
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java
index d10b21d..469e84d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java
@@ -35,9 +35,6 @@ import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * SlotPartitionTable manages the slots (data partition) of each node using a look-up table. Slot:
@@ -53,8 +50,6 @@ public class SlotPartitionTable implements PartitionTable {
 
   // all nodes
   private List<Node> nodeRing = new ArrayList<>();
-  private ReadWriteLock nodeRingLock = new ReentrantReadWriteLock();
-
   // normally, it is equal to ClusterConstant.SLOT_NUM.
   private int totalSlotNumbers;
 
@@ -181,13 +176,9 @@ public class SlotPartitionTable implements PartitionTable {
 
   @Override
   public PartitionGroup route(String storageGroupName, long timestamp) {
-    Lock readLock = nodeRingLock.readLock();
-    readLock.lock();
-    try {
+    synchronized (nodeRing) {
       Node node = routeToHeaderByTime(storageGroupName, timestamp);
       return getHeaderGroup(node);
-    } finally {
-      readLock.unlock();
     }
   }
 
@@ -210,25 +201,18 @@ public class SlotPartitionTable implements PartitionTable {
 
   @Override
   public Node routeToHeaderByTime(String storageGroupName, long timestamp) {
-    Lock readLock = nodeRingLock.readLock();
-    readLock.lock();
-    try {
+    synchronized (nodeRing) {
       int slot =
           getSlotStrategy().calculateSlotByTime(storageGroupName, timestamp, getTotalSlotNumbers());
       Node node = slotNodes[slot];
       logger.trace("The slot of {}@{} is {}, held by {}", storageGroupName, timestamp, slot, node);
       return node;
-    } finally {
-      readLock.unlock();
     }
   }
 
   @Override
   public NodeAdditionResult addNode(Node node) {
-    Lock writeLock = nodeRingLock.writeLock();
-    writeLock.lock();
-
-    try {
+    synchronized (nodeRing) {
       if (nodeRing.contains(node)) {
         return null;
       }
@@ -261,8 +245,6 @@ public class SlotPartitionTable implements PartitionTable {
           }
         }
       }
-    } finally {
-      writeLock.unlock();
     }
 
     SlotNodeAdditionResult result = new SlotNodeAdditionResult();
@@ -437,9 +419,7 @@ public class SlotPartitionTable implements PartitionTable {
 
   @Override
   public NodeRemovalResult removeNode(Node target) {
-    Lock writeLock = nodeRingLock.writeLock();
-    writeLock.lock();
-    try {
+    synchronized (nodeRing) {
       if (!nodeRing.contains(target)) {
         return null;
       }
@@ -482,8 +462,6 @@ public class SlotPartitionTable implements PartitionTable {
       Map<Node, List<Integer>> nodeListMap = retrieveSlots(target);
       result.setNewSlotOwners(nodeListMap);
       return result;
-    } finally {
-      writeLock.unlock();
     }
   }
 
@@ -503,15 +481,11 @@ public class SlotPartitionTable implements PartitionTable {
   @Override
   public List<PartitionGroup> getGlobalGroups() {
     // preventing a thread from getting incomplete globalGroups
-    Lock readLock = nodeRingLock.readLock();
-    readLock.lock();
-    try {
+    synchronized (nodeRing) {
       if (globalGroups == null) {
         calculateGlobalGroups();
       }
       return globalGroups;
-    } finally {
-      readLock.unlock();
     }
   }
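
The SlotPartitionTable hunks above trade a ReentrantReadWriteLock for synchronized
blocks on nodeRing: read paths lose the ability to proceed in parallel, but every path
releases the monitor automatically, with no lock()/unlock() pairing to get wrong. A
minimal sketch of the two styles, with hypothetical method names:

    // Before: concurrent readers, but unlock() must be guaranteed manually.
    PartitionGroup routeWithLock(String storageGroupName, long timestamp) {
      nodeRingLock.readLock().lock();
      try {
        return getHeaderGroup(routeToHeaderByTime(storageGroupName, timestamp));
      } finally {
        nodeRingLock.readLock().unlock();
      }
    }

    // After: readers serialize with writers, but the monitor is released on
    // every exit path, including exceptions. Routing is short-lived, so the
    // lost read concurrency is assumed to be cheap here.
    PartitionGroup routeSynchronized(String storageGroupName, long timestamp) {
      synchronized (nodeRing) {
        return getHeaderGroup(routeToHeaderByTime(storageGroupName, timestamp));
      }
    }
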
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java
index 3550bbf..f28f088 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java
@@ -85,7 +85,11 @@ public class ClusterDataQueryExecutor extends RawDataQueryExecutor {
     try {
       List<ManagedSeriesReader> readersOfSelectedSeries = initMultSeriesReader(context);
       return new RawQueryDataSetWithoutValueFilter(
-          context.getQueryId(), queryPlan, readersOfSelectedSeries);
+          context.getQueryId(),
+          queryPlan.getDeduplicatedPaths(),
+          queryPlan.getDeduplicatedDataTypes(),
+          readersOfSelectedSeries,
+          queryPlan.isAscending());
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new StorageEngineException(e.getMessage());
@@ -108,7 +112,9 @@ public class ClusterDataQueryExecutor extends RawDataQueryExecutor {
       throw new StorageEngineException(e);
     }
     List<ManagedSeriesReader> readersOfSelectedSeries = Lists.newArrayList();
-    List<AbstractMultPointReader> multPointReaders =
+    List<AbstractMultPointReader> multPointReaders = Lists.newArrayList();
+
+    multPointReaders =
         readerFactory.getMultSeriesReader(
             queryPlan.getDeduplicatedPaths(),
             queryPlan.getDeviceToMeasurements(),
@@ -124,8 +130,7 @@ public class ClusterDataQueryExecutor extends RawDataQueryExecutor {
       PartialPath partialPath = queryPlan.getDeduplicatedPaths().get(i);
       TSDataType dataType = queryPlan.getDeduplicatedDataTypes().get(i);
       AssignPathManagedMergeReader assignPathManagedMergeReader =
-          new AssignPathManagedMergeReader(
-              partialPath.getFullPath(), dataType, queryPlan.isAscending());
+          new AssignPathManagedMergeReader(partialPath.getFullPath(), dataType);
       for (AbstractMultPointReader multPointReader : multPointReaders) {
         if (multPointReader.getAllPaths().contains(partialPath.getFullPath())) {
           assignPathManagedMergeReader.addReader(multPointReader, 0);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java
index 10e6631..ca16cfb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java
@@ -34,7 +34,6 @@ import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.server.member.DataGroupMember;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.StorageEngine;
 import org.apache.iotdb.db.exception.StorageEngineException;
@@ -65,10 +64,8 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -128,160 +125,6 @@ public class ClusterPlanExecutor extends PlanExecutor {
   }
 
   @Override
-  protected int getDevicesNum(PartialPath path) throws MetadataException {
-    // make sure this node knows all storage groups
-    try {
-      metaGroupMember.syncLeaderWithConsistencyCheck(false);
-    } catch (CheckConsistencyException e) {
-      throw new MetadataException(e);
-    }
-    Map<String, String> sgPathMap = IoTDB.metaManager.determineStorageGroup(path);
-    if (sgPathMap.isEmpty()) {
-      throw new PathNotExistException(path.getFullPath());
-    }
-    logger.debug("The storage groups of path {} are {}", path, sgPathMap.keySet());
-    int ret;
-    try {
-      ret = getDeviceCount(sgPathMap, path);
-    } catch (CheckConsistencyException e) {
-      throw new MetadataException(e);
-    }
-    logger.debug("The number of devices satisfying {} is {}", path, ret);
-    return ret;
-  }
-
-  private int getDeviceCount(Map<String, String> sgPathMap, PartialPath queryPath)
-      throws CheckConsistencyException, MetadataException {
-    AtomicInteger result = new AtomicInteger();
-    // split the paths by the data group they belong to
-    Map<PartitionGroup, List<String>> groupPathMap = new HashMap<>();
-    for (String storageGroupName : sgPathMap.keySet()) {
-      PartialPath pathUnderSG = new PartialPath(storageGroupName);
-      // find the data group that should hold the device schemas of the storage group
-      PartitionGroup partitionGroup =
-          metaGroupMember.getPartitionTable().route(storageGroupName, 0);
-      PartialPath targetPath;
-      // If storage group node length is larger than the one of queryPath, we query the device count
-      // of the storage group directly
-      if (pathUnderSG.getNodeLength() >= queryPath.getNodeLength()) {
-        targetPath = pathUnderSG;
-      } else {
-        // Or we replace the prefix of queryPath with the storage group as the target queryPath
-        String[] targetNodes = new String[queryPath.getNodeLength()];
-        for (int i = 0; i < queryPath.getNodeLength(); i++) {
-          if (i < pathUnderSG.getNodeLength()) {
-            targetNodes[i] = pathUnderSG.getNodes()[i];
-          } else {
-            targetNodes[i] = queryPath.getNodes()[i];
-          }
-        }
-        targetPath = new PartialPath(targetNodes);
-      }
-      if (partitionGroup.contains(metaGroupMember.getThisNode())) {
-        // this node is a member of the group, perform a local query after synchronizing with the
-        // leader
-        metaGroupMember
-            .getLocalDataMember(partitionGroup.getHeader())
-            .syncLeaderWithConsistencyCheck(false);
-        int localResult = getLocalDeviceCount(targetPath);
-        logger.debug(
-            "{}: get device count of {} locally, result {}",
-            metaGroupMember.getName(),
-            partitionGroup,
-            localResult);
-        result.addAndGet(localResult);
-      } else {
-        // batch the queries of the same group to reduce communication
-        groupPathMap
-            .computeIfAbsent(partitionGroup, p -> new ArrayList<>())
-            .add(targetPath.getFullPath());
-      }
-    }
-    if (groupPathMap.isEmpty()) {
-      return result.get();
-    }
-
-    ExecutorService remoteQueryThreadPool = Executors.newFixedThreadPool(groupPathMap.size());
-    List<Future<Void>> remoteFutures = new ArrayList<>();
-    // query each data group separately
-    for (Entry<PartitionGroup, List<String>> partitionGroupPathEntry : groupPathMap.entrySet()) {
-      PartitionGroup partitionGroup = partitionGroupPathEntry.getKey();
-      List<String> pathsToQuery = partitionGroupPathEntry.getValue();
-      remoteFutures.add(
-          remoteQueryThreadPool.submit(
-              () -> {
-                try {
-                  result.addAndGet(getRemoteDeviceCount(partitionGroup, pathsToQuery));
-                } catch (MetadataException e) {
-                  logger.warn(
-                      "Cannot get remote device count of {} from {}",
-                      pathsToQuery,
-                      partitionGroup,
-                      e);
-                }
-                return null;
-              }));
-    }
-    waitForThreadPool(remoteFutures, remoteQueryThreadPool, "getDeviceCount()");
-
-    return result.get();
-  }
-
-  private int getLocalDeviceCount(PartialPath path) throws MetadataException {
-    return IoTDB.metaManager.getDevicesNum(path);
-  }
-
-  private int getRemoteDeviceCount(PartitionGroup partitionGroup, List<String> pathsToCount)
-      throws MetadataException {
-    // choose the node with lowest latency or highest throughput
-    List<Node> coordinatedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup);
-    for (Node node : coordinatedNodes) {
-      try {
-        Integer count;
-        if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) {
-          AsyncDataClient client =
-              metaGroupMember
-                  .getClientProvider()
-                  .getAsyncDataClient(node, RaftServer.getReadOperationTimeoutMS());
-          client.setTimeout(RaftServer.getReadOperationTimeoutMS());
-          count =
-              SyncClientAdaptor.getDeviceCount(client, partitionGroup.getHeader(), pathsToCount);
-        } else {
-          try (SyncDataClient syncDataClient =
-              metaGroupMember
-                  .getClientProvider()
-                  .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              syncDataClient.setTimeout(RaftServer.getReadOperationTimeoutMS());
-              count = syncDataClient.getDeviceCount(partitionGroup.getHeader(), pathsToCount);
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
-          }
-        }
-        logger.debug(
-            "{}: get device count of {} from {}, result {}",
-            metaGroupMember.getName(),
-            partitionGroup,
-            node,
-            count);
-        if (count != null) {
-          return count;
-        }
-      } catch (IOException | TException e) {
-        throw new MetadataException(e);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new MetadataException(e);
-      }
-    }
-    logger.warn("Cannot get devices count of {} from {}", pathsToCount, partitionGroup);
-    return 0;
-  }
-
-  @Override
   protected int getPathsNum(PartialPath path) throws MetadataException {
     return getNodesNumInGivenLevel(path, -1);
   }
@@ -294,7 +137,6 @@ public class ClusterPlanExecutor extends PlanExecutor {
     } catch (CheckConsistencyException e) {
       throw new MetadataException(e);
     }
-
     // get all storage groups this path may belong to
     // the key is the storage group name and the value is the path to be queried with storage group
     // added, e.g:
@@ -305,37 +147,9 @@ public class ClusterPlanExecutor extends PlanExecutor {
       throw new PathNotExistException(path.getFullPath());
     }
     logger.debug("The storage groups of path {} are {}", path, sgPathMap.keySet());
-    int ret = 0;
+    int ret;
     try {
-      // level >= 0 is the COUNT NODE query
-      if (level >= 0) {
-        int prefixPartIdx = 0;
-        for (; prefixPartIdx < path.getNodeLength(); prefixPartIdx++) {
-          if (path.getNodes()[prefixPartIdx].equals(IoTDBConstant.PATH_WILDCARD)) {
-            break;
-          }
-        }
-        // if level is less than the query path level, there's no suitable node
-        if (level < prefixPartIdx - 1) {
-          return 0;
-        }
-        Set<String> deletedSg = new HashSet<>();
-        Set<PartialPath> matchedPath = new HashSet<>(0);
-        for (String sg : sgPathMap.keySet()) {
-          PartialPath p = new PartialPath(sg);
-          // if the storage group path level is larger than the query level, then the prefix must be
-          // a suitable node and there's no need to query children nodes later
-          if (p.getNodeLength() - 1 >= level) {
-            deletedSg.add(sg);
-            matchedPath.add(new PartialPath(Arrays.copyOfRange(p.getNodes(), 0, level + 1)));
-          }
-        }
-        for (String sg : deletedSg) {
-          sgPathMap.remove(sg);
-        }
-        ret += matchedPath.size();
-      }
-      ret += getPathCount(sgPathMap, level);
+      ret = getPathCount(sgPathMap, level);
     } catch (CheckConsistencyException e) {
       throw new MetadataException(e);
     }
@@ -445,13 +259,8 @@ public class ClusterPlanExecutor extends PlanExecutor {
               metaGroupMember
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              count = syncDataClient.getPathCount(partitionGroup.getHeader(), pathsToQuery, level);
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
+            syncDataClient.setTimeout(RaftServer.getReadOperationTimeoutMS());
+            count = syncDataClient.getPathCount(partitionGroup.getHeader(), pathsToQuery, level);
           }
         }
         logger.debug(
@@ -554,14 +363,8 @@ public class ClusterPlanExecutor extends PlanExecutor {
               metaGroupMember
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              paths =
-                  syncDataClient.getNodeList(group.getHeader(), schemaPattern.getFullPath(), level);
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
+            paths =
+                syncDataClient.getNodeList(group.getHeader(), schemaPattern.getFullPath(), level);
           }
         }
         if (paths != null) {
@@ -646,14 +449,8 @@ public class ClusterPlanExecutor extends PlanExecutor {
               metaGroupMember
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              nextChildrenNodes =
-                  syncDataClient.getChildNodeInNextLevel(group.getHeader(), path.getFullPath());
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
+            nextChildrenNodes =
+                syncDataClient.getChildNodeInNextLevel(group.getHeader(), path.getFullPath());
           }
         }
         if (nextChildrenNodes != null) {
@@ -761,14 +558,8 @@ public class ClusterPlanExecutor extends PlanExecutor {
               metaGroupMember
                   .getClientProvider()
                   .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-            try {
-              nextChildren =
-                  syncDataClient.getChildNodePathInNextLevel(group.getHeader(), path.getFullPath());
-            } catch (TException e) {
-              // the connection may be broken, close it to avoid it being reused
-              syncDataClient.getInputProtocol().getTransport().close();
-              throw e;
-            }
+            nextChildren =
+                syncDataClient.getChildNodePathInNextLevel(group.getHeader(), path.getFullPath());
           }
         }
         if (nextChildren != null) {
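
The getDeviceCount code removed above was a scatter-gather: groups containing this node
are counted locally, the rest are batched per partition group and queried from a
temporary fixed-size thread pool, with an AtomicInteger accumulating the partial counts.
A minimal sketch of that fan-out, assuming the class's waitForThreadPool utility;
countRemote is hypothetical and stands in for getRemoteDeviceCount:

    // Hedged sketch of the removed scatter-gather counting.
    int countDevices(Map<PartitionGroup, List<String>> groupPathMap) {
      AtomicInteger result = new AtomicInteger();
      ExecutorService pool = Executors.newFixedThreadPool(groupPathMap.size());
      List<Future<Void>> futures = new ArrayList<>();
      for (Entry<PartitionGroup, List<String>> entry : groupPathMap.entrySet()) {
        futures.add(
            pool.submit(
                () -> {
                  // One RPC per data group: all paths for the group are batched.
                  result.addAndGet(countRemote(entry.getKey(), entry.getValue()));
                  return null;
                }));
      }
      waitForThreadPool(futures, pool, "countDevices()");
      return result.get();
    }
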
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java
index e175f9e..acb0b77 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java
@@ -32,7 +32,6 @@ import org.apache.iotdb.db.metadata.PartialPath;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
-import org.apache.iotdb.db.qp.physical.crud.InsertRowsOfOneDevicePlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan;
 import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan;
 import org.apache.iotdb.db.qp.physical.sys.AlterTimeSeriesPlan;
@@ -125,8 +124,6 @@ public class ClusterPlanRouter {
       return splitAndRoutePlan((CreateTimeSeriesPlan) plan);
     } else if (plan instanceof InsertRowPlan) {
       return splitAndRoutePlan((InsertRowPlan) plan);
-    } else if (plan instanceof InsertRowsOfOneDevicePlan) {
-      return splitAndRoutePlan((InsertRowsOfOneDevicePlan) plan);
     } else if (plan instanceof AlterTimeSeriesPlan) {
       return splitAndRoutePlan((AlterTimeSeriesPlan) plan);
     } else if (plan instanceof CreateMultiTimeSeriesPlan) {
@@ -467,37 +464,4 @@ public class ClusterPlanRouter {
     subPlan.setIndexes(new ArrayList<>());
     return subPlan;
   }
-
-  /**
-   * @param plan InsertRowsOfOneDevicePlan
-   * @return key is InsertRowsOfOneDevicePlan, value is the partition group the plan belongs to. All
-   *     InsertRowPlans in InsertRowsOfOneDevicePlan belong to one same storage group.
-   */
-  private Map<PhysicalPlan, PartitionGroup> splitAndRoutePlan(InsertRowsOfOneDevicePlan plan)
-      throws MetadataException {
-    Map<PhysicalPlan, PartitionGroup> result = new HashMap<>();
-    Map<PartitionGroup, List<InsertRowPlan>> groupPlanMap = new HashMap<>();
-    Map<PartitionGroup, List<Integer>> groupPlanIndexMap = new HashMap<>();
-    PartialPath storageGroup = getMManager().getStorageGroupPath(plan.getDeviceId());
-    for (int i = 0; i < plan.getRowPlans().length; i++) {
-      InsertRowPlan p = plan.getRowPlans()[i];
-      PartitionGroup group = partitionTable.route(storageGroup.getFullPath(), p.getTime());
-      List<InsertRowPlan> groupedPlans =
-          groupPlanMap.computeIfAbsent(group, k -> new ArrayList<>());
-      List<Integer> groupedPlanIndex =
-          groupPlanIndexMap.computeIfAbsent(group, k -> new ArrayList<>());
-      groupedPlans.add(p);
-      groupedPlanIndex.add(plan.getRowPlanIndexList()[i]);
-    }
-
-    for (Entry<PartitionGroup, List<InsertRowPlan>> entry : groupPlanMap.entrySet()) {
-      PhysicalPlan reducedPlan =
-          new InsertRowsOfOneDevicePlan(
-              plan.getDeviceId(),
-              entry.getValue().toArray(new InsertRowPlan[0]),
-              groupPlanIndexMap.get(entry.getKey()).stream().mapToInt(i -> i).toArray());
-      result.put(reducedPlan, entry.getKey());
-    }
-    return result;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java
index 90fd0be..ecbfb74 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java
@@ -22,9 +22,11 @@ package org.apache.iotdb.cluster.query;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.qp.Planner;
 import org.apache.iotdb.db.qp.logical.Operator;
+import org.apache.iotdb.db.qp.logical.crud.SFWOperator;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.strategy.PhysicalGenerator;
 import org.apache.iotdb.db.qp.strategy.optimizer.ConcatPathOptimizer;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
 
 import java.time.ZoneId;
 
@@ -35,7 +37,16 @@ public class ClusterPlanner extends Planner {
   public PhysicalPlan parseSQLToPhysicalPlan(String sqlStr, ZoneId zoneId, int fetchSize)
       throws QueryProcessException {
     Operator operator = logicalGenerator.generate(sqlStr, zoneId);
-    operator = logicalOptimize(operator);
+    int maxDeduplicatedPathNum =
+        QueryResourceManager.getInstance().getMaxDeduplicatedPathNum(fetchSize);
+    if (operator instanceof SFWOperator && ((SFWOperator) operator).isLastQuery()) {
+      // The result set of a last query always has only three columns, so we should not
+      // limit the path number while constructing the logical plan.
+      // Because logicalOptimize may compute maxDeduplicatedPathNum + 1, we set it to
+      // Integer.MAX_VALUE - 1 to avoid integer overflow.
+      maxDeduplicatedPathNum = Integer.MAX_VALUE - 1;
+    }
+    operator = logicalOptimize(operator, maxDeduplicatedPathNum);
     PhysicalGenerator physicalGenerator = new ClusterPhysicalGenerator();
     return physicalGenerator.transformToPhysicalPlan(operator, fetchSize);
   }
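
The guard above exists because downstream code may add one to maxDeduplicatedPathNum,
and seeding it with Integer.MAX_VALUE would wrap to a negative value, disabling the
limit check entirely. A small self-contained demonstration of the wrap-around:

    public class OverflowDemo {
      public static void main(String[] args) {
        System.out.println(Integer.MAX_VALUE + 1);       // -2147483648: wrapped
        System.out.println((Integer.MAX_VALUE - 1) + 1); //  2147483647: still valid
      }
    }
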
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java
index e3be92c..d7edcbe 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java
@@ -27,12 +27,11 @@ import org.apache.iotdb.cluster.query.last.ClusterLastQueryExecutor;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
+import org.apache.iotdb.db.metadata.PartialPath;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
-import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan;
 import org.apache.iotdb.db.qp.physical.crud.LastQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
-import org.apache.iotdb.db.qp.physical.crud.UDTFPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.dataset.groupby.GroupByWithValueFilterDataSet;
 import org.apache.iotdb.db.query.dataset.groupby.GroupByWithoutValueFilterDataSet;
@@ -41,14 +40,11 @@ import org.apache.iotdb.db.query.executor.FillQueryExecutor;
 import org.apache.iotdb.db.query.executor.LastQueryExecutor;
 import org.apache.iotdb.db.query.executor.QueryRouter;
 import org.apache.iotdb.db.query.executor.RawDataQueryExecutor;
-import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
-import org.apache.iotdb.tsfile.read.expression.ExpressionType;
-import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
-import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.db.query.executor.fill.IFill;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 
-import java.io.IOException;
-import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 public class ClusterQueryRouter extends QueryRouter {
 
@@ -59,8 +55,12 @@ public class ClusterQueryRouter extends QueryRouter {
   }
 
   @Override
-  protected FillQueryExecutor getFillExecutor(FillQueryPlan plan) {
-    return new ClusterFillExecutor(plan, metaGroupMember);
+  protected FillQueryExecutor getFillExecutor(
+      List<PartialPath> fillPaths,
+      List<TSDataType> dataTypes,
+      long queryTime,
+      Map<TSDataType, IFill> fillType) {
+    return new ClusterFillExecutor(fillPaths, dataTypes, queryTime, fillType, metaGroupMember);
   }
 
   @Override
@@ -91,36 +91,4 @@ public class ClusterQueryRouter extends QueryRouter {
   protected LastQueryExecutor getLastQueryExecutor(LastQueryPlan lastQueryPlan) {
     return new ClusterLastQueryExecutor(lastQueryPlan, metaGroupMember);
   }
-
-  @Override
-  public QueryDataSet udtfQuery(UDTFPlan udtfPlan, QueryContext context)
-      throws StorageEngineException, QueryProcessException, IOException, InterruptedException {
-    IExpression expression = udtfPlan.getExpression();
-    IExpression optimizedExpression;
-    try {
-      optimizedExpression =
-          expression == null
-              ? null
-              : ExpressionOptimizer.getInstance()
-                  .optimize(expression, new ArrayList<>(udtfPlan.getDeduplicatedPaths()));
-    } catch (QueryFilterOptimizationException e) {
-      throw new StorageEngineException(e.getMessage());
-    }
-    udtfPlan.setExpression(optimizedExpression);
-
-    boolean withValueFilter =
-        optimizedExpression != null && optimizedExpression.getType() != ExpressionType.GLOBAL_TIME;
-    ClusterUDTFQueryExecutor clusterUDTFQueryExecutor =
-        new ClusterUDTFQueryExecutor(udtfPlan, metaGroupMember);
-
-    if (udtfPlan.isAlignByTime()) {
-      return withValueFilter
-          ? clusterUDTFQueryExecutor.executeWithValueFilterAlignByTime(context)
-          : clusterUDTFQueryExecutor.executeWithoutValueFilterAlignByTime(context);
-    } else {
-      return withValueFilter
-          ? clusterUDTFQueryExecutor.executeWithValueFilterNonAlign(context)
-          : clusterUDTFQueryExecutor.executeWithoutValueFilterNonAlign(context);
-    }
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java
deleted file mode 100644
index 764437e..0000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.query;
-
-import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.qp.physical.crud.UDTFPlan;
-import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.dataset.UDTFAlignByTimeDataSet;
-import org.apache.iotdb.db.query.dataset.UDTFNonAlignDataSet;
-import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
-import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader;
-import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
-import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.iotdb.tsfile.read.query.executor.ExecutorWithTimeGenerator.markFilterdPaths;
-
-public class ClusterUDTFQueryExecutor extends ClusterDataQueryExecutor {
-  protected final UDTFPlan udtfPlan;
-  protected final MetaGroupMember metaGroupMember;
-
-  public ClusterUDTFQueryExecutor(UDTFPlan udtfPlan, MetaGroupMember metaGroupMember) {
-    super(udtfPlan, metaGroupMember);
-    this.udtfPlan = udtfPlan;
-    this.metaGroupMember = metaGroupMember;
-  }
-
-  public QueryDataSet executeWithoutValueFilterAlignByTime(QueryContext context)
-      throws StorageEngineException, QueryProcessException, IOException, InterruptedException {
-    List<ManagedSeriesReader> readersOfSelectedSeries = initManagedSeriesReader(context);
-    return new UDTFAlignByTimeDataSet(
-        context,
-        udtfPlan,
-        udtfPlan.getDeduplicatedPaths(),
-        udtfPlan.getDeduplicatedDataTypes(),
-        readersOfSelectedSeries);
-  }
-
-  public QueryDataSet executeWithValueFilterAlignByTime(QueryContext context)
-      throws StorageEngineException, QueryProcessException, IOException {
-    TimeGenerator timestampGenerator = getTimeGenerator(context, udtfPlan);
-    List<Boolean> cached =
-        markFilterdPaths(
-            udtfPlan.getExpression(),
-            new ArrayList<>(udtfPlan.getDeduplicatedPaths()),
-            timestampGenerator.hasOrNode());
-    List<IReaderByTimestamp> readersOfSelectedSeries =
-        initSeriesReaderByTimestamp(context, udtfPlan, cached);
-    return new UDTFAlignByTimeDataSet(
-        context,
-        udtfPlan,
-        udtfPlan.getDeduplicatedPaths(),
-        udtfPlan.getDeduplicatedDataTypes(),
-        timestampGenerator,
-        readersOfSelectedSeries,
-        cached);
-  }
-
-  public QueryDataSet executeWithoutValueFilterNonAlign(QueryContext context)
-      throws QueryProcessException, StorageEngineException, IOException, InterruptedException {
-    List<ManagedSeriesReader> readersOfSelectedSeries = initManagedSeriesReader(context);
-    return new UDTFNonAlignDataSet(
-        context,
-        udtfPlan,
-        udtfPlan.getDeduplicatedPaths(),
-        udtfPlan.getDeduplicatedDataTypes(),
-        readersOfSelectedSeries);
-  }
-
-  public QueryDataSet executeWithValueFilterNonAlign(QueryContext context)
-      throws QueryProcessException, StorageEngineException, IOException {
-    TimeGenerator timestampGenerator = getTimeGenerator(context, udtfPlan);
-    List<Boolean> cached =
-        markFilterdPaths(
-            udtfPlan.getExpression(),
-            new ArrayList<>(udtfPlan.getDeduplicatedPaths()),
-            timestampGenerator.hasOrNode());
-    List<IReaderByTimestamp> readersOfSelectedSeries =
-        initSeriesReaderByTimestamp(context, udtfPlan, cached);
-    return new UDTFNonAlignDataSet(
-        context,
-        udtfPlan,
-        udtfPlan.getDeduplicatedPaths(),
-        udtfPlan.getDeduplicatedDataTypes(),
-        timestampGenerator,
-        readersOfSelectedSeries,
-        cached);
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
index a2fc36a..f078519 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
@@ -86,10 +86,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.iotdb.session.Config.DEFAULT_FETCH_SIZE;
+
 public class LocalQueryExecutor {
 
   private static final Logger logger = LoggerFactory.getLogger(LocalQueryExecutor.class);
-  public static final String DEBUG_SHOW_QUERY_ID = "{}: local queryId for {}#{} is {}";
   private DataGroupMember dataGroupMember;
   private ClusterReaderFactory readerFactory;
   private String name;
@@ -171,7 +172,7 @@ public class LocalQueryExecutor {
     Map<String, ByteBuffer> pathByteBuffers = Maps.newHashMap();
 
     for (String path : paths) {
-      ByteBuffer byteBuffer;
+      ByteBuffer byteBuffer = null;
       if (reader.hasNextBatch(path)) {
         BatchData batchData = reader.nextBatch(path);
 
@@ -229,9 +230,13 @@ public class LocalQueryExecutor {
 
     // the same query from a requester corresponds to a context here
     RemoteQueryContext queryContext =
-        queryManager.getQueryContext(request.getRequester(), request.getQueryId());
+        queryManager.getQueryContext(
+            request.getRequester(),
+            request.getQueryId(),
+            request.getFetchSize(),
+            request.getDeduplicatedPathNum());
     logger.debug(
-        DEBUG_SHOW_QUERY_ID,
+        "{}: local queryId for {}#{} is {}",
         name,
         request.getQueryId(),
         request.getPath(),
@@ -305,7 +310,12 @@ public class LocalQueryExecutor {
             });
 
     List<TSDataType> dataTypes = Lists.newArrayList();
-    request.getDataTypeOrdinal().forEach(dataType -> dataTypes.add(TSDataType.values()[dataType]));
+    request
+        .getDataTypeOrdinal()
+        .forEach(
+            dataType -> {
+              dataTypes.add(TSDataType.values()[dataType]);
+            });
 
     Filter timeFilter = null;
     Filter valueFilter = null;
@@ -319,9 +329,13 @@ public class LocalQueryExecutor {
 
     // the same query from a requester corresponds to a context here
     RemoteQueryContext queryContext =
-        queryManager.getQueryContext(request.getRequester(), request.getQueryId());
+        queryManager.getQueryContext(
+            request.getRequester(),
+            request.getQueryId(),
+            request.getFetchSize(),
+            request.getDeduplicatedPathNum());
     logger.debug(
-        DEBUG_SHOW_QUERY_ID,
+        "{}: local queryId for {}#{} is {}",
         name,
         request.getQueryId(),
         request.getPath(),
@@ -482,9 +496,13 @@ public class LocalQueryExecutor {
     Set<String> deviceMeasurements = request.getDeviceMeasurements();
 
     RemoteQueryContext queryContext =
-        queryManager.getQueryContext(request.getRequester(), request.getQueryId());
+        queryManager.getQueryContext(
+            request.getRequester(),
+            request.getQueryId(),
+            request.getFetchSize(),
+            request.getDeduplicatedPathNum());
     logger.debug(
-        DEBUG_SHOW_QUERY_ID,
+        "{}: local queryId for {}#{} is {}",
         name,
         request.getQueryId(),
         request.getPath(),
@@ -570,7 +588,8 @@ public class LocalQueryExecutor {
       timeFilter = FilterFactory.deserialize(request.timeFilterBytes);
     }
     RemoteQueryContext queryContext =
-        queryManager.getQueryContext(request.getRequestor(), request.queryId);
+        queryManager.getQueryContext(
+            request.getRequestor(), request.queryId, DEFAULT_FETCH_SIZE, -1);
     Set<String> deviceMeasurements = request.getDeviceMeasurements();
     boolean ascending = request.ascending;
 
@@ -627,35 +646,37 @@ public class LocalQueryExecutor {
 
     ClusterQueryUtils.checkPathExistence(path);
     List<AggregateResult> results = new ArrayList<>();
-    List<AggregateResult> ascResults = new ArrayList<>();
-    List<AggregateResult> descResults = new ArrayList<>();
     for (String aggregation : aggregations) {
-      AggregateResult ar =
-          AggregateResultFactory.getAggrResultByName(aggregation, dataType, ascending);
-      if (ar.isAscending()) {
-        ascResults.add(ar);
-      } else {
-        descResults.add(ar);
-      }
-      results.add(ar);
+      results.add(AggregateResultFactory.getAggrResultByName(aggregation, dataType));
     }
     List<Integer> nodeSlots =
         ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable())
             .getNodeSlots(dataGroupMember.getHeader());
     try {
-      AggregationExecutor.aggregateOneSeries(
-          new PartialPath(path),
-          allSensors,
-          context,
-          timeFilter,
-          dataType,
-          ascResults,
-          descResults,
-          new SlotTsFileFilter(nodeSlots));
+      if (ascending) {
+        AggregationExecutor.aggregateOneSeries(
+            new PartialPath(path),
+            allSensors,
+            context,
+            timeFilter,
+            dataType,
+            results,
+            null,
+            new SlotTsFileFilter(nodeSlots));
+      } else {
+        AggregationExecutor.aggregateOneSeries(
+            new PartialPath(path),
+            allSensors,
+            context,
+            timeFilter,
+            dataType,
+            null,
+            results,
+            new SlotTsFileFilter(nodeSlots));
+      }
     } catch (IllegalPathException e) {
       // ignore
     }
-
     return results;
   }
 
@@ -763,7 +784,8 @@ public class LocalQueryExecutor {
     Set<String> deviceMeasurements = request.getDeviceMeasurements();
     boolean ascending = request.ascending;
 
-    RemoteQueryContext queryContext = queryManager.getQueryContext(request.getRequestor(), queryId);
+    RemoteQueryContext queryContext =
+        queryManager.getQueryContext(request.getRequestor(), queryId, DEFAULT_FETCH_SIZE, -1);
     LocalGroupByExecutor executor =
         getGroupByExecutor(
             path,
@@ -821,10 +843,6 @@ public class LocalQueryExecutor {
     return resultBuffers;
   }
 
-  /**
-   * returns a non-nul ByteBuffer as thrift response, which not allows null objects. If the
-   * ByteBuffer data equals <0, null>, it means that the NextNotNullValue is null.
-   */
   public ByteBuffer peekNextNotNullValue(long executorId, long startTime, long endTime)
       throws ReaderNotFoundException, IOException {
     GroupByExecutor executor = queryManager.getGroupByExecutor(executorId);
@@ -832,9 +850,6 @@ public class LocalQueryExecutor {
       throw new ReaderNotFoundException(executorId);
     }
     Pair<Long, Object> pair = executor.peekNextNotNullValue(startTime, endTime);
-    if (pair == null) {
-      pair = new Pair<>(0L, null);
-    }
     ByteBuffer resultBuffer;
     ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
     try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) {
@@ -856,7 +871,8 @@ public class LocalQueryExecutor {
     long beforeRange = request.getBeforeRange();
     Node requester = request.getRequester();
     Set<String> deviceMeasurements = request.getDeviceMeasurements();
-    RemoteQueryContext queryContext = queryManager.getQueryContext(requester, queryId);
+    RemoteQueryContext queryContext =
+        queryManager.getQueryContext(requester, queryId, DEFAULT_FETCH_SIZE, -1);
 
     ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
     DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
@@ -914,17 +930,6 @@ public class LocalQueryExecutor {
     return count;
   }
 
-  public int getDeviceCount(List<String> pathsToQuery)
-      throws CheckConsistencyException, MetadataException {
-    dataGroupMember.syncLeaderWithConsistencyCheck(false);
-
-    int count = 0;
-    for (String s : pathsToQuery) {
-      count += getCMManager().getDevicesNum(new PartialPath(s));
-    }
-    return count;
-  }
-
   @SuppressWarnings("java:S1135") // ignore todos
   public ByteBuffer last(LastQueryRequest request)
       throws CheckConsistencyException, QueryProcessException, IOException, StorageEngineException,
@@ -932,7 +937,8 @@ public class LocalQueryExecutor {
     dataGroupMember.syncLeaderWithConsistencyCheck(false);
 
     RemoteQueryContext queryContext =
-        queryManager.getQueryContext(request.getRequestor(), request.getQueryId());
+        queryManager.getQueryContext(
+            request.getRequestor(), request.getQueryId(), DEFAULT_FETCH_SIZE, -1);
     List<PartialPath> partialPaths = new ArrayList<>();
     for (String path : request.getPaths()) {
       partialPaths.add(new PartialPath(path));
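
The aggregation hunk above splits the call to AggregationExecutor.aggregateOneSeries by
time order: all results travel through either the ascending-list parameter or the
descending-list parameter, with null in the other position. A minimal sketch of the
dispatch, following the signature used in the hunk:

    // Hedged sketch: the executor expects ascending and descending results in
    // separate lists; this caller has a single order, so one side is null.
    if (ascending) {
      AggregationExecutor.aggregateOneSeries(
          new PartialPath(path), allSensors, context, timeFilter, dataType,
          results, null, new SlotTsFileFilter(nodeSlots));
    } else {
      AggregationExecutor.aggregateOneSeries(
          new PartialPath(path), allSensors, context, timeFilter, dataType,
          null, results, new SlotTsFileFilter(nodeSlots));
    }

This appears to assume every aggregation in the request shares the plan's time order,
which holds here because the per-result ordering branch (isAscending on each
AggregateResult) is gone after the revert.
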
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java
index dc52923..6c33f50 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java
@@ -274,13 +274,8 @@ public class ClusterAggregator {
           metaGroupMember
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-        try {
-          resultBuffers = syncDataClient.getAggrResult(request);
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+
+        resultBuffers = syncDataClient.getAggrResult(request);
       }
     }
     return resultBuffers;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java
index e2c43282..f6cd707 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java
@@ -19,36 +19,31 @@
 
 package org.apache.iotdb.cluster.query.fill;
 
-import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.metadata.PartialPath;
-import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.executor.FillQueryExecutor;
 import org.apache.iotdb.db.query.executor.fill.IFill;
 import org.apache.iotdb.db.query.executor.fill.LinearFill;
 import org.apache.iotdb.db.query.executor.fill.PreviousFill;
-import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.read.TimeValuePair;
-import org.apache.iotdb.tsfile.utils.TsPrimitiveType;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 public class ClusterFillExecutor extends FillQueryExecutor {
 
   private MetaGroupMember metaGroupMember;
-  private ClusterReaderFactory clusterReaderFactory;
 
-  public ClusterFillExecutor(FillQueryPlan plan, MetaGroupMember metaGroupMember) {
-    super(plan);
+  public ClusterFillExecutor(
+      List<PartialPath> selectedSeries,
+      List<TSDataType> dataTypes,
+      long queryTime,
+      Map<TSDataType, IFill> typeIFillMap,
+      MetaGroupMember metaGroupMember) {
+    super(selectedSeries, dataTypes, queryTime, typeIFillMap);
     this.metaGroupMember = metaGroupMember;
-    this.clusterReaderFactory = new ClusterReaderFactory(metaGroupMember);
   }
 
   @Override
@@ -58,8 +53,7 @@ public class ClusterFillExecutor extends FillQueryExecutor {
       TSDataType dataType,
       long queryTime,
       Set<String> deviceMeasurements,
-      QueryContext context)
-      throws QueryProcessException, StorageEngineException {
+      QueryContext context) {
     if (fill instanceof LinearFill) {
       IFill clusterFill = new ClusterLinearFill((LinearFill) fill, metaGroupMember);
       clusterFill.configureFill(path, dataType, queryTime, deviceMeasurements, context);
@@ -68,36 +62,7 @@ public class ClusterFillExecutor extends FillQueryExecutor {
       IFill clusterFill = new ClusterPreviousFill((PreviousFill) fill, metaGroupMember);
       clusterFill.configureFill(path, dataType, queryTime, deviceMeasurements, context);
       return clusterFill;
-    } else {
-      fill.configureFill(path, dataType, queryTime, deviceMeasurements, context);
-      return fill;
-    }
-  }
-
-  @Override
-  protected List<TimeValuePair> getTimeValuePairs(QueryContext context)
-      throws QueryProcessException, StorageEngineException, IOException {
-    List<TimeValuePair> ret = new ArrayList<>(selectedSeries.size());
-
-    for (int i = 0; i < selectedSeries.size(); i++) {
-      PartialPath path = selectedSeries.get(i);
-      TSDataType dataType = dataTypes.get(i);
-      IReaderByTimestamp reader =
-          clusterReaderFactory.getReaderByTimestamp(
-              path,
-              plan.getAllMeasurementsInDevice(path.getDevice()),
-              dataTypes.get(i),
-              context,
-              plan.isAscending());
-
-      Object[] results = reader.getValuesInTimestamps(new long[] {queryTime}, 1);
-      if (results != null && results[0] != null) {
-        ret.add(new TimeValuePair(queryTime, TsPrimitiveType.getByType(dataType, results[0])));
-      } else {
-        ret.add(null);
-      }
     }
-
-    return ret;
+    return null;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java
index 34b2706..ed1cd7a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java
@@ -22,7 +22,6 @@ package org.apache.iotdb.cluster.query.fill;
 import org.apache.iotdb.cluster.query.aggregate.ClusterAggregator;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
 import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.qp.constant.SQLConstant;
 import org.apache.iotdb.db.query.aggregation.AggregateResult;
 import org.apache.iotdb.db.query.executor.fill.LinearFill;
@@ -49,8 +48,7 @@ public class ClusterLinearFill extends LinearFill {
   }
 
   @Override
-  protected TimeValuePair calculatePrecedingPoint()
-      throws QueryProcessException, StorageEngineException {
+  protected TimeValuePair calculatePrecedingPoint() {
     // calculating the preceding point can be viewed as a previous fill
     ClusterPreviousFill clusterPreviousFill =
         new ClusterPreviousFill(dataType, queryTime, beforeRange, metaGroupMember);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java
index b6f9543..33274e3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java
@@ -32,7 +32,6 @@ import org.apache.iotdb.cluster.server.RaftServer;
 import org.apache.iotdb.cluster.server.handlers.caller.PreviousFillHandler;
 import org.apache.iotdb.cluster.server.member.DataGroupMember;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.cluster.utils.ClientUtils;
 import org.apache.iotdb.cluster.utils.PartitionUtils.Intervals;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
@@ -42,7 +41,6 @@ import org.apache.iotdb.db.query.executor.fill.PreviousFill;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.TimeValuePair;
 
-import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -78,11 +76,14 @@ public class ClusterPreviousFill extends PreviousFill {
       TSDataType dataType,
       long queryTime,
       Set<String> deviceMeasurements,
-      QueryContext context)
-      throws QueryProcessException, StorageEngineException {
-    fillResult =
-        performPreviousFill(
-            path, dataType, queryTime, getBeforeRange(), deviceMeasurements, context);
+      QueryContext context) {
+    try {
+      fillResult =
+          performPreviousFill(
+              path, dataType, queryTime, getBeforeRange(), deviceMeasurements, context);
+    } catch (StorageEngineException e) {
+      logger.error("Failed to configure previous fill for Path {}", path, e);
+    }
   }
 
   @Override
@@ -97,7 +98,7 @@ public class ClusterPreviousFill extends PreviousFill {
       long beforeRange,
       Set<String> deviceMeasurements,
       QueryContext context)
-      throws StorageEngineException, QueryProcessException {
+      throws StorageEngineException {
     // make sure the partition table is new
     try {
       metaGroupMember.syncLeaderWithConsistencyCheck(false);
@@ -128,14 +129,10 @@ public class ClusterPreviousFill extends PreviousFill {
     }
     fillService.shutdown();
     try {
-      boolean terminated =
-          fillService.awaitTermination(
-              RaftServer.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS);
-      if (!terminated) {
-        logger.warn("Executor service termination timed out");
-      }
+      fillService.awaitTermination(RaftServer.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS);
     } catch (InterruptedException e) {
-      throw new QueryProcessException(e.getMessage());
+      Thread.currentThread().interrupt();
+      logger.error("Unexpected interruption when waiting for fill pool to stop", e);
     }
     return handler.getResult();
   }
@@ -243,28 +240,19 @@ public class ClusterPreviousFill extends PreviousFill {
   private ByteBuffer remoteSyncPreviousFill(
       Node node, PreviousFillRequest request, PreviousFillArguments arguments) {
     ByteBuffer byteBuffer = null;
-    SyncDataClient client = null;
-    try {
-      client =
-          metaGroupMember
-              .getClientProvider()
-              .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS());
-      byteBuffer = client.previousFill(request);
-    } catch (IOException e) {
-      logger.warn("{}: Cannot connect to {} during previous fill", metaGroupMember, node);
-    } catch (TException e) {
+    try (SyncDataClient syncDataClient =
+        metaGroupMember
+            .getClientProvider()
+            .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
+
+      byteBuffer = syncDataClient.previousFill(request);
+    } catch (Exception e) {
       logger.error(
           "{}: Cannot perform previous fill of {} to {}",
           metaGroupMember.getName(),
           arguments.getPath(),
           node,
           e);
-      // the connection may be broken, close it to avoid it being reused
-      client.getInputProtocol().getTransport().close();
-    } finally {
-      if (client != null) {
-        ClientUtils.putBackSyncClient(client);
-      }
     }
     return byteBuffer;
   }
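
performPreviousFill now swallows the interruption while waiting for the fill pool
instead of converting it into a QueryProcessException, restoring the thread's interrupt
flag so callers can still observe it. A minimal sketch that combines this with the
timeout logging the previous version had; shutdownFillPool is a hypothetical helper:

    void shutdownFillPool(ExecutorService fillService) {
      fillService.shutdown();
      try {
        // Wait up to the read timeout for in-flight fill tasks to finish.
        boolean terminated =
            fillService.awaitTermination(
                RaftServer.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS);
        if (!terminated) {
          logger.warn("Executor service termination timed out");
        }
      } catch (InterruptedException e) {
        // Restore the flag so code further up the stack can react to it.
        Thread.currentThread().interrupt();
        logger.error("Unexpected interruption when waiting for fill pool to stop", e);
      }
    }
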
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java
index 3633158..1fef19e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java
@@ -23,7 +23,7 @@ import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.query.filter.TsFileFilter;
-import org.apache.iotdb.tsfile.utils.FilePathUtils;
+import org.apache.iotdb.db.utils.FilePathUtils;
 import org.apache.iotdb.tsfile.utils.Pair;
 
 import org.slf4j.Logger;
@@ -47,8 +47,7 @@ public class SlotTsFileFilter implements TsFileFilter {
 
   private static boolean fileNotInSlots(TsFileResource resource, List<Integer> nodeSlots) {
     Pair<String, Long> sgNameAndPartitionIdPair =
-        FilePathUtils.getLogicalSgNameAndTimePartitionIdPair(
-            resource.getTsFile().getAbsolutePath());
+        FilePathUtils.getLogicalSgNameAndTimePartitionIdPair(resource);
     int slot =
         SlotPartitionTable.getSlotStrategy()
             .calculateSlotByPartitionNum(
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java
index 3b01e54..02df747 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java
@@ -89,14 +89,8 @@ public class RemoteGroupByExecutor implements GroupByExecutor {
                 .getClientProvider()
                 .getSyncDataClient(source, RaftServer.getReadOperationTimeoutMS())) {
 
-          try {
-            aggrBuffers =
-                syncDataClient.getGroupByResult(header, executorId, curStartTime, curEndTime);
-          } catch (TException e) {
-            // the connection may be broken, close it to avoid it being reused
-            syncDataClient.getInputProtocol().getTransport().close();
-            throw e;
-          }
+          aggrBuffers =
+              syncDataClient.getGroupByResult(header, executorId, curStartTime, curEndTime);
         }
       }
     } catch (TException e) {
@@ -139,14 +133,9 @@ public class RemoteGroupByExecutor implements GroupByExecutor {
             metaGroupMember
                 .getClientProvider()
                 .getSyncDataClient(source, RaftServer.getReadOperationTimeoutMS())) {
-          try {
-            aggrBuffer =
-                syncDataClient.peekNextNotNullValue(header, executorId, nextStartTime, nextEndTime);
-          } catch (TException e) {
-            // the connection may be broken, close it to avoid it being reused
-            syncDataClient.getInputProtocol().getTransport().close();
-            throw e;
-          }
+
+          aggrBuffer =
+              syncDataClient.peekNextNotNullValue(header, executorId, nextStartTime, nextEndTime);
         }
       }
     } catch (TException e) {
@@ -160,9 +149,7 @@ public class RemoteGroupByExecutor implements GroupByExecutor {
     if (aggrBuffer != null) {
       long time = aggrBuffer.getLong();
       Object o = SerializeUtils.deserializeObject(aggrBuffer);
-      if (o != null) {
-        result = new Pair<>(time, o);
-      }
+      result = new Pair<>(time, o);
     }
     logger.debug(
         "Fetched peekNextNotNullValue from {} of [{}, {}]: {}",
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java
index 2982cc4..d5ec324 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java
@@ -43,8 +43,6 @@ import org.apache.iotdb.rpc.TSStatusCode;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.TimeValuePair;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.utils.Pair;
 
 import org.apache.thrift.TException;
@@ -76,7 +74,7 @@ public class ClusterLastQueryExecutor extends LastQueryExecutor {
   }
 
   @Override
-  public List<Pair<Boolean, TimeValuePair>> calculateLastPairForSeries(
+  protected List<Pair<Boolean, TimeValuePair>> calculateLastPairForSeries(
       List<PartialPath> seriesPaths,
       List<TSDataType> dataTypes,
       QueryContext context,
@@ -246,17 +244,13 @@ public class ClusterLastQueryExecutor extends LastQueryExecutor {
                 .getClientProvider()
                 .getAsyncDataClient(node, RaftServer.getReadOperationTimeoutMS());
       } catch (IOException e) {
-        logger.warn("can not get client for node= {}", node);
         return null;
       }
-      Filter timeFilter =
-          (expression == null) ? null : ((GlobalTimeExpression) expression).getFilter();
       buffer =
           SyncClientAdaptor.last(
               asyncDataClient,
               seriesPaths,
               dataTypeOrdinals,
-              timeFilter,
               context,
               queryPlan.getDeviceToMeasurements(),
               group.getHeader());
@@ -264,27 +258,19 @@ public class ClusterLastQueryExecutor extends LastQueryExecutor {
     }
 
     private ByteBuffer lastSync(Node node, QueryContext context) throws TException {
-      try (SyncDataClient client =
+      try (SyncDataClient syncDataClient =
           metaGroupMember
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-        LastQueryRequest lastQueryRequest =
+
+        return syncDataClient.last(
             new LastQueryRequest(
                 PartialPath.toStringList(seriesPaths),
                 dataTypeOrdinals,
                 context.getQueryId(),
                 queryPlan.getDeviceToMeasurements(),
                 group.getHeader(),
-                client.getNode());
-        Filter timeFilter =
-            (expression == null) ? null : ((GlobalTimeExpression) expression).getFilter();
-        if (timeFilter != null) {
-          lastQueryRequest.setFilterBytes(SerializeUtils.serializeFilter(timeFilter));
-        }
-        return client.last(lastQueryRequest);
-      } catch (IOException e) {
-        logger.warn("can not get client for node= {}", node);
-        return null;
+                syncDataClient.getNode()));
       }
     }
   }
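
lastSync above shows the cleanup pattern this commit standardizes on: the SyncDataClient is opened in a try-with-resources block, so the transport is released on every exit path and the manual getInputProtocol().getTransport().close() calls removed throughout this commit become unnecessary. The same file also stops deriving a time filter from a GlobalTimeExpression, so a distributed last query now simply asks each replica group for the newest point of every series. A minimal sketch of the cleanup pattern, with RpcClient as an illustrative stand-in for the generated Thrift client:

    interface RpcClient extends AutoCloseable {
      byte[] call() throws Exception;

      @Override
      void close(); // narrows AutoCloseable.close(): no checked exception
    }

    class FetchSketch {
      // the client is closed on normal return and on a thrown exception alike,
      // so a broken connection cannot leak back into a connection pool
      static byte[] fetch(RpcClient client) throws Exception {
        try (RpcClient c = client) {
          return c.call();
        }
      }
    }
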
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java
index f3376ba..455e7fb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java
@@ -43,12 +43,16 @@ public class ClusterQueryManager {
   private Map<Long, IAggregateReader> aggrReaderMap = new ConcurrentHashMap<>();
   private Map<Long, GroupByExecutor> groupByExecutorMap = new ConcurrentHashMap<>();
 
-  public synchronized RemoteQueryContext getQueryContext(Node node, long queryId) {
+  public synchronized RemoteQueryContext getQueryContext(
+      Node node, long queryId, int fetchSize, int deduplicatedPathNum) {
     Map<Long, RemoteQueryContext> nodeContextMap =
         queryContextMap.computeIfAbsent(node, n -> new HashMap<>());
     return nodeContextMap.computeIfAbsent(
         queryId,
-        qId -> new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)));
+        qId ->
+            new RemoteQueryContext(
+                QueryResourceManager.getInstance()
+                    .assignQueryId(true, fetchSize, deduplicatedPathNum)));
   }
 
   public long registerReader(IBatchReader reader) {
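
getQueryContext now threads the caller's fetchSize and deduplicatedPathNum through to assignQueryId. The cache itself is the usual nested computeIfAbsent idiom, one map per requesting node and one entry per remote query id; a self-contained sketch with illustrative types:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    class ContextCacheSketch {
      static class Ctx { // stand-in for RemoteQueryContext
        final long localQueryId;
        Ctx(long localQueryId) { this.localQueryId = localQueryId; }
      }

      private final Map<String, Map<Long, Ctx>> contexts = new ConcurrentHashMap<>();
      private final AtomicLong idGenerator = new AtomicLong(); // mimics assignQueryId

      synchronized Ctx getQueryContext(String node, long remoteQueryId) {
        return contexts
            .computeIfAbsent(node, n -> new HashMap<>())
            .computeIfAbsent(remoteQueryId, id -> new Ctx(idGenerator.incrementAndGet()));
      }
    }
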
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java
index a164974..23603b7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java
@@ -62,7 +62,6 @@ import org.apache.iotdb.db.query.reader.series.SeriesRawDataBatchReader;
 import org.apache.iotdb.db.query.reader.series.SeriesRawDataPointReader;
 import org.apache.iotdb.db.query.reader.series.SeriesReader;
 import org.apache.iotdb.db.query.reader.series.SeriesReaderByTimestamp;
-import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader;
 import org.apache.iotdb.db.utils.SerializeUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -237,10 +236,11 @@ public class ClusterReaderFactory {
     for (PartialPath partialPath : paths) {
       List<PartitionGroup> partitionGroups = metaGroupMember.routeFilter(timeFilter, partialPath);
       partitionGroups.forEach(
-          partitionGroup ->
-              partitionGroupListMap
-                  .computeIfAbsent(partitionGroup, n -> new ArrayList<>())
-                  .add(partialPath));
+          partitionGroup -> {
+            partitionGroupListMap
+                .computeIfAbsent(partitionGroup, n -> new ArrayList<>())
+                .add(partialPath);
+          });
     }
 
     List<AbstractMultPointReader> multPointReaders = Lists.newArrayList();
@@ -365,12 +365,7 @@ public class ClusterReaderFactory {
         metaGroupMember.getName(),
         path,
         partitionGroups.size());
-    PriorityMergeReader mergeReader;
-    if (ascending) {
-      mergeReader = new ManagedPriorityMergeReader(dataType);
-    } else {
-      mergeReader = new ManagedDescPriorityMergeReader(dataType);
-    }
+    ManagedMergeReader mergeReader = new ManagedMergeReader(dataType);
     try {
       // build a reader for each group and merge them
       for (PartitionGroup partitionGroup : partitionGroups) {
@@ -389,9 +384,7 @@ public class ClusterReaderFactory {
     } catch (IOException | QueryProcessException e) {
       throw new StorageEngineException(e);
     }
-    // The instance of merge reader is either ManagedPriorityMergeReader or
-    // ManagedDescPriorityMergeReader, which is safe to cast type.
-    return (ManagedSeriesReader) mergeReader;
+    return mergeReader;
   }
 
   /**
@@ -520,7 +513,6 @@ public class ClusterReaderFactory {
         ((SlotPartitionTable) metaGroupMember.getPartitionTable()).getNodeSlots(header);
     QueryDataSource queryDataSource =
         QueryResourceManager.getInstance().getQueryDataSource(path, context, timeFilter);
-    valueFilter = queryDataSource.updateFilterUsingTTL(valueFilter);
     return new SeriesReader(
         path,
         allSensors,
@@ -592,7 +584,7 @@ public class ClusterReaderFactory {
       return new MultEmptyReader(fullPaths);
     }
     throw new StorageEngineException(
-        new RequestTimeOutException("Query multi-series: " + paths + " in " + partitionGroup));
+        new RequestTimeOutException("Query " + paths + " in " + partitionGroup));
   }
 
   /**
@@ -674,7 +666,10 @@ public class ClusterReaderFactory {
         });
 
     List<Integer> dataTypeOrdinals = Lists.newArrayList();
-    dataTypes.forEach(dataType -> dataTypeOrdinals.add(dataType.ordinal()));
+    dataTypes.forEach(
+        dataType -> {
+          dataTypeOrdinals.add(dataType.ordinal());
+        });
 
     request.setPath(fullPaths);
     request.setHeader(partitionGroup.getHeader());
@@ -853,6 +848,8 @@ public class ClusterReaderFactory {
         }
 
         if (executorId != -1) {
+          // record the queried node to release resources later
+          ((RemoteQueryContext) context).registerRemoteNode(node, partitionGroup.getHeader());
           logger.debug(
               "{}: get an executorId {} for {}@{} from {}",
               metaGroupMember.getName(),
@@ -881,9 +878,6 @@ public class ClusterReaderFactory {
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         logger.error("{}: Cannot query {} from {}", metaGroupMember.getName(), path, node, e);
-      } finally {
-        // record the queried node to release resources later
-        ((RemoteQueryContext) context).registerRemoteNode(node, partitionGroup.getHeader());
       }
     }
     throw new StorageEngineException(
@@ -905,13 +899,7 @@ public class ClusterReaderFactory {
               .getClientProvider()
               .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
 
-        try {
-          executorId = syncDataClient.getGroupByExecutor(request);
-        } catch (TException e) {
-          // the connection may be broken, close it to avoid it being reused
-          syncDataClient.getInputProtocol().getTransport().close();
-          throw e;
-        }
+        executorId = syncDataClient.getGroupByExecutor(request);
       }
     }
     return executorId;
@@ -983,7 +971,7 @@ public class ClusterReaderFactory {
       QueryContext context,
       DataGroupMember dataGroupMember,
       boolean ascending)
-      throws StorageEngineException, QueryProcessException {
+      throws StorageEngineException, QueryProcessException, IOException {
     // pull the newest data
     try {
       dataGroupMember.syncLeaderWithConsistencyCheck(false);
@@ -1034,13 +1022,12 @@ public class ClusterReaderFactory {
     } catch (CheckConsistencyException e) {
       throw new StorageEngineException(e);
     }
-    Filter timeFilter = TimeFilter.defaultTimeFilter(ascending);
     SeriesReader seriesReader =
         getSeriesReader(
             path,
             allSensors,
             dataType,
-            timeFilter,
+            TimeFilter.gtEq(Long.MIN_VALUE),
             null,
             context,
             dataGroupMember.getHeader(),
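
Two behavioral points in this file: ascending and descending reads now share a single ManagedMergeReader, and a queried node is registered for later resource release only once an executor id has actually been obtained, rather than unconditionally in a finally block. A sketch of the latter fix, with hypothetical names:

    import java.util.HashSet;
    import java.util.Set;

    class RemoteRegistrySketch {
      private final Set<String> queriedNodes = new HashSet<>();

      // returns null when the node refuses; registers for cleanup only on success
      Long tryAcquireExecutor(String node) {
        long executorId = requestExecutor(node); // hypothetical RPC, -1 = refused
        if (executorId != -1) {
          queriedNodes.add(node); // remote resources will be released here later
          return executorId;
        }
        return null;
      }

      private long requestExecutor(String node) {
        return node.isEmpty() ? -1 : node.hashCode() & 0x7fffffff; // stub
      }
    }

Registering only on success avoids asking nodes that never handed out a resource to release one.
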
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java
index 4b13e98..43d2668 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java
@@ -31,7 +31,6 @@ import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader;
 import org.apache.iotdb.db.query.timegenerator.ServerTimeGenerator;
 import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.db.utils.TestOnly;
 import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
@@ -80,38 +79,13 @@ public class ClusterTimeGenerator extends ServerTimeGenerator {
     }
   }
 
-  @TestOnly
-  public ClusterTimeGenerator(
-      QueryContext context,
-      MetaGroupMember metaGroupMember,
-      ClusterReaderFactory clusterReaderFactory,
-      RawDataQueryPlan rawDataQueryPlan,
-      boolean onlyCheckLocalData)
-      throws StorageEngineException {
-    super(context);
-    this.queryPlan = rawDataQueryPlan;
-    this.readerFactory = clusterReaderFactory;
-    try {
-      readerFactory.syncMetaGroup();
-      if (onlyCheckLocalData) {
-        whetherHasLocalDataGroup(
-            queryPlan.getExpression(), metaGroupMember, queryPlan.isAscending());
-      } else {
-        constructNode(queryPlan.getExpression());
-      }
-    } catch (IOException | CheckConsistencyException e) {
-      throw new StorageEngineException(e);
-    }
-  }
-
   @Override
   protected IBatchReader generateNewBatchReader(SingleSeriesExpression expression)
       throws IOException {
     Filter filter = expression.getFilter();
-    Filter timeFilter = getTimeFilter(filter);
     PartialPath path = (PartialPath) expression.getSeriesPath();
     TSDataType dataType;
-    ManagedSeriesReader mergeReader;
+    ManagedSeriesReader mergeReader = null;
     try {
       dataType =
           ((CMManager) IoTDB.metaManager)
@@ -123,7 +97,7 @@ public class ClusterTimeGenerator extends ServerTimeGenerator {
               path,
               queryPlan.getAllMeasurementsInDevice(path.getDevice()),
               dataType,
-              timeFilter,
+              null,
               filter,
               context,
               queryPlan.isAscending());
@@ -137,6 +111,18 @@ public class ClusterTimeGenerator extends ServerTimeGenerator {
     return hasLocalReader;
   }
 
+  public void setHasLocalReader(boolean hasLocalReader) {
+    this.hasLocalReader = hasLocalReader;
+  }
+
+  public QueryDataSet.EndPoint getEndPoint() {
+    return endPoint;
+  }
+
+  public void setEndPoint(QueryDataSet.EndPoint endPoint) {
+    this.endPoint = endPoint;
+  }
+
   @Override
   public String toString() {
     return super.toString() + ", has local reader:" + hasLocalReader;
@@ -175,7 +161,6 @@ public class ClusterTimeGenerator extends ServerTimeGenerator {
   private void checkHasLocalReader(
       SingleSeriesExpression expression, MetaGroupMember metaGroupMember) throws IOException {
     Filter filter = expression.getFilter();
-    Filter timeFilter = getTimeFilter(filter);
     PartialPath path = (PartialPath) expression.getSeriesPath();
     TSDataType dataType;
     try {
@@ -199,7 +184,7 @@ public class ClusterTimeGenerator extends ServerTimeGenerator {
                   path,
                   queryPlan.getAllMeasurementsInDevice(path.getDevice()),
                   dataType,
-                  timeFilter,
+                  null,
                   filter,
                   context,
                   dataGroupMember,
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java
index e609816..8889535 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java
@@ -97,6 +97,8 @@ public class DataSourceInfo {
         if (newReaderId != null) {
           logger.debug("get a readerId {} for {} from {}", newReaderId, request.path, node);
           if (newReaderId != -1) {
+            // register the node so the remote resources can be released
+            context.registerRemoteNode(node, partitionGroup.getHeader());
             this.readerId = newReaderId;
             this.curSource = node;
             this.curPos = nextNodePos;
@@ -114,9 +116,6 @@ public class DataSourceInfo {
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         logger.error("Cannot query {} from {}", this.request.path, node, e);
-      } finally {
-        // register the node so the remote resources can be released
-        context.registerRemoteNode(node, partitionGroup.getHeader());
       }
       nextNodePos = (nextNodePos + 1) % this.nodes.size();
       if (nextNodePos == this.curPos) {
@@ -154,31 +153,27 @@ public class DataSourceInfo {
   }
 
   private Long applyForReaderIdSync(Node node, boolean byTimestamp, long timestamp)
-      throws TException, IOException {
-    long newReaderId;
+      throws TException {
+
+    Long newReaderId;
     try (SyncDataClient client =
         this.metaGroupMember
             .getClientProvider()
             .getSyncDataClient(node, RaftServer.getReadOperationTimeoutMS())) {
-      try {
-        if (byTimestamp) {
-          newReaderId = client.querySingleSeriesByTimestamp(request);
+
+      if (byTimestamp) {
+        newReaderId = client.querySingleSeriesByTimestamp(request);
+      } else {
+        Filter newFilter;
+        // add the timestamp as a time filter to skip data that has already been read
+        if (request.isSetTimeFilterBytes()) {
+          Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes);
+          newFilter = new AndFilter(timeFilter, TimeFilter.gt(timestamp));
         } else {
-          Filter newFilter;
-          // add timestamp to as a timeFilter to skip the data which has been read
-          if (request.isSetTimeFilterBytes()) {
-            Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes);
-            newFilter = new AndFilter(timeFilter, TimeFilter.gt(timestamp));
-          } else {
-            newFilter = TimeFilter.gt(timestamp);
-          }
-          request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter));
-          newReaderId = client.querySingleSeries(request);
+          newFilter = TimeFilter.gt(timestamp);
         }
-      } catch (TException e) {
-        // the connection may be broken, close it to avoid it being reused
-        client.getInputProtocol().getTransport().close();
-        throw e;
+        request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter));
+        newReaderId = client.querySingleSeries(request);
       }
       return newReaderId;
     }
@@ -206,7 +201,7 @@ public class DataSourceInfo {
         : metaGroupMember.getClientProvider().getAsyncDataClient(this.curSource, timeout);
   }
 
-  SyncDataClient getCurSyncClient(int timeout) throws IOException {
+  SyncDataClient getCurSyncClient(int timeout) throws TException {
     return isNoClient
         ? null
         : metaGroupMember.getClientProvider().getSyncDataClient(this.curSource, timeout);
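
applyForReaderIdSync keeps the resume trick spelled out in its comment: when a scan fails over to a new replica, the request's time filter is ANDed with "time > lastTimestamp" so rows the caller has already received are skipped. A compact sketch with LongPredicate in place of IoTDB's Filter and AndFilter:

    import java.util.function.LongPredicate;

    class ResumeFilterSketch {
      // combine the original filter (possibly absent) with a strictly-newer bound
      static LongPredicate resumeAfter(LongPredicate original, long lastTimestamp) {
        LongPredicate newerOnly = t -> t > lastTimestamp;
        return original == null ? newerOnly : original.and(newerOnly);
      }
    }
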
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java
deleted file mode 100644
index 653dea4..0000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.query.reader;
-
-import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader;
-import org.apache.iotdb.db.query.reader.universal.DescPriorityMergeReader;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.read.TimeValuePair;
-import org.apache.iotdb.tsfile.read.common.BatchData;
-import org.apache.iotdb.tsfile.read.reader.IPointReader;
-
-import java.io.IOException;
-import java.util.NoSuchElementException;
-
-@SuppressWarnings("common-java:DuplicatedBlocks")
-public class ManagedDescPriorityMergeReader extends DescPriorityMergeReader
-    implements ManagedSeriesReader, IPointReader {
-
-  private static final int BATCH_SIZE = 4096;
-
-  private volatile boolean managedByPool;
-  private volatile boolean hasRemaining;
-
-  private BatchData batchData;
-  private TSDataType dataType;
-
-  public ManagedDescPriorityMergeReader(TSDataType dataType) {
-    this.dataType = dataType;
-  }
-
-  @Override
-  public boolean isManagedByQueryManager() {
-    return managedByPool;
-  }
-
-  @Override
-  public void setManagedByQueryManager(boolean managedByQueryManager) {
-    this.managedByPool = managedByQueryManager;
-  }
-
-  @Override
-  public boolean hasRemaining() {
-    return hasRemaining;
-  }
-
-  @Override
-  public void setHasRemaining(boolean hasRemaining) {
-    this.hasRemaining = hasRemaining;
-  }
-
-  @Override
-  public boolean hasNextBatch() throws IOException {
-    if (batchData != null) {
-      return true;
-    }
-    constructBatch();
-    return batchData != null;
-  }
-
-  private void constructBatch() throws IOException {
-    if (hasNextTimeValuePair()) {
-      batchData = new BatchData(dataType);
-      while (hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) {
-        TimeValuePair next = nextTimeValuePair();
-        batchData.putAnObject(next.getTimestamp(), next.getValue().getValue());
-      }
-    }
-  }
-
-  @Override
-  public BatchData nextBatch() throws IOException {
-    if (!hasNextBatch()) {
-      throw new NoSuchElementException();
-    }
-    BatchData ret = batchData;
-    batchData = null;
-    return ret;
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedMergeReader.java
similarity index 94%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedMergeReader.java
index e57f4d9..e54dede 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedMergeReader.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.NoSuchElementException;
 
 @SuppressWarnings("common-java:DuplicatedBlocks")
-public class ManagedPriorityMergeReader extends PriorityMergeReader implements ManagedSeriesReader {
+public class ManagedMergeReader extends PriorityMergeReader implements ManagedSeriesReader {
 
   private static final int BATCH_SIZE = 4096;
 
@@ -39,7 +39,7 @@ public class ManagedPriorityMergeReader extends PriorityMergeReader implements M
   private BatchData batchData;
   private TSDataType dataType;
 
-  public ManagedPriorityMergeReader(TSDataType dataType) {
+  public ManagedMergeReader(TSDataType dataType) {
     this.dataType = dataType;
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java
index e266af8..d077f02 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java
@@ -108,8 +108,6 @@ public class RemoteSeriesReaderByTimestamp implements IReaderByTimestamp {
       return curSyncClient.fetchSingleSeriesByTimestamps(
           sourceInfo.getHeader(), sourceInfo.getReaderId(), timestampList);
     } catch (TException e) {
-      // the connection may be broken, close it to avoid it being reused
-      curSyncClient.getInputProtocol().getTransport().close();
       // try other node
       if (!sourceInfo.switchNode(true, timestamps[0])) {
         return null;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java
index f53f2bc..2dcc1b7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java
@@ -149,8 +149,6 @@ public class RemoteSimpleSeriesReader implements IPointReader {
       curSyncClient = sourceInfo.getCurSyncClient(RaftServer.getReadOperationTimeoutMS());
       return curSyncClient.fetchSingleSeries(sourceInfo.getHeader(), sourceInfo.getReaderId());
     } catch (TException e) {
-      // the connection may be broken, close it to avoid it being reused
-      curSyncClient.getInputProtocol().getTransport().close();
       // try other node
       if (!sourceInfo.switchNode(false, lastTimestamp)) {
         return null;
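
With the explicit transport close gone, both remote readers above lean on the same failover loop: try the current replica, and on a transport error switch to the next node until every replica has been tried. A stripped-down sketch, where Source is an illustrative stand-in for a per-node client:

    import java.util.List;

    class FailoverSketch {
      interface Source {
        byte[] fetch() throws Exception;
      }

      // first healthy replica wins; null means every replica failed
      static byte[] fetchWithFailover(List<Source> replicas) {
        for (Source source : replicas) {
          try {
            return source.fetch();
          } catch (Exception e) {
            // connection may be broken; fall through to the next replica
          }
        }
        return null;
      }
    }
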
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java
deleted file mode 100644
index a38f491..0000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.cluster.query.reader.mult;
-
-import org.apache.iotdb.db.query.reader.universal.DescPriorityMergeReader;
-import org.apache.iotdb.db.query.reader.universal.Element;
-
-import java.util.PriorityQueue;
-
-/**
- * This class extends {@link extends DescPriorityMergeReader} for data sources with different
- * priorities.
- */
-public class AssignPathDescPriorityMergeReader extends DescPriorityMergeReader
-    implements IAssignPathPriorityMergeReader {
-
-  private String fullPath;
-
-  public AssignPathDescPriorityMergeReader(String fullPath) {
-    super();
-    this.fullPath = fullPath;
-  }
-
-  @Override
-  public PriorityQueue<Element> getHeap() {
-    return heap;
-  }
-
-  @Override
-  public String getFullPath() {
-    return fullPath;
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java
index 6208966..34ecc13 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java
@@ -22,12 +22,12 @@ import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.TimeValuePair;
 import org.apache.iotdb.tsfile.read.common.BatchData;
-import org.apache.iotdb.tsfile.read.reader.IPointReader;
 
 import java.io.IOException;
 import java.util.NoSuchElementException;
 
-public class AssignPathManagedMergeReader implements ManagedSeriesReader, IPointReader {
+public class AssignPathManagedMergeReader extends AssignPathPriorityMergeReader
+    implements ManagedSeriesReader {
 
   private static final int BATCH_SIZE = 4096;
   private volatile boolean managedByPool;
@@ -36,20 +36,11 @@ public class AssignPathManagedMergeReader implements ManagedSeriesReader, IPoint
   private BatchData batchData;
   private TSDataType dataType;
 
-  private final IAssignPathPriorityMergeReader underlyingReader;
-
-  public AssignPathManagedMergeReader(String fullPath, TSDataType dataType, boolean isAscending) {
-    underlyingReader =
-        isAscending
-            ? new AssignPathAscPriorityMergeReader(fullPath)
-            : new AssignPathDescPriorityMergeReader(fullPath);
+  public AssignPathManagedMergeReader(String fullPath, TSDataType dataType) {
+    super(fullPath);
     this.dataType = dataType;
   }
 
-  public void addReader(AbstractMultPointReader reader, long priority) throws IOException {
-    underlyingReader.addReader(reader, priority);
-  }
-
   @Override
   public boolean isManagedByQueryManager() {
     return managedByPool;
@@ -80,10 +71,10 @@ public class AssignPathManagedMergeReader implements ManagedSeriesReader, IPoint
   }
 
   private void constructBatch() throws IOException {
-    if (underlyingReader.hasNextTimeValuePair()) {
+    if (hasNextTimeValuePair()) {
       batchData = new BatchData(dataType);
-      while (underlyingReader.hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) {
-        TimeValuePair next = underlyingReader.nextTimeValuePair();
+      while (hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) {
+        TimeValuePair next = nextTimeValuePair();
         batchData.putAnObject(next.getTimestamp(), next.getValue().getValue());
       }
     }
@@ -98,24 +89,4 @@ public class AssignPathManagedMergeReader implements ManagedSeriesReader, IPoint
     batchData = null;
     return ret;
   }
-
-  @Override
-  public boolean hasNextTimeValuePair() throws IOException {
-    return underlyingReader.hasNextTimeValuePair();
-  }
-
-  @Override
-  public TimeValuePair nextTimeValuePair() throws IOException {
-    return underlyingReader.nextTimeValuePair();
-  }
-
-  @Override
-  public TimeValuePair currentTimeValuePair() throws IOException {
-    return underlyingReader.currentTimeValuePair();
-  }
-
-  @Override
-  public void close() throws IOException {
-    underlyingReader.close();
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathPriorityMergeReader.java
similarity index 53%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathPriorityMergeReader.java
index 7891ac7..30c6f70 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathPriorityMergeReader.java
@@ -20,30 +20,47 @@ package org.apache.iotdb.cluster.query.reader.mult;
 
 import org.apache.iotdb.db.query.reader.universal.Element;
 import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader;
+import org.apache.iotdb.tsfile.read.TimeValuePair;
 
-import java.util.PriorityQueue;
+import java.io.IOException;
 
 /**
  * This class extends {@link PriorityMergeReader} for data sources with different
  * priorities.
  */
-public class AssignPathAscPriorityMergeReader extends PriorityMergeReader
-    implements IAssignPathPriorityMergeReader {
+public class AssignPathPriorityMergeReader extends PriorityMergeReader {
 
   private String fullPath;
 
-  public AssignPathAscPriorityMergeReader(String fullPath) {
+  public AssignPathPriorityMergeReader(String fullPath) {
     super();
     this.fullPath = fullPath;
   }
 
-  @Override
-  public PriorityQueue<Element> getHeap() {
-    return heap;
+  public void addReader(AbstractMultPointReader reader, long priority) throws IOException {
+    if (reader.hasNextTimeValuePair(fullPath)) {
+      heap.add(
+          new MultElement(
+              reader, reader.nextTimeValuePair(fullPath), new MergeReaderPriority(priority, 0)));
+    } else {
+      reader.close();
+    }
   }
 
-  @Override
-  public String getFullPath() {
-    return fullPath;
+  public class MultElement extends Element {
+    public MultElement(
+        AbstractMultPointReader reader, TimeValuePair timeValuePair, MergeReaderPriority priority) {
+      super(reader, timeValuePair, priority);
+    }
+
+    @Override
+    public boolean hasNext() throws IOException {
+      return ((AbstractMultPointReader) reader).hasNextTimeValuePair(fullPath);
+    }
+
+    @Override
+    public void next() throws IOException {
+      timeValuePair = ((AbstractMultPointReader) reader).nextTimeValuePair(fullPath);
+    }
   }
 }
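
The new MultElement inner class is what lets a single heap merge several per-path streams: each heap entry remembers which path it advances on the shared multi-path reader. A JDK-only sketch of seeding such a heap, where MultiPathReader approximates AbstractMultPointReader:

    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    class MultHeapSketch {
      interface MultiPathReader {
        boolean hasNext(String path);
        long[] next(String path); // {timestamp, encodedValue}
      }

      // one leading element per path; the heap orders entries by timestamp
      static PriorityQueue<long[]> seed(MultiPathReader reader, List<String> paths) {
        PriorityQueue<long[]> heap =
            new PriorityQueue<>(Comparator.comparingLong(e -> e[0]));
        for (String path : paths) {
          if (reader.hasNext(path)) {
            heap.add(reader.next(path));
          }
        }
        return heap;
      }
    }
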
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java
deleted file mode 100644
index a344288..0000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
... 55867 lines suppressed ...