Posted to commits@iotdb.apache.org by qi...@apache.org on 2019/11/28 07:29:27 UTC

[incubator-iotdb] 02/02: fix some read and write issues

This is an automated email from the ASF dual-hosted git repository.

qiaojialin pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 62be1866bd5a98455e58fa08664efa4ec430614e
Author: lta <li...@163.com>
AuthorDate: Mon Jul 22 12:22:16 2019 +0800

    fix some read and write issues
---
 .travis.yml                                        |  40 +-
 License                                            |   1 +
 README.md                                          |   9 +
 cluster/pom.xml                                    |   5 +
 .../iotdb/cluster/concurrent/ThreadName.java       |   5 +
 ...Manager.java => NodeAsClientThreadManager.java} |  20 +-
 ...QPTaskManager.java => QPTaskThreadManager.java} |  14 +-
 ...erManager.java => QueryTimerThreadManager.java} |  10 +-
 .../cluster/concurrent/pool/ThreadPoolManager.java |  15 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |  42 +-
 .../ClusterConsistencyLevel.java}                  |  30 +-
 .../iotdb/cluster/config/ClusterConstant.java      |   8 +-
 .../iotdb/cluster/config/ClusterDescriptor.java    |  43 +-
 .../org/apache/iotdb/cluster/entity/Server.java    |  48 +-
 .../cluster/entity/raft/DataStateMachine.java      |  20 +-
 .../cluster/entity/raft/MetadataStateManchine.java |   2 +-
 .../iotdb/cluster/entity/raft/RaftService.java     |  20 +-
 .../cluster/qp/executor/AbstractQPExecutor.java    | 117 +++-
 .../qp/executor/ClusterQueryProcessExecutor.java   |  18 +-
 .../cluster/qp/executor/NonQueryExecutor.java      |  84 +--
 .../cluster/qp/executor/QueryMetadataExecutor.java | 200 ++++--
 .../apache/iotdb/cluster/qp/task/BatchQPTask.java  | 116 ++--
 .../iotdb/cluster/qp/task/DataQueryTask.java       |  12 +-
 .../org/apache/iotdb/cluster/qp/task/QPTask.java   |  54 +-
 .../apache/iotdb/cluster/qp/task/SingleQPTask.java |  10 +-
 .../query/common/ClusterNullableBatchData.java     |  79 +++
 .../dataset/ClusterDataSetWithTimeGenerator.java   |   5 +-
 .../ClusterGroupByDataSetWithOnlyTimeFilter.java   | 159 +++++
 .../ClusterGroupByDataSetWithTimeGenerator.java    | 112 +--
 .../executor/ClusterAggregateEngineExecutor.java   | 249 +++++++
 .../executor/ClusterExecutorWithTimeGenerator.java |  38 +-
 .../ClusterExecutorWithoutTimeGenerator.java       |  25 +-
 .../query/executor/ClusterFillEngineExecutor.java  |  75 +-
 .../cluster/query/executor/ClusterQueryRouter.java | 111 ++-
 .../query/factory/ClusterSeriesReaderFactory.java  |  31 +-
 .../coordinatornode/ClusterRpcQueryManager.java    |  14 +-
 .../ClusterRpcSingleQueryManager.java              | 245 +++----
 ...oupEntity.java => FilterSeriesGroupEntity.java} |   4 +-
 .../coordinatornode/IClusterRpcQueryManager.java   |   5 +
 .../IClusterRpcSingleQueryManager.java             |  11 +-
 ...oupEntity.java => SelectSeriesGroupEntity.java} |  56 +-
 .../querynode/ClusterLocalQueryManager.java        |  14 +-
 .../querynode/ClusterLocalSingleQueryManager.java  | 273 ++++++--
 .../querynode/IClusterLocalQueryManager.java       |   9 +-
 .../querynode/IClusterLocalSingleQueryManager.java |   5 +-
 .../AbstractClusterPointReader.java                |   7 +-
 .../coordinatornode/ClusterFilterSeriesReader.java |  19 +-
 .../coordinatornode/ClusterSelectSeriesReader.java |  25 +-
 ...ava => ClusterFillSelectSeriesBatchReader.java} |  29 +-
 ...a => ClusterFilterSeriesBatchReaderEntity.java} |  15 +-
 ...lusterGroupBySelectSeriesBatchReaderEntity.java |  84 +++
 ...or.java => ClusterSelectSeriesBatchReader.java} |  14 +-
 ...ClusterSelectSeriesBatchReaderByTimestamp.java} |   7 +-
 ...a => ClusterSelectSeriesBatchReaderEntity.java} |  41 +-
 ...r.java => IClusterSelectSeriesBatchReader.java} |   4 +-
 ...r.java => IClusterSeriesBatchReaderEntity.java} |   6 +-
 .../timegenerator/ClusterNodeConstructor.java      |   4 +-
 .../cluster/query/utils/ClusterRpcReaderUtils.java | 105 ++-
 .../query/utils/ClusterTimeValuePairUtils.java     |  70 ++
 .../iotdb/cluster/query/utils/ExpressionUtils.java |  14 +-
 .../query/utils/QueryPlanPartitionUtils.java       | 224 ++++--
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  14 +-
 .../rpc/raft/impl/RaftNodeAsClientManager.java     | 197 ++----
 ...ocessor.java => QueryMetricAsyncProcessor.java} |  26 +-
 .../nonquery/DataGroupNonQueryAsyncProcessor.java  |  10 +-
 .../nonquery/MetaGroupNonQueryAsyncProcessor.java  |   2 +-
 .../querydata/InitSeriesReaderSyncProcessor.java   |   9 +-
 .../querydata/QuerySeriesDataSyncProcessor.java    |   1 -
 .../querymetadata/QueryMetadataAsyncProcessor.java |   6 +-
 .../QueryMetadataInStringAsyncProcessor.java       |   4 +-
 .../querymetadata/QueryPathsAsyncProcessor.java    |   4 +-
 .../QuerySeriesTypeAsyncProcessor.java             |   4 +-
 .../QueryTimeSeriesAsyncProcessor.java             |   4 +-
 .../QueryJobNumAsyncProcessor.java}                |  27 +-
 .../QueryLeaderAsyncProcessor.java}                |  27 +-
 .../querymetric/QueryMetricAsyncProcessor.java     |  45 ++
 .../QueryStatusAsyncProcessor.java}                |  26 +-
 .../rpc/raft/request/BasicNonQueryRequest.java     |   1 -
 .../rpc/raft/request/QueryMetricRequest.java       |  21 +-
 .../request/querydata/InitSeriesReaderRequest.java |  72 +-
 .../QuerySeriesDataByTimestampRequest.java         |  17 +-
 .../request/querydata/QuerySeriesDataRequest.java  |  16 +-
 .../request/querymetric/QueryJobNumRequest.java}   |  25 +-
 .../request/querymetric/QueryLeaderRequest.java}   |  25 +-
 .../request/querymetric/QueryMetricRequest.java}   |  28 +-
 .../request/querymetric/QueryStatusRequest.java}   |  27 +-
 .../rpc/raft/response/QueryMetricResponse.java     |  29 +-
 .../nonquery/DataGroupNonQueryResponse.java        |  12 +
 .../QueryJobNumResponse.java}                      |  28 +-
 .../QueryLeaderResponse.java}                      |  28 +-
 .../QueryMetricResponse.java}                      |  28 +-
 .../response/querymetric/QueryStatusResponse.java} |  33 +-
 .../iotdb/cluster/service/ClusterMonitor.java      | 124 ++++
 .../iotdb/cluster/service/ClusterMonitorMBean.java |  94 +++
 .../cluster/service/TSServiceClusterImpl.java      | 102 ++-
 .../iotdb/cluster/service/nodetool/Host.java       |  84 +++
 .../apache/iotdb/cluster/service/nodetool/Lag.java |  33 +-
 .../iotdb/cluster/service/nodetool/NodeTool.java   | 148 ++++
 .../iotdb/cluster/service/nodetool/Query.java      |  47 ++
 .../iotdb/cluster/service/nodetool/Ring.java       |  28 +-
 .../iotdb/cluster/service/nodetool/Status.java     |  36 +-
 .../cluster/service/nodetool/StorageGroup.java     |  54 ++
 .../iotdb/cluster/utils/QPExecutorUtils.java       |  23 +-
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  | 545 +++++++++++++--
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |  14 +
 .../apache/iotdb/cluster/utils/hash/Router.java    |  48 +-
 .../iotdb/cluster/utils/hash/VirtualNode.java      |  19 +-
 ...nagerTest.java => QPTaskThreadManagerTest.java} |  16 +-
 .../cluster/config/ClusterDescriptorTest.java      |  18 +-
 .../apache/iotdb/cluster/integration/Constant.java | 100 +++
 .../iotdb/cluster/integration/IOTDBGroupByIT.java  | 490 +++++++++++++
 .../cluster}/integration/IoTDBAggregationIT.java   | 157 ++++-
 .../integration/IoTDBAggregationLargeDataIT.java   | 124 ++--
 .../integration/IoTDBAggregationSmallDataIT.java   | 641 +++++++++++++++++
 .../cluster/integration/IoTDBFillQueryIT.java      | 357 ++++++++++
 .../integration/IoTDBMetadataFetchAbstract.java    |  63 +-
 .../integration/IoTDBMetadataFetchRemoteIT.java    |   5 +-
 .../IoTDBQueryIT.java}                             |   5 +-
 .../IoTDBQueryLargeDataIT.java}                    |   6 +-
 .../iotdb/cluster/qp/AbstractQPExecutorTest.java   |  91 ++-
 .../cluster/qp/executor/NonQueryExecutorTest.java  |  23 +-
 .../query/manager/ClusterLocalManagerTest.java     | 136 ++--
 .../query/manager/ClusterRpcManagerTest.java       |  46 +-
 .../cluster/query/utils/ExpressionUtilsTest.java   |  17 +-
 .../query/utils/QueryPlanPartitionUtilsTest.java   |  60 +-
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  79 ++-
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   4 +
 .../iotdb/cluster/utils/hash/MD5HashTest.java      |   8 +-
 .../iotdb/cluster/utils/hash/PhysicalNodeTest.java |   4 +-
 .../iotdb/cluster/utils/hash/RouterTest.java       |  27 +-
 cluster/src/test/resources/logback.xml             |   2 +-
 docs/Documentation/QuickStart.md                   |  84 ++-
 .../UserGuideV0.7.0/4-Deployment and Management.md |  71 +-
 .../UserGuideV0.7.0/5-SQL Documentation.md         |  17 +-
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            | 356 ++++++++++
 .../Documentation/UserGuideV0.7.0/7-Tools-spark.md | 286 ++++----
 .../iotdb/tsfile/hadoop/TSFRecordWriter.java       |  11 +-
 .../iotdb/tsfile/hadoop/example/TsFileHelper.java  |   6 +-
 .../cn/edu/thu/tsfile/hadoop/TsFileTestHelper.java |   6 +-
 iotdb-cli/cli/bin/export-csv.bat                   |  10 +-
 iotdb-cli/cli/bin/export-csv.sh                    |   8 +-
 iotdb-cli/cli/bin/import-csv.bat                   |  10 +-
 iotdb-cli/cli/bin/import-csv.sh                    |   8 +-
 iotdb-cli/cli/bin/start-client.bat                 |   8 +-
 iotdb-cli/cli/bin/start-client.sh                  |   6 +-
 iotdb-cli/pom.xml                                  |   7 +-
 .../apache/iotdb/cli/client/AbstractClient.java    |   4 +
 .../java/org/apache/iotdb/cli/tool/ExportCsv.java  |  24 +-
 .../java/org/apache/iotdb/cli/tool/ImportCsv.java  |  44 +-
 .../iotdb/bin/nodetool.bat                         |   9 +-
 clean.sh => iotdb/iotdb/bin/nodetool.sh            |  46 +-
 iotdb/iotdb/conf/iotdb-cluster.properties          |  29 +-
 iotdb/iotdb/conf/iotdb-engine.properties           | 125 ++--
 iotdb/iotdb/conf/iotdb-env.sh                      |   2 +-
 iotdb/iotdb/conf/iotdb-sync-client.properties      |   8 +-
 .../org/apache/iotdb/db/sql/parse/TSParser.g       |   8 +-
 .../iotdb/db/auth/authorizer/BasicAuthorizer.java  |   3 +-
 .../apache/iotdb/db/auth/entity/PathPrivilege.java |   1 -
 .../org/apache/iotdb/db/conf/IoTDBConstant.java    |   4 +
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |  10 +-
 .../java/org/apache/iotdb/db/engine/Processor.java |   2 +-
 .../engine/bufferwrite/BufferWriteProcessor.java   | 170 +++--
 .../bufferwrite/RestorableTsFileIOWriter.java      |  19 +-
 .../iotdb/db/engine/filenode/FileNodeManager.java  |  52 +-
 .../db/engine/filenode/FileNodeProcessor.java      | 173 +++--
 .../iotdb/db/engine/filenode/TsFileResource.java   | 216 +++---
 .../db/engine/memcontrol/BasicMemController.java   |   6 +-
 .../engine/memcontrol/DisabledMemController.java   |  41 +-
 .../iotdb/db/engine/memtable/AbstractMemTable.java |  14 +-
 .../apache/iotdb/db/engine/memtable/IMemTable.java |   7 +-
 .../db/engine/memtable/IWritableMemChunk.java      |   2 +
 .../db/engine/memtable/MemTableFlushUtil.java      |  10 +-
 .../iotdb/db/engine/memtable/WritableMemChunk.java |  27 +-
 .../io/LocalTextModificationAccessor.java          |   1 +
 .../db/engine/overflow/io/OverflowProcessor.java   | 123 ++--
 .../overflow/io/OverflowedTsFileIOWriter.java      |  25 +-
 .../db/engine/querycontext/ReadOnlyMemChunk.java   |   2 +
 .../exception/BufferWriteProcessorException.java   |   2 +-
 .../db/exception/FileNodeProcessorException.java   |   6 +-
 .../db/exception/OverflowProcessorException.java   |   2 +-
 .../db/exception/qp/IllegalASTFormatException.java |   7 +
 .../db/exception/qp/LogicalOperatorException.java  |   7 +
 .../db/exception/qp/LogicalOptimizeException.java  |   7 +
 .../db/exception/qp/QueryProcessorException.java   |   4 +
 .../java/org/apache/iotdb/db/metadata/MGraph.java  |  15 +-
 .../org/apache/iotdb/db/metadata/MManager.java     |  16 +-
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  11 +-
 .../org/apache/iotdb/db/metadata/Metadata.java     | 102 +--
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |   1 -
 .../org/apache/iotdb/db/qp/QueryProcessor.java     |   2 +-
 .../db/qp/executor/IQueryProcessExecutor.java      |  15 +-
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   | 281 +++++---
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |  24 +-
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   2 +-
 .../iotdb/db/qp/logical/crud/InsertOperator.java   |  12 +-
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |   4 +
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |  27 +-
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |   6 +
 .../db/qp/physical/transfer/CodecInstances.java    |  18 +-
 .../iotdb/db/qp/strategy/LogicalGenerator.java     |   8 +-
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |   2 +-
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |  12 +-
 .../db/query/aggregation/AggregateFunction.java    |   1 -
 .../db/query/aggregation/impl/MeanAggrFunc.java    |  13 +-
 .../db/query/aggregation/impl/SumAggrFunc.java     |  10 +
 .../db/query/control/QueryResourceManager.java     |   6 +-
 .../SumAggrFunc.java => dataset/AuthDataSet.java}  |  34 +-
 .../dataset/groupby/GroupByEngineDataSet.java      |   5 +-
 .../groupby/GroupByWithOnlyTimeFilterDataSet.java  |   7 +-
 .../groupby/GroupByWithValueFilterDataSet.java     |  20 +-
 ...a => AbstractExecutorWithoutTimeGenerator.java} |   6 +-
 .../db/query/executor/AbstractQueryRouter.java     | 120 ++++
 .../db/query/executor/AggregateEngineExecutor.java |  85 ++-
 .../EngineExecutorWithoutTimeGenerator.java        |   5 +-
 .../iotdb/db/query/executor/EngineQueryRouter.java |  47 +-
 .../db/query/executor/FillEngineExecutor.java      |  11 +-
 .../db/query/executor/IFillEngineExecutor.java     |  22 +-
 .../java/org/apache/iotdb/db/query/fill/IFill.java |  14 +-
 .../org/apache/iotdb/db/query/fill/LinearFill.java |   4 +-
 .../apache/iotdb/db/query/fill/PreviousFill.java   |   6 +-
 .../timegenerator/AbstractNodeConstructor.java     |   4 +
 .../apache/iotdb/db/service/CloseMergeService.java |   6 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   9 +-
 .../org/apache/iotdb/db/service/JDBCService.java   |   9 +-
 .../org/apache/iotdb/db/service/JMXService.java    |   5 +-
 .../java/org/apache/iotdb/db/service/Monitor.java  |   3 +-
 .../apache/iotdb/db/service/RegisterManager.java   |   2 +-
 .../org/apache/iotdb/db/service/ServiceType.java   |   1 +
 .../org/apache/iotdb/db/service/TSServiceImpl.java | 270 +++++---
 .../org/apache/iotdb/db/sql/parse/ParseDriver.java |   2 +-
 .../org/apache/iotdb/db/sync/conf/Constans.java    |  10 +
 .../iotdb/db/sync/conf/SyncSenderConfig.java       |  24 +-
 .../iotdb/db/sync/conf/SyncSenderDescriptor.java   |   7 +-
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    |  91 +--
 .../apache/iotdb/db/sync/sender/SyncSender.java    |  10 +
 .../iotdb/db/sync/sender/SyncSenderImpl.java       | 106 ++-
 .../java/org/apache/iotdb/db/tools/WalChecker.java |  11 +-
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |  24 +
 .../org/apache/iotdb/db/utils/RecordUtils.java     |   2 +-
 .../apache/iotdb/db/writelog/io/RAFLogReader.java  |   3 +-
 .../writelog/manager/MultiFileLogNodeManager.java  |  10 +-
 .../db/writelog/manager/WriteLogNodeManager.java   |   6 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   2 +-
 .../recover/ExclusiveLogRecoverPerformer.java      |  11 +-
 .../db/writelog/replay/ConcreteLogReplayer.java    |  14 +-
 .../bufferwrite/BufferWriteProcessorNewTest.java   |   1 -
 .../db/engine/filenode/TsFileResourceTest.java     |  12 +-
 .../engine/overflow/io/OverflowProcessorTest.java  |   2 +
 .../engine/overflow/io/OverflowResourceTest.java   |  24 +-
 .../iotdb/db/integration/IoTDBAggregationIT.java   |  86 ++-
 .../integration/IoTDBAggregationLargeDataIT.java   |   2 -
 .../iotdb/db/integration/IoTDBAuthorizationIT.java | 416 +++++------
 .../transfer/PhysicalPlanLogTransferTest.java      |   2 +-
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java |  10 +-
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |  34 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |  10 +-
 .../apache/iotdb/db/writelog/PerformanceTest.java  |  14 +-
 .../org/apache/iotdb/db/writelog/RecoverTest.java  |  12 +-
 .../iotdb/db/writelog/WriteLogNodeManagerTest.java |   8 +-
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |  25 +-
 .../iotdb/db/writelog/io/LogWriterReaderTest.java  |   8 +-
 jdbc/README.md                                     |   4 +-
 .../org/apache/iotdb/jdbc/IoTDBConnection.java     |   6 +-
 .../org/apache/iotdb/jdbc/IoTDBQueryResultSet.java |   9 +
 .../java/org/apache/iotdb/jdbc/IoTDBStatement.java |  15 +-
 pom.xml                                            |   5 +-
 service-rpc/src/main/thrift/rpc.thrift             |   3 +-
 spark/README.md                                    | 407 +++++++----
 spark/pom.xml                                      |   7 +-
 .../org/apache/iotdb/tsfile/io/CreateTSFile.java   | 150 ----
 .../java/org/apache/iotdb/tsfile/io/HDFSInput.java | 147 ++++
 .../apache/iotdb/tsfile/io/HDFSInputStream.java    | 111 ---
 .../io/{HDFSOutputStream.java => HDFSOutput.java}  |  52 +-
 .../apache/iotdb/tsfile/io/TsFileOutputFormat.java |  10 +-
 .../apache/iotdb/tsfile/io/TsFileRecordWriter.java |  19 +-
 .../java/org/apache/iotdb/tsfile/qp/Executor.java  |  51 --
 .../org/apache/iotdb/tsfile/qp/QueryProcessor.java | 153 -----
 .../iotdb/tsfile/qp/common/BasicOperator.java      |  75 --
 .../iotdb/tsfile/qp/common/FilterOperator.java     | 157 -----
 .../apache/iotdb/tsfile/qp/common/SQLConstant.java | 150 ----
 .../apache/iotdb/tsfile/qp/common/SingleQuery.java |  63 --
 .../apache/iotdb/tsfile/qp/common/TSQueryPlan.java |  63 --
 .../tsfile/qp/exception/DNFOptimizeException.java  |  34 -
 .../qp/exception/LogicalOptimizeException.java     |  33 -
 .../tsfile/qp/exception/MergeFilterException.java  |  30 -
 .../tsfile/qp/exception/RemoveNotException.java    |  34 -
 .../tsfile/qp/optimizer/DNFFilterOptimizer.java    | 157 -----
 .../tsfile/qp/optimizer/IFilterOptimizer.java      |  34 -
 .../qp/optimizer/MergeSingleFilterOptimizer.java   | 141 ----
 .../tsfile/qp/optimizer/PhysicalOptimizer.java     | 228 ------
 .../tsfile/qp/optimizer/RemoveNotOptimizer.java    | 108 ---
 .../scala/org/apache/iotdb/tsfile/Converter.scala  | 764 ++++++++++++---------
 .../org/apache/iotdb/tsfile/DefaultSource.scala    | 177 ++---
 .../apache/iotdb/tsfile/TsFileOutputWriter.scala   |  52 +-
 .../apache/iotdb/tsfile/TsFileWriterFactory.scala  |  42 +-
 .../scala/org/apache/iotdb/tsfile/package.scala    |  36 +-
 spark/src/test/resources/test.tsfile               | Bin 1406 -> 0 bytes
 .../cn/edu/tsinghua/tsfile/ConverterTest.scala     | 130 ----
 .../scala/cn/edu/tsinghua/tsfile/TSFileSuit.scala  | 194 ------
 .../scala/org/apache/iotdb/tool/TsFileExample.java | 106 +++
 .../scala/org/apache/iotdb/tool/TsFileWrite.java   | 215 ++++++
 .../org/apache/iotdb/tsfile/ConverterTest.scala    | 266 +++++++
 .../org/apache/iotdb/tsfile/HDFSInputTest.java     |  79 +++
 .../scala/org/apache/iotdb/tsfile/TSFileSuit.scala | 217 ++++++
 .../apache/iotdb/tsfile/TsFileSequenceRead.java    |   4 +-
 .../tsfile/common/constant/QueryConstant.java      |  11 +-
 .../tsfile/exception/write/PageException.java      |   8 +
 .../exception/write/WriteProcessException.java     |  16 +-
 .../apache/iotdb/tsfile/read/ReadOnlyTsFile.java   |  19 +-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |  15 +-
 .../apache/iotdb/tsfile/read/common/BatchData.java |   4 +-
 .../org/apache/iotdb/tsfile/read/common/Path.java  |  11 +-
 .../apache/iotdb/tsfile/read/common/TimeRange.java | 328 +++++++++
 .../tsfile/read/controller/MetadataQuerier.java    |  15 +
 .../read/controller/MetadataQuerierByFileImpl.java | 159 +++--
 .../tsfile/read/expression/ExpressionType.java     |  29 +-
 .../query/executor/ExecutorWithTimeGenerator.java  |  33 +-
 .../tsfile/read/query/executor/TsFileExecutor.java |  52 +-
 .../apache/iotdb/tsfile/write/TsFileWriter.java    |  12 +
 .../iotdb/tsfile/write/chunk/ChunkBuffer.java      |   7 +-
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |   7 +-
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |  11 +
 .../iotdb/tsfile/read/ReadInPartitionTest.java     | 240 +++++++
 .../org/apache/iotdb/tsfile/read/ReadTest.java     |   2 +-
 .../iotdb/tsfile/read/common/TimeRangeTest.java    | 265 +++++++
 .../controller/MetadataQuerierByFileImplTest.java  | 137 +++-
 .../iotdb/tsfile/write/TsFileReadWriteTest.java    |   4 +-
 327 files changed, 11903 insertions(+), 6443 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 077d8ab..6202bf5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,6 +31,11 @@ matrix:
     - os: osx
       osx_image: xcode10.1 # with JDK11.0.1+13 installed
       name: osx-oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: osx
       osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
       name: osx-oraclejdk8
@@ -44,6 +49,11 @@ matrix:
             - AdoptOpenJDK/openjdk
           update: true
           casks: adoptopenjdk-openjdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: osx
       osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
       name: osx-openjdk8
@@ -54,7 +64,6 @@ matrix:
            - AdoptOpenJDK/openjdk
           update: true
           casks: adoptopenjdk-openjdk8
-
     - os: windows
       language: c
       name: win-oraclejdk11
@@ -62,8 +71,7 @@ matrix:
         - choco install jdk11 -params 'installdir=c:\\java11'
         - export PATH=$PATH:"/c/java11/bin"
         - export JAVA_HOME="/c/java11"
-#        - choco install maven
-        - wget https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - wget -q https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
         - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
         - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
         - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
@@ -71,6 +79,25 @@ matrix:
       script:
         - java -version
         - mvn -version
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+          
+    - os: windows
+      language: c
+      name: win-oraclejdk8
+      before_install:
+        - choco install jdk8 -params 'installdir=c:\\jdk8'
+        - wget https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
+      before_script:
+        - export "JAVA_HOME=/c/jdk8"
+        - export "PATH=/c/jdk8/bin:$PATH"
+        - export "PATH=/c/jdk8/jre/bin:$PATH"
+        - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "PATH=/c/mvn361/apache-maven-3.6.1/bin:$PATH"
+      script:
+        - java -version
+        - mvn -version
         - mvn -B clean integration-test
 
     - os: linux
@@ -85,7 +112,7 @@ matrix:
         - export PATH=$JAVA_HOME/bin:$PATH
       script:
         - java -version
-        - mvn -B clean integration-test
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: linux
       name: linux-openjdk8
       dist: trusty
@@ -98,6 +125,11 @@ matrix:
       name: linux-oraclejdk11
       dist: trusty
       jdk: oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
 
 cache:
   directories:
diff --git a/License b/License
index 1ea4af3..58f5c09 100644
--- a/License
+++ b/License
@@ -255,6 +255,7 @@ MIT License
 ------------
 org.slf4j:slf4j-api
 org.mockito:mockito-all:1.10.19
+me.tongfei:progressbar:0.7.3
 
 
 EDL 1.0
diff --git a/README.md b/README.md
index a418329..d412168 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,15 @@ Let $IOTDB_HOME = /workspace/incubator-iotdb/iotdb/iotdb/
 
 Let $IOTDB_CLI_HOME = /workspace/incubator-iotdb/iotdb-cli/cli
 
+Note:
+* If `IOTDB_HOME` is not explicitly assigned, 
+then `IOTDB_HOME` defaults to the direct parent directory of `bin/start-server.sh` on Unix/OS X 
+(or that of `bin\start-server.bat` on Windows).
+
+* If `IOTDB_CLI_HOME` is not explicitly assigned, 
+then `IOTDB_CLI_HOME` defaults to the direct parent directory of `bin/start-client.sh` on 
+Unix/OS X (or that of `bin\start-client.bat` on Windows).
+
 If this is not your first time building IoTDB, remember to delete the following files:
 
 ```
diff --git a/cluster/pom.xml b/cluster/pom.xml
index 25d13ea..4630f70 100644
--- a/cluster/pom.xml
+++ b/cluster/pom.xml
@@ -76,6 +76,11 @@
                 </exclusion>
             </exclusions>
         </dependency>
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>airline</artifactId>
+            <version>0.8</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
index 9212258..2e4cef6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
@@ -21,6 +21,11 @@ package org.apache.iotdb.cluster.concurrent;
 public enum ThreadName {
 
   /**
+   * Node as client thread
+   */
+  NODE_AS_CLIENT("Node-As-Client-Thread"),
+
+  /**
    * QP Task thread
    */
   QP_TASK("QP-Task-Thread"),
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
similarity index 72%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
index cc26913..3b93623 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
@@ -19,23 +19,21 @@
 package org.apache.iotdb.cluster.concurrent.pool;
 
 import org.apache.iotdb.cluster.concurrent.ThreadName;
-import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 
 /**
- * Manage all qp tasks in thread.
+ * Manage all node-as-client tasks in a thread pool.
  */
-public class QPTaskManager extends ThreadPoolManager {
+public class NodeAsClientThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "qp task manager";
+  private static final String MANAGER_NAME = "node as client thread manager";
 
-  private QPTaskManager() {
+  private NodeAsClientThreadManager() {
     init();
   }
 
-  public static QPTaskManager getInstance() {
-    return QPTaskManager.InstanceHolder.instance;
+  public static NodeAsClientThreadManager getInstance() {
+    return NodeAsClientThreadManager.InstanceHolder.instance;
   }
 
   /**
@@ -48,12 +46,12 @@ public class QPTaskManager extends ThreadPoolManager {
 
   @Override
   public String getThreadName() {
-    return ThreadName.QP_TASK.getName();
+    return ThreadName.NODE_AS_CLIENT.getName();
   }
 
   @Override
   public int getThreadPoolSize() {
-    return ClusterDescriptor.getInstance().getConfig().getConcurrentQPSubTaskThread();
+    return ClusterDescriptor.getInstance().getConfig().getConcurrentInnerRpcClientThread();
   }
 
   private static class InstanceHolder {
@@ -61,6 +59,6 @@ public class QPTaskManager extends ThreadPoolManager {
     private InstanceHolder() {
     }
 
-    private static QPTaskManager instance = new QPTaskManager();
+    private static NodeAsClientThreadManager instance = new NodeAsClientThreadManager();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
similarity index 77%
rename from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
index cc26913..1e33b77 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
@@ -19,23 +19,21 @@
 package org.apache.iotdb.cluster.concurrent.pool;
 
 import org.apache.iotdb.cluster.concurrent.ThreadName;
-import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 
 /**
  * Manage all qp tasks in thread.
  */
-public class QPTaskManager extends ThreadPoolManager {
+public class QPTaskThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "qp task manager";
+  private static final String MANAGER_NAME = "qp-task-thread-manager";
 
-  private QPTaskManager() {
+  private QPTaskThreadManager() {
     init();
   }
 
-  public static QPTaskManager getInstance() {
-    return QPTaskManager.InstanceHolder.instance;
+  public static QPTaskThreadManager getInstance() {
+    return QPTaskThreadManager.InstanceHolder.instance;
   }
 
   /**
@@ -61,6 +59,6 @@ public class QPTaskManager extends ThreadPoolManager {
     private InstanceHolder() {
     }
 
-    private static QPTaskManager instance = new QPTaskManager();
+    private static QPTaskThreadManager instance = new QPTaskThreadManager();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
similarity index 87%
rename from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
index 779488c..1362825 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
@@ -28,9 +28,9 @@ import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
  * Manage all query timers in the query node; if a timer times out, close all query resources for
  * the remote coordinator node.
  */
-public class QueryTimerManager extends ThreadPoolManager {
+public class QueryTimerThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "remote-query-timer-manager";
+  private static final String MANAGER_NAME = "remote-query-timer-thread-manager";
 
   private static final int CORE_POOL_SIZE = 1;
 
@@ -39,8 +39,8 @@ public class QueryTimerManager extends ThreadPoolManager {
     pool = IoTDBThreadPoolFactory.newScheduledThreadPool(getThreadPoolSize(), getThreadName());
   }
 
-  public static QueryTimerManager getInstance() {
-    return QueryTimerManager.QueryTimerManagerHolder.INSTANCE;
+  public static QueryTimerThreadManager getInstance() {
+    return QueryTimerThreadManager.QueryTimerManagerHolder.INSTANCE;
   }
 
   @Override
@@ -65,7 +65,7 @@ public class QueryTimerManager extends ThreadPoolManager {
 
   private static class QueryTimerManagerHolder {
 
-    private static final QueryTimerManager INSTANCE = new QueryTimerManager();
+    private static final QueryTimerThreadManager INSTANCE = new QueryTimerThreadManager();
 
     private QueryTimerManagerHolder() {
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
index 828cc1a..60e8a75 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
@@ -24,9 +24,13 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public abstract class ThreadPoolManager {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
+
   ExecutorService pool;
 
   public void checkInit() {
@@ -38,7 +42,7 @@ public abstract class ThreadPoolManager {
   /**
    * Init pool manager
    */
-  public void init(){
+  public void init() {
     pool = IoTDBThreadPoolFactory.newFixedThreadPool(getThreadPoolSize(), getThreadName());
   }
 
@@ -53,14 +57,13 @@ public abstract class ThreadPoolManager {
   public void close(boolean block, long timeout) throws ProcessorException {
     if (pool != null) {
       try {
-        pool.shutdown();
+        pool.shutdownNow();
         if (block) {
           try {
             if (!pool.awaitTermination(timeout, TimeUnit.MILLISECONDS)) {
-              throw new ProcessorException(
-                  String
-                      .format("%s thread pool doesn't exit after %d ms", getManagerName(),
-                          timeout));
+              LOGGER
+                  .debug(String.format("%s thread pool doesn't exit after %d ms", getManagerName(),
+                      timeout));
             }
           } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
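For reference, the close() change above swaps shutdown() for shutdownNow() and downgrades a termination timeout from a thrown ProcessorException to a debug log. A minimal standalone sketch of that pattern, using plain JDK types; the pool size and timeout here are arbitrary:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ClosePatternSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4); // hypothetical pool size
    pool.shutdownNow(); // interrupt running workers instead of draining the queue
    // A timeout is now only reported (at debug level in the real code),
    // not surfaced as a ProcessorException.
    if (!pool.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
      System.out.println("thread pool doesn't exit after 1000 ms");
    }
  }
}
```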
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index 0e6472d..95905dd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -110,36 +110,38 @@ public class ClusterConfig {
   private int numOfVirtualNodes = 2;
 
   /**
-   * Maximum number of @NodeAsClient usage
+   * Maximum number of inner RPC client threads. When this value is <= 0, use CPU core count * 5
    */
-  private int maxNumOfInnerRpcClient = 500;
+  private int concurrentInnerRpcClientThread = Runtime.getRuntime().availableProcessors() * 5;
 
   /**
-   * Maximum number of queue length to use @NodeAsClient, the request which exceed to this number
-   * will be rejected.
+   * Maximum length of the queue of QP tasks waiting to be executed. If the number of
+   * waiting QP tasks exceeds this limit, new QP tasks will be rejected.
    */
-  private int maxQueueNumOfInnerRpcClient = 500;
+  private int maxQueueNumOfQPTask = 500;
 
   /**
-   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   * ReadMetadataConsistencyLevel: strong or weak. The default consistency level is strong.
+   * This parameter is case-insensitive.
    */
-  private int readMetadataConsistencyLevel = 1;
+  private int readMetadataConsistencyLevel = ClusterConsistencyLevel.STRONG.ordinal();
 
   /**
-   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   * ReadDataConsistencyLevel: strong or weak. The default consistency level is strong.
+   * This parameter is case-insensitive.
    */
-  private int readDataConsistencyLevel = 1;
+  private int readDataConsistencyLevel = ClusterConsistencyLevel.STRONG.ordinal();
 
   /**
    * Maximum number of threads which execute tasks generated by client requests concurrently. Each
    * client request corresponds to a QP Task. A QP task may be divided into several sub-tasks. So
    * this value is the sum of all sub-tasks. When this value <= 0, use CPU core number * 10
    */
-  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 10;
+  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 5;
 
   /**
-   * Batch data size read from remote query node once while reading, default value is 10000.
-   * The smaller the parameter, the more communication times and the more time-consuming it is.
+   * Batch size of data read from a remote query node in one request; the default value is 10000.
+   * The smaller the value, the more round trips are needed and the more time-consuming reading is.
    */
   private int batchReadSize = 10000;
 
@@ -297,20 +299,20 @@ public class ClusterConfig {
     this.numOfVirtualNodes = numOfVirtualNodes;
   }
 
-  public int getMaxNumOfInnerRpcClient() {
-    return maxNumOfInnerRpcClient;
+  public int getConcurrentInnerRpcClientThread() {
+    return concurrentInnerRpcClientThread;
   }
 
-  public void setMaxNumOfInnerRpcClient(int maxNumOfInnerRpcClient) {
-    this.maxNumOfInnerRpcClient = maxNumOfInnerRpcClient;
+  public void setConcurrentInnerRpcClientThread(int concurrentInnerRpcClientThread) {
+    this.concurrentInnerRpcClientThread = concurrentInnerRpcClientThread;
   }
 
-  public int getMaxQueueNumOfInnerRpcClient() {
-    return maxQueueNumOfInnerRpcClient;
+  public int getMaxQueueNumOfQPTask() {
+    return maxQueueNumOfQPTask;
   }
 
-  public void setMaxQueueNumOfInnerRpcClient(int maxQueueNumOfInnerRpcClient) {
-    this.maxQueueNumOfInnerRpcClient = maxQueueNumOfInnerRpcClient;
+  public void setMaxQueueNumOfQPTask(int maxQueueNumOfQPTask) {
+    this.maxQueueNumOfQPTask = maxQueueNumOfQPTask;
   }
 
   public int getReadMetadataConsistencyLevel() {
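The new concurrentInnerRpcClientThread field follows the same sizing convention as concurrentQPSubTaskThread: a non-positive configured value falls back to a multiple of the CPU core count (the field default above uses * 5, while the fallback applied in ClusterDescriptor further down uses * 10). A hedged sketch of that convention; the multiplier is taken from the field default:

```java
public class ThreadSizingSketch {
  public static void main(String[] args) {
    int configured = -1; // e.g. an unset or non-positive value from the properties file
    int cores = Runtime.getRuntime().availableProcessors();
    // Multiplier is an assumption based on the field default above (* 5).
    int clientThreads = configured <= 0 ? cores * 5 : configured;
    System.out.println("inner rpc client threads: " + clientThreads);
  }
}
```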
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
similarity index 59%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
index 9212258..80f0c4a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
@@ -16,27 +16,33 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
-
-public enum ThreadName {
+package org.apache.iotdb.cluster.config;
 
+public enum ClusterConsistencyLevel {
   /**
-   * QP Task thread
+   * Strong consistency level
    */
-  QP_TASK("QP-Task-Thread"),
+  STRONG("strong"),
 
   /**
-   * Remote query timer
+   * Weak consistency level
    */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  WEAK("weak");
+
+  private String levelName;
 
-  private String name;
+  public static final int UNSUPPORT_LEVEL = -1;
 
-  ThreadName(String name) {
-    this.name = name;
+  ClusterConsistencyLevel(String levelName) {
+    this.levelName = levelName;
   }
 
-  public String getName() {
-    return name;
+  public static int getLevel(String levelName) {
+    for (ClusterConsistencyLevel consistencyLevel : values()) {
+      if (consistencyLevel.levelName.equals(levelName)) {
+        return consistencyLevel.ordinal();
+      }
+    }
+    return UNSUPPORT_LEVEL;
   }
 }
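A small usage sketch for the new enum, assuming the ClusterConsistencyLevel class above is on the classpath. Note that getLevel() compares with String.equals, so the lower-casing here is an assumption about the caller (matching the "case-insensitive" wording in ClusterConfig); the fallback mirrors what ClusterDescriptor does below:

```java
public class ConsistencyLevelSketch {
  public static void main(String[] args) {
    String levelName = "Strong".toLowerCase(); // assumed normalization by the caller
    int level = ClusterConsistencyLevel.getLevel(levelName);
    if (level == ClusterConsistencyLevel.UNSUPPORT_LEVEL) {
      level = ClusterConsistencyLevel.STRONG.ordinal(); // default to strong, as in ClusterDescriptor
    }
    System.out.println("resolved consistency level: " + level);
  }
}
```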
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
index 5448847..fba692f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
@@ -26,10 +26,8 @@ public class ClusterConstant {
   /**
    * Set read metadata consistency level pattern
    */
-  public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+metadata\\s+level\\s+to\\s+\\d+";
-  public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+data\\s+level\\s+to\\s+\\d+";
-  public static final int MAX_CONSISTENCY_LEVEL = 2;
-  public static final int STRONG_CONSISTENCY_LEVEL = 1;
+  public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "(set\\s+)(read\\s+metadata\\s+level\\s+)(to\\s+.*)";
+  public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "(set\\s+)(read\\s+data\\s+level\\s+)(to\\s+.*)";
   public static final int WEAK_CONSISTENCY_LEVEL = 2;
 
   /**
@@ -37,7 +35,7 @@ public class ClusterConstant {
    * queue until end. Each client request corresponds to a QP Task. A QP task may be divided into
   * several sub-tasks. The unit is milliseconds.
    */
-  public static final int CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT = 1000;
+  public static final int CLOSE_THREAD_POOL_BLOCK_TIMEOUT = 1000;
 
   /**
    * Query timeout in query node. If time interval between last communications with coordinator node
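The relaxed patterns above accept a level name after "to" instead of only digits. A self-contained check of the data-level pattern; the statement strings are assumptions based on the regex:

```java
import java.util.regex.Pattern;

public class LevelPatternSketch {
  public static void main(String[] args) {
    Pattern p = Pattern.compile("(set\\s+)(read\\s+data\\s+level\\s+)(to\\s+.*)");
    System.out.println(p.matcher("set read data level to strong").matches()); // true
    System.out.println(p.matcher("set read data level to 1").matches());      // true: ".*" still accepts digits
    System.out.println(p.matcher("set read data level strong").matches());    // false: missing "to"
  }
}
```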
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
index b90d781..56fba70 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
@@ -56,8 +56,11 @@ public class ClusterDescriptor {
   * test. In most cases, you should invoke this method.
    */
   public void loadProps() {
+    // modify iotdb config
     ioTDBConf.setRpcImplClassName(TSServiceClusterImpl.class.getName());
     ioTDBConf.setEnableWal(false);
+
+    // cluster config
     conf.setDefaultPath();
     InputStream inputStream;
     String url = System.getProperty(IoTDBConstant.IOTDB_CONF, null);
@@ -89,8 +92,12 @@ public class ClusterDescriptor {
     Properties properties = new Properties();
     try {
       properties.load(inputStream);
-      conf.setNodes(properties.getProperty("nodes", ClusterConfig.DEFAULT_NODE)
-          .split(","));
+      String[] nodes = properties.getProperty("nodes", ClusterConfig.DEFAULT_NODE)
+          .split(",");
+      for (int i = 0; i < nodes.length; i++) {
+        nodes[i] = nodes[i].trim();
+      }
+      conf.setNodes(nodes);
 
       conf.setReplication(Integer
           .parseInt(properties.getProperty("replication",
@@ -137,21 +144,27 @@ public class ClusterDescriptor {
           .parseInt(properties.getProperty("num_of_virtual_nodes",
               Integer.toString(conf.getNumOfVirtualNodes()))));
 
-      conf.setMaxNumOfInnerRpcClient(Integer
-          .parseInt(properties.getProperty("max_num_of_inner_rpc_client",
-              Integer.toString(conf.getMaxNumOfInnerRpcClient()))));
+      conf.setConcurrentInnerRpcClientThread(Integer
+          .parseInt(properties.getProperty("concurrent_inner_rpc_client_thread",
+              Integer.toString(conf.getConcurrentInnerRpcClientThread()))));
 
-      conf.setMaxQueueNumOfInnerRpcClient(Integer
+      conf.setMaxQueueNumOfQPTask(Integer
           .parseInt(properties.getProperty("max_queue_num_of_inner_rpc_client",
-              Integer.toString(conf.getMaxQueueNumOfInnerRpcClient()))));
+              Integer.toString(conf.getMaxQueueNumOfQPTask()))));
 
-      conf.setReadMetadataConsistencyLevel(Integer
-          .parseInt(properties.getProperty("read_metadata_consistency_level",
-              Integer.toString(conf.getReadMetadataConsistencyLevel()))));
+      String readMetadataLevelName = properties.getProperty("read_metadata_consistency_level", "");
+      int readMetadataLevel = ClusterConsistencyLevel.getLevel(readMetadataLevelName);
+      if (readMetadataLevel == ClusterConsistencyLevel.UNSUPPORT_LEVEL) {
+        readMetadataLevel = ClusterConsistencyLevel.STRONG.ordinal();
+      }
+      conf.setReadMetadataConsistencyLevel(readMetadataLevel);
 
-      conf.setReadDataConsistencyLevel(Integer
-          .parseInt(properties.getProperty("read_data_consistency_level",
-              Integer.toString(conf.getReadDataConsistencyLevel()))));
+      String readDataLevelName = properties.getProperty("read_data_consistency_level", "");
+      int readDataLevel = ClusterConsistencyLevel.getLevel(readDataLevelName);
+      if (readDataLevel == ClusterConsistencyLevel.UNSUPPORT_LEVEL) {
+        readDataLevel = ClusterConsistencyLevel.STRONG.ordinal();
+      }
+      conf.setReadDataConsistencyLevel(readDataLevel);
 
       conf.setConcurrentQPSubTaskThread(Integer
           .parseInt(properties.getProperty("concurrent_qp_sub_task_thread",
@@ -168,6 +181,10 @@ public class ClusterDescriptor {
         conf.setConcurrentQPSubTaskThread(Runtime.getRuntime().availableProcessors() * 10);
       }
 
+      if (conf.getConcurrentInnerRpcClientThread() <= 0) {
+        conf.setConcurrentInnerRpcClientThread(Runtime.getRuntime().availableProcessors() * 10);
+      }
+
       if (conf.getMaxCachedBatchDataListSize() <= 0) {
         conf.setMaxCachedBatchDataListSize(2);
       }
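The node-list parsing above now trims each entry, so whitespace around commas in the `nodes` property no longer produces bad addresses. An equivalent standalone sketch; the addresses are made up:

```java
public class NodeListSketch {
  public static void main(String[] args) {
    String property = "192.168.130.1:8888, 192.168.130.2:8888 ,192.168.130.3:8888";
    String[] nodes = property.split(",");
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] = nodes[i].trim(); // tolerate spaces around the separators
    }
    for (String node : nodes) {
      System.out.println("[" + node + "]");
    }
  }
}
```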
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
index 0efb70d..41c4cb1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@ -23,7 +23,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory;
 import java.util.HashMap;
 import java.util.Map;
-import org.apache.iotdb.cluster.concurrent.pool.QPTaskManager;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -31,7 +31,11 @@ import org.apache.iotdb.cluster.entity.data.DataPartitionHolder;
 import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetricAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.DataGroupNonQueryAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.MetaGroupNonQueryAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querydata.CloseSeriesReaderSyncProcessor;
@@ -43,11 +47,18 @@ import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataIn
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryPathsAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QuerySeriesTypeAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryTimeSeriesAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryJobNumAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryLeaderAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryStatusAsyncProcessor;
+import org.apache.iotdb.cluster.service.ClusterMonitor;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.exception.StartupException;
 import org.apache.iotdb.db.service.IoTDB;
+import org.apache.iotdb.db.service.RegisterManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,12 +94,17 @@ public class Server {
    */
   private IoTDB iotdb;
 
-  public static void main(String[] args) {
+  private RegisterManager registerManager = new RegisterManager();
+
+  public static void main(String[] args)
+      throws ProcessorException, RaftConnectionException, FileNodeManagerException {
     Server server = Server.getInstance();
     server.start();
   }
 
-  public void start() {
+  public void start()
+      throws ProcessorException, RaftConnectionException, FileNodeManagerException {
+
     /** Stand-alone version of IoTDB, be careful to replace the internal JDBC Server with a cluster version **/
     iotdb = new IoTDB();
     iotdb.active();
@@ -97,12 +113,15 @@ public class Server {
     /** Init raft groups **/
     PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(CLUSTER_CONF.getNodes());
     serverId = new PeerId(CLUSTER_CONF.getIp(), CLUSTER_CONF.getPort());
+
+    // Rpc between raft groups
     RpcServer rpcServer = new RpcServer(serverId.getPort());
     RaftRpcServerFactory.addRaftRequestProcessors(rpcServer);
 
     registerNonQueryProcessor(rpcServer);
     registerQueryMetadataProcessor(rpcServer);
     registerQueryDataProcessor(rpcServer);
+    registerQueryMetricProcessor(rpcServer);
 
     metadataHolder = new MetadataRaftHolder(peerIds, serverId, rpcServer, true);
     metadataHolder.init();
@@ -126,6 +145,12 @@ public class Server {
       Router.getInstance().showPhysicalNodes(groupId);
     }
 
+    try {
+      LOGGER.info("Register Cluster Monitor to JMX service.");
+      registerManager.register(ClusterMonitor.INSTANCE);
+    } catch (StartupException e) {
+      stop();
+    }
   }
 
   private void registerNonQueryProcessor(RpcServer rpcServer) {
@@ -148,14 +173,25 @@ public class Server {
     rpcServer.registerUserProcessor(new CloseSeriesReaderSyncProcessor());
   }
 
-  public void stop() throws ProcessorException, InterruptedException {
-    QPTaskManager.getInstance().close(true, ClusterConstant.CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT);
-    iotdb.deactivate();
+  private void registerQueryMetricProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new QueryMetricAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryJobNumAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryStatusAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryLeaderAsyncProcessor());
+  }
+
+  public void stop() throws ProcessorException, RaftConnectionException, FileNodeManagerException {
+    QPTaskThreadManager.getInstance().close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
+    ClusterRpcQueryManager.getInstance().close();
+    ClusterLocalQueryManager.getInstance().close();
     CLIENT_MANAGER.shutdown();
+    iotdb.deactivate();
     metadataHolder.stop();
     for (DataPartitionHolder dataPartitionHolder : dataPartitionHolderMap.values()) {
       dataPartitionHolder.stop();
     }
+
+    registerManager.deregisterAll();
   }
 
   public PeerId getServerId() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
index b8c6f43..eb9db25 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
@@ -32,7 +32,7 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
 import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -92,7 +92,8 @@ public class DataStateMachine extends StateMachineAdapter {
    */
   private void applySingleTask(Closure closure, ByteBuffer data) {
     /** If closure is not null, the node is leader **/
-    BasicResponse response = (closure == null) ? null : ((ResponseClosure) closure).getResponse();
+    DataGroupNonQueryResponse response = (closure == null) ? null
+        : (DataGroupNonQueryResponse) ((ResponseClosure) closure).getResponse();
     DataGroupNonQueryRequest request;
     try {
       request = SerializerManager.getSerializer(SerializerManager.Hessian2)
@@ -116,12 +117,13 @@ public class DataStateMachine extends StateMachineAdapter {
         PhysicalPlan plan = PhysicalPlanLogTransfer.logToOperator(planByte);
 
         LOGGER.debug("OperatorType :{}", plan.getOperatorType());
-        /** If the request is to set path and sg of the path doesn't exist, it needs to run null-read in meta group to avoid out of data sync **/
+        /** If the request is to set a path and the storage group of the path doesn't exist, it needs to receive a null-read in the meta group to avoid getting out of data sync **/
         if (plan.getOperatorType() == OperatorType.CREATE_TIMESERIES && !checkPathExistence(
             ((MetadataPlan) plan).getPath().getFullPath())) {
           RaftUtils.handleNullReadToMetaGroup(status);
           if(!status.isOk()){
             addResult(response, false);
+            addErrorMsg(response, status.getErrorMsg());
             continue;
           }
         }
@@ -131,6 +133,7 @@ public class DataStateMachine extends StateMachineAdapter {
         LOGGER.error("Execute physical plan error", e);
         status = new Status(-1, e.getMessage());
         addResult(response, false);
+        addErrorMsg(response, status.getErrorMsg());
       }
     }
     if (closure != null) {
@@ -141,13 +144,22 @@ public class DataStateMachine extends StateMachineAdapter {
   /**
    * Add result to response
    */
-  private void addResult(BasicResponse response, boolean result){
+  private void addResult(DataGroupNonQueryResponse response, boolean result){
     if(response != null){
       response.addResult(result);
     }
   }
 
   /**
+   * Add error message to response
+   */
+  private void addErrorMsg(DataGroupNonQueryResponse response, String errorMsg){
+    if(response != null){
+      response.addErrorMsg(errorMsg);
+    }
+  }
+
+  /**
    * Check the existence of a specific path
    */
   private boolean checkPathExistence(String path) throws PathErrorException {
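With the response narrowed from BasicResponse to DataGroupNonQueryResponse, a failed plan now records both a boolean result and the matching error message, and both helpers tolerate the null response seen on follower nodes. A minimal sketch of that pairing; the Response type is a stand-in for the real DataGroupNonQueryResponse, whose exact fields are not shown in this diff:

```java
import java.util.ArrayList;
import java.util.List;

public class NonQueryResponseSketch {

  // Stand-in for DataGroupNonQueryResponse: one result flag plus one
  // error message per applied physical plan (field names are assumptions).
  static class Response {
    final List<Boolean> results = new ArrayList<>();
    final List<String> errorMsgs = new ArrayList<>();
  }

  static void addResult(Response response, boolean result) {
    if (response != null) { // closure (and thus response) is null on followers
      response.results.add(result);
    }
  }

  static void addErrorMsg(Response response, String errorMsg) {
    if (response != null) {
      response.errorMsgs.add(errorMsg);
    }
  }

  public static void main(String[] args) {
    Response response = new Response();
    addResult(response, false); // a plan failed to execute
    addErrorMsg(response, "Execute physical plan error");
    addResult(null, true);      // follower side: silently ignored
    System.out.println(response.results + " " + response.errorMsgs);
  }
}
```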
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
index 3cc9001..78dd3e8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
@@ -148,7 +148,7 @@ public class MetadataStateManchine extends StateMachineAdapter {
     mManager.setStorageLevelToMTree(sg);
   }
 
-  public Set<String> getAllStorageGroups() throws PathErrorException {
+  public Set<String> getAllStorageGroups() {
     return mManager.getAllStorageGroup();
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
index 1d08f09..d910f0f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
@@ -25,9 +25,11 @@ import com.alipay.sofa.jraft.StateMachine;
 import com.alipay.sofa.jraft.conf.Configuration;
 import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.option.NodeOptions;
+import com.codahale.metrics.ConsoleReporter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.service.IService;
@@ -45,11 +47,11 @@ public class RaftService implements IService {
 
   public RaftService(String groupId, PeerId[] peerIds, PeerId serverId, RpcServer rpcServer, StateMachine fsm, boolean startRpcServer) {
     this.peerIdList = new ArrayList<>(peerIds.length);
-    peerIdList.addAll(Arrays.asList(peerIds));
+    this.peerIdList.addAll(Arrays.asList(peerIds));
     this.fsm = fsm;
     this.groupId = groupId;
     this.startRpcServer = startRpcServer;
-    raftGroupService = new RaftGroupService(groupId, serverId, null, rpcServer);
+    this.raftGroupService = new RaftGroupService(groupId, serverId, null, rpcServer);
   }
 
   @Override
@@ -61,6 +63,7 @@ public class RaftService implements IService {
     nodeOptions.setRaftMetaUri(FilePathUtils.regularizePath(config.getRaftMetadataPath()) + groupId);
     nodeOptions.setSnapshotUri(FilePathUtils.regularizePath(config.getRaftSnapshotPath()) + groupId);
     nodeOptions.setElectionTimeoutMs(config.getElectionTimeoutMs());
+    nodeOptions.setEnableMetrics(true);
     final Configuration initConf = new Configuration();
     initConf.setPeers(peerIdList);
     nodeOptions.setInitialConf(initConf);
@@ -70,6 +73,12 @@ public class RaftService implements IService {
   @Override
   public void start() {
     this.node = raftGroupService.start(startRpcServer);
+
+//    ConsoleReporter reporter = ConsoleReporter.forRegistry(node.getNodeMetrics().getMetricRegistry())
+//        .convertRatesTo(TimeUnit.SECONDS)
+//        .convertDurationsTo(TimeUnit.MILLISECONDS)
+//        .build();
+//    reporter.start(30, TimeUnit.SECONDS);
   }
 
   @Override
@@ -93,4 +102,11 @@ public class RaftService implements IService {
     this.node = node;
   }
 
+  public StateMachine getFsm() {
+    return fsm;
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
 }
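
    This file enables jraft node metrics (nodeOptions.setEnableMetrics(true)) and leaves
    a console reporter commented out in start(). For reference, the commented block maps
    to the following Dropwizard Metrics wiring (a sketch; it assumes the node has been
    started so getNodeMetrics() is available):

      import com.codahale.metrics.ConsoleReporter;
      import java.util.concurrent.TimeUnit;

      // Dump jraft node metrics to stdout every 30 seconds.
      ConsoleReporter reporter = ConsoleReporter
          .forRegistry(node.getNodeMetrics().getMetricRegistry())
          .convertRatesTo(TimeUnit.SECONDS)           // rates as events/second
          .convertDurationsTo(TimeUnit.MILLISECONDS)  // durations in milliseconds
          .build();
      reporter.start(30, TimeUnit.SECONDS);
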
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
index 492b7ad..5059e06 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
@@ -19,16 +19,16 @@
 package org.apache.iotdb.cluster.qp.executor;
 
 import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.HashSet;
+import java.util.Set;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
-import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.QPTask;
 import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.cluster.utils.hash.Router;
@@ -87,59 +87,102 @@ public abstract class AbstractQPExecutor {
    * Async handle QPTask by QPTask and leader id
    *
    * @param task request QPTask
-   * @param leader leader of the target raft group
    * @param taskRetryNum Number of QPTask retries due to timeout and redirected.
    * @return basic response
    */
-  protected BasicResponse asyncHandleNonQuerySingleTaskGetRes(SingleQPTask task, PeerId leader,
-      int taskRetryNum)
+  private BasicResponse syncHandleSingleTaskGetRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId, Set<PeerId> downNodeSet)
       throws InterruptedException, RaftConnectionException {
-    asyncSendNonQuerySingleTask(task, leader, taskRetryNum);
-    return syncGetNonQueryRes(task, leader, taskRetryNum);
+    PeerId firstNode = task.getTargetNode();
+    RaftUtils.updatePeerIDOrder(firstNode, groupId);
+    BasicResponse response;
+    try {
+      asyncSendSingleTask(task, taskRetryNum);
+      response = syncGetSingleTaskRes(task, taskRetryNum, taskInfo, groupId, downNodeSet);
+      return response;
+    } catch (RaftConnectionException ex) {
+      downNodeSet.add(firstNode);
+      while (true) {
+        PeerId nextNode = null;
+        try {
+          nextNode = RaftUtils.getPeerIDInOrder(groupId);
+          if (firstNode.equals(nextNode)) {
+            break;
+          }
+          LOGGER.debug(
+              "Previous task fail, then send {} task for group {} to node {}.", taskInfo, groupId,
+              nextNode);
+          task.resetTask();
+          task.setTargetNode(nextNode);
+          asyncSendSingleTask(task, taskRetryNum);
+          response = syncGetSingleTaskRes(task, taskRetryNum, taskInfo, groupId, downNodeSet);
+          LOGGER.debug("{} task for group {} to node {} succeed.", taskInfo, groupId, nextNode);
+          return response;
+        } catch (RaftConnectionException e1) {
+          LOGGER.debug("{} task for group {} to node {} fail.", taskInfo, groupId, nextNode);
+          downNodeSet.add(nextNode);
+        }
+      }
+      throw new RaftConnectionException(String
+          .format("Can not %s in all nodes of group<%s>, please check cluster status.",
+              taskInfo, groupId));
+    }
+  }
+
+  protected BasicResponse syncHandleSingleTaskGetRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId)
+      throws RaftConnectionException, InterruptedException {
+    return syncHandleSingleTaskGetRes(task, taskRetryNum, taskInfo, groupId, new HashSet<>());
   }
 
   /**
    * Asynchronous send rpc task via client
    *  @param task rpc task
-   * @param leader leader node of the group
    * @param taskRetryNum Retry time of the task
    */
-  protected void asyncSendNonQuerySingleTask(SingleQPTask task, PeerId leader, int taskRetryNum)
+  protected void asyncSendSingleTask(SingleQPTask task, int taskRetryNum)
       throws RaftConnectionException {
     if (taskRetryNum >= TASK_MAX_RETRY) {
       throw new RaftConnectionException(String.format("QPTask retries reach the upper bound %s",
           TASK_MAX_RETRY));
     }
-    NodeAsClient client = RaftUtils.getRaftNodeAsClient();
-    /** Call async method **/
-    client.asyncHandleRequest(task.getRequest(), leader, task);
+    RaftNodeAsClientManager.getInstance().produceQPTask(task);
   }
 
   /**
    * Synchronous get task response. If it's redirected or status is exception, the task needs to be
    * resent. Note: If status is Exception, it marks that an exception occurred during the task is
    * being sent instead of executed.
-   *  @param task rpc task
-   * @param leader leader node of the group
+   * @param task rpc task
    * @param taskRetryNum Retry time of the task
    */
-  private BasicResponse syncGetNonQueryRes(SingleQPTask task, PeerId leader, int taskRetryNum)
+  private BasicResponse syncGetSingleTaskRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId, Set<PeerId> downNodeSet)
       throws InterruptedException, RaftConnectionException {
     task.await();
+    PeerId leader;
     if (task.getTaskState() != TaskState.FINISH) {
-      if (task.getTaskState() == TaskState.REDIRECT) {
-        /** redirect to the right leader **/
+      if (task.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
+        throw new RaftConnectionException(
+            String.format("Can not connect to remote node : %s", task.getTargetNode()));
+      } else if (task.getTaskState() == TaskState.REDIRECT) {
+        // redirect to the right leader
         leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
-        LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
-        RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+
+        if (downNodeSet.contains(leader)) {
+          LOGGER.debug("Redirect leader {} is down, group {} might be down.", leader, groupId);
+          throw new RaftConnectionException(
+              String.format("Can not connect to leader of remote node : %s", task.getTargetNode()));
+        } else {
+          LOGGER
+              .debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
+          RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+        }
       } else {
-        String groupId = task.getRequest().getGroupID();
         RaftUtils.removeCachedRaftGroupLeader(groupId);
         LOGGER.debug("Remove cached raft group leader of {}", groupId);
-        leader = RaftUtils.getLeaderPeerID(groupId);
+        leader = RaftUtils.getLocalLeaderPeerID(groupId);
       }
+      task.setTargetNode(leader);
       task.resetTask();
-      return asyncHandleNonQuerySingleTaskGetRes(task, leader, taskRetryNum + 1);
+      return syncHandleSingleTaskGetRes(task, taskRetryNum + 1, taskInfo, groupId, downNodeSet);
     }
     return task.getResponse();
   }
@@ -150,20 +193,12 @@ public abstract class AbstractQPExecutor {
     }
   }
 
-  public void setReadMetadataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      readMetadataConsistencyLevel.set(level);
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
+  public void setReadMetadataConsistencyLevel(int level) {
+    readMetadataConsistencyLevel.set(level);
   }
 
-  public void setReadDataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      readDataConsistencyLevel.set(level);
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
+  public void setReadDataConsistencyLevel(int level) {
+    readDataConsistencyLevel.set(level);
   }
 
   public int getReadMetadataConsistencyLevel() {
@@ -175,4 +210,16 @@ public abstract class AbstractQPExecutor {
     checkInitConsistencyLevel();
     return readDataConsistencyLevel.get();
   }
+
+  /**
+   * Synchronously handle a single task against its raft group and report whether it succeeded.
+   *
+   * @param task request SingleQPTask
+   * @return request result
+   */
+  public boolean syncHandleSingleTask(SingleQPTask task, String taskInfo, String groupId)
+      throws RaftConnectionException, InterruptedException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, taskInfo, groupId);
+    return response != null && response.isSuccess();
+  }
 }
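
    The heart of this file's change is the failover loop in syncHandleSingleTaskGetRes:
    try the first peer, and on a RaftConnectionException walk the remaining peers of the
    group in order, remembering the unreachable ones, until every peer has been tried. A
    minimal standalone sketch of that loop, with hypothetical stand-ins for PeerId,
    RaftConnectionException, and the asyncSendSingleTask/syncGetSingleTaskRes pair:

      import java.util.HashSet;
      import java.util.List;
      import java.util.Set;

      final class FailoverSketch {

        // Hypothetical stand-in for RaftConnectionException.
        static class ConnectionException extends Exception {
          ConnectionException(String msg) { super(msg); }
        }

        // Hypothetical stand-in for the send-request/await-response pair.
        interface Sender<R> {
          R send(String peer) throws ConnectionException;
        }

        // Try each peer of the group in order; collect unreachable peers so a
        // later leader redirect pointing at a known-down node can fail fast.
        static <R> R sendWithFailover(List<String> peersInOrder, Sender<R> sender)
            throws ConnectionException {
          Set<String> downPeers = new HashSet<>();
          for (String peer : peersInOrder) {
            try {
              return sender.send(peer);
            } catch (ConnectionException e) {
              downPeers.add(peer); // remember the node and move on to the next peer
            }
          }
          throw new ConnectionException(
              "Cannot reach any node of the group, please check cluster status.");
        }
      }
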
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
index c5032fc..39324d8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
@@ -41,19 +41,28 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.QueryExpression;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements IQueryProcessExecutor {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterQueryProcessExecutor.class);
   private ThreadLocal<Integer> fetchSize = new ThreadLocal<>();
   private ClusterQueryRouter clusterQueryRouter = new ClusterQueryRouter();
 
-  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private QueryMetadataExecutor queryMetadataExecutor;
+
+  public ClusterQueryProcessExecutor(
+      QueryMetadataExecutor queryMetadataExecutor) {
+    this.queryMetadataExecutor = queryMetadataExecutor;
+  }
 
   @Override
-  public QueryDataSet processQuery(QueryPlan queryPlan, QueryContext context)
+  public QueryDataSet processQuery(PhysicalPlan plan, QueryContext context)
       throws IOException, FileNodeManagerException, PathErrorException,
       QueryFilterOptimizationException, ProcessorException {
 
+    QueryPlan queryPlan = (QueryPlan) plan;
     QueryExpression queryExpression = QueryExpression.create().setSelectSeries(queryPlan.getPaths())
         .setExpression(queryPlan.getExpression());
     clusterQueryRouter.setReadDataConsistencyLevel(getReadDataConsistencyLevel());
@@ -117,6 +126,7 @@ public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements I
   public List<String> getAllPaths(String originPath)
       throws PathErrorException {
     try {
+      LOGGER.debug(String.format("read metadata level :%d", getReadMetadataConsistencyLevel()));
       return queryMetadataExecutor.processPathsQuery(originPath);
     } catch (InterruptedException | ProcessorException e) {
       throw new PathErrorException(e.getMessage());
@@ -165,8 +175,8 @@ public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements I
   }
 
   @Override
-  public int multiInsert(String deviceId, long insertTime, List<String> measurementList,
-      List<String> insertValues) throws ProcessorException {
+  public int multiInsert(String deviceId, long insertTime, String[] measurementList,
+      String[] insertValues) throws ProcessorException {
     throw new UnsupportedOperationException();
   }
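
    Note the design change in this file: ClusterQueryProcessExecutor no longer constructs
    its own QueryMetadataExecutor but receives one through the constructor, so a single
    metadata executor (and its consistency-level state) can be shared across components.
    Hypothetical wiring, not code from this patch:

      // Share one QueryMetadataExecutor between the query executor and any
      // other component that needs metadata lookups.
      QueryMetadataExecutor metadataExecutor = new QueryMetadataExecutor();
      ClusterQueryProcessExecutor queryExecutor =
          new ClusterQueryProcessExecutor(metadataExecutor);
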
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
index 1420370..1e6abea 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
@@ -23,6 +23,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import java.io.IOException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -68,10 +69,10 @@ public class NonQueryExecutor extends AbstractQPExecutor {
   private static final String OPERATION_NOT_SUPPORTED = "Operation %s does not support";
 
   /**
-   * When executing Metadata Plan, it's necessary to do null-read in single non query request or do
-   * the first null-read in batch non query request
+   * When executing a Metadata Plan, an empty read must be performed for a single non-query
+   * request, or before the first plan of a batch non-query request
    */
-  private boolean nullReaderEnable = false;
+  private boolean emptyTaskEnable = false;
 
   public NonQueryExecutor() {
     super();
@@ -82,12 +83,12 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   public boolean processNonQuery(PhysicalPlan plan) throws ProcessorException {
     try {
-      nullReaderEnable = true;
+      emptyTaskEnable = true;
       String groupId = getGroupIdFromPhysicalPlan(plan);
       return handleNonQueryRequest(groupId, plan);
     } catch (RaftConnectionException e) {
       LOGGER.error(e.getMessage());
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     } catch (InterruptedException | PathErrorException | IOException e) {
       throw new ProcessorException(e);
     }
@@ -104,27 +105,25 @@ public class NonQueryExecutor extends AbstractQPExecutor {
 
     Status nullReadTaskStatus = Status.OK();
     RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
-    if(!nullReadTaskStatus.isOk()){
+    if (!nullReadTaskStatus.isOk()) {
       throw new ProcessorException("Null read while processing batch failed");
     }
-    nullReaderEnable = false;
+    emptyTaskEnable = false;
 
-    /** 1. Classify physical plans by group id **/
+    /* 1. Classify physical plans by group id */
     Map<String, List<PhysicalPlan>> physicalPlansMap = new HashMap<>();
     Map<String, List<Integer>> planIndexMap = new HashMap<>();
     classifyPhysicalPlanByGroupId(physicalPlans, batchResult, physicalPlansMap, planIndexMap);
 
-    /** 2. Construct Multiple Data Group Requests **/
+    /* 2. Construct Multiple Data Group Requests */
     Map<String, SingleQPTask> subTaskMap = new HashMap<>();
     constructMultipleRequests(physicalPlansMap, planIndexMap, subTaskMap, batchResult);
 
-    /** 3. Execute Multiple Sub Tasks **/
+    /* 3. Execute Multiple Sub Tasks */
     BatchQPTask task = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
     currentTask.set(task);
-    task.execute(this);
+    task.executeBy(this);
     task.await();
-    batchResult.setAllSuccessful(task.isAllSuccessful());
-    batchResult.setBatchErrorMessage(task.getBatchErrorMessage());
   }
 
   /**
@@ -132,7 +131,8 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   private void classifyPhysicalPlanByGroupId(PhysicalPlan[] physicalPlans, BatchResult batchResult,
       Map<String, List<PhysicalPlan>> physicalPlansMap, Map<String, List<Integer>> planIndexMap) {
-    int[] result = batchResult.getResult();
+
+    int[] result = batchResult.getResultArray();
     for (int i = 0; i < result.length; i++) {
       /** Check if the request has failed. If it has failed, ignore it. **/
       if (result[i] != Statement.EXECUTE_FAILED) {
@@ -140,24 +140,22 @@ public class NonQueryExecutor extends AbstractQPExecutor {
         try {
           String groupId = getGroupIdFromPhysicalPlan(plan);
           if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+
+            // This handles set storage group statements and role/user management statements.
             LOGGER.debug("Execute metadata group task");
             boolean executeResult = handleNonQueryRequest(groupId, plan);
-            nullReaderEnable = true;
-            result[i] =  executeResult ? Statement.SUCCESS_NO_INFO
+            emptyTaskEnable = true;
+            result[i] = executeResult ? Statement.SUCCESS_NO_INFO
                 : Statement.EXECUTE_FAILED;
             batchResult.setAllSuccessful(executeResult);
-          }else {
-            if (!physicalPlansMap.containsKey(groupId)) {
-              physicalPlansMap.put(groupId, new ArrayList<>());
-              planIndexMap.put(groupId, new ArrayList<>());
-            }
-            physicalPlansMap.get(groupId).add(plan);
-            planIndexMap.get(groupId).add(i);
+          } else {
+            physicalPlansMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(plan);
+            planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(i);
           }
         } catch (PathErrorException | ProcessorException | IOException | RaftConnectionException | InterruptedException e) {
           result[i] = Statement.EXECUTE_FAILED;
           batchResult.setAllSuccessful(false);
-          batchResult.setBatchErrorMessage(e.getMessage());
+          batchResult.addBatchErrorMessage(i, e.getMessage());
           LOGGER.error(e.getMessage());
         }
       }
@@ -170,7 +168,7 @@ public class NonQueryExecutor extends AbstractQPExecutor {
   private void constructMultipleRequests(Map<String, List<PhysicalPlan>> physicalPlansMap,
       Map<String, List<Integer>> planIndexMap, Map<String, SingleQPTask> subTaskMap,
       BatchResult batchResult) {
-    int[] result = batchResult.getResult();
+    int[] result = batchResult.getResultArray();
     for (Entry<String, List<PhysicalPlan>> entry : physicalPlansMap.entrySet()) {
       String groupId = entry.getKey();
       SingleQPTask singleQPTask;
@@ -182,7 +180,9 @@ public class NonQueryExecutor extends AbstractQPExecutor {
         subTaskMap.put(groupId, singleQPTask);
       } catch (IOException e) {
         batchResult.setAllSuccessful(false);
-        batchResult.setBatchErrorMessage(e.getMessage());
+        for (int index : planIndexMap.get(groupId)) {
+          batchResult.addBatchErrorMessage(index, e.getMessage());
+        }
         for (int index : planIndexMap.get(groupId)) {
           result[index] = Statement.EXECUTE_FAILED;
         }
@@ -237,13 +237,13 @@ public class NonQueryExecutor extends AbstractQPExecutor {
       case CREATE_TIMESERIES:
       case SET_STORAGE_GROUP:
       case METADATA:
-        if(nullReaderEnable){
+        if (emptyTaskEnable) {
           Status nullReadTaskStatus = Status.OK();
           RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
-          if(!nullReadTaskStatus.isOk()){
+          if (!nullReadTaskStatus.isOk()) {
             throw new ProcessorException("Null read to metadata group failed");
           }
-          nullReaderEnable = false;
+          emptyTaskEnable = false;
         }
         groupId = getGroupIdFromMetadataPlan((MetadataPlan) plan);
         break;
@@ -311,23 +311,23 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   private boolean handleNonQueryRequest(String groupId, PhysicalPlan plan)
       throws IOException, RaftConnectionException, InterruptedException {
-    List<PhysicalPlan> plans = new ArrayList<>();
-    plans.add(plan);
+    List<PhysicalPlan> plans = Collections.singletonList(plan);
     BasicRequest request;
     if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       request = new MetaGroupNonQueryRequest(groupId, plans);
     } else {
       request = new DataGroupNonQueryRequest(groupId, plans);
     }
-    SingleQPTask qpTask = new SingleQPTask(false, request);
+    SingleQPTask qpTask = new SingleQPTask(true, request);
     currentTask.set(qpTask);
 
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
       return handleNonQueryRequestLocally(groupId, qpTask);
     } else {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
-      return asyncHandleNonQueryTask(qpTask, leader);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
+      qpTask.setTargetNode(leader);
+      return syncHandleSingleTask(qpTask, "execute non-query", groupId);
     }
   }
 
@@ -351,20 +351,4 @@ public class NonQueryExecutor extends AbstractQPExecutor {
     /** Apply qpTask to Raft Node **/
     return RaftUtils.executeRaftTaskForLocalProcessor(service, qpTask, response);
   }
-
-
-
-  /**
-   * Async handle task by QPTask and leader id.
-   *
-   * @param task request QPTask
-   * @param leader leader of the target raft group
-   * @return request result
-   */
-  public boolean asyncHandleNonQueryTask(SingleQPTask task, PeerId leader)
-      throws RaftConnectionException, InterruptedException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response != null && response.isSuccess();
-  }
-
 }
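
    Step 1 of processBatch() above now buckets plans by raft group with
    Map.computeIfAbsent instead of the containsKey/put sequence. A self-contained sketch
    of that classification step (String stands in for PhysicalPlan, and groupOf for
    getGroupIdFromPhysicalPlan):

      import java.util.ArrayList;
      import java.util.HashMap;
      import java.util.List;
      import java.util.Map;
      import java.util.function.Function;

      final class BatchClassifierSketch {

        // Bucket each plan index under its group id; the index lists let per-plan
        // results be written back into the batch result array afterwards.
        static Map<String, List<Integer>> classify(String[] plans,
            Function<String, String> groupOf) {
          Map<String, List<Integer>> planIndexMap = new HashMap<>();
          for (int i = 0; i < plans.length; i++) {
            String groupId = groupOf.apply(plans[i]);
            planIndexMap.computeIfAbsent(groupId, k -> new ArrayList<>()).add(i);
          }
          return planIndexMap;
        }
      }
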
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
index 82325e1..ce4b920 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@ -22,16 +22,19 @@ import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import com.alipay.sofa.jraft.entity.PeerId;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.BatchQPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
@@ -45,6 +48,7 @@ import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsRespon
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryStorageGroupResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -104,19 +108,19 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         StringBuilder path = new StringBuilder();
         String[] storageGroupNodes = storageGroup.split(DOUB_SEPARATOR);
         String[] queryPathNodes = queryPath.split(DOUB_SEPARATOR);
-        for(int  i = 0 ; i < queryPathNodes.length ; i++){
-          if(i >= storageGroupNodes.length){
+        for (int i = 0; i < queryPathNodes.length; i++) {
+          if (i >= storageGroupNodes.length) {
             path.append(queryPathNodes[i]).append(SINGLE_SEPARATOR);
           } else {
             path.append(storageGroupNodes[i]).append(SINGLE_SEPARATOR);
           }
         }
-        paths.add(path.deleteCharAt(path.length()-1).toString());
+        paths.add(path.deleteCharAt(path.length() - 1).toString());
       }
     }
     return paths;
   }
-  
+
   /**
    * Handle query timeseries in one data group
    *
@@ -132,59 +136,84 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     PeerId holder;
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-      LOGGER.debug("Execute show timeseries {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      LOGGER.debug(
+          "Execute show timeseries {} statement locally for group {} by sending request to local node.",
+          pathList, groupId);
       holder = this.server.getServerId();
     } else {
-      holder = RaftUtils.getRandomPeerID(groupId);
+      holder = RaftUtils.getPeerIDInOrder(groupId);
     }
+    task.setTargetNode(holder);
     try {
-      res.addAll(queryTimeSeries(task, holder));
+      LOGGER.debug("Send show timeseries {} task for group {} to node {}.", pathList, groupId,
+          holder);
+      res.addAll(queryTimeSeries(task, pathList, groupId));
     } catch (RaftConnectionException e) {
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     }
   }
 
+  private List<List<String>> queryTimeSeries(SingleQPTask task, List<String> pathList, String groupId)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "query timeseries " + pathList, groupId);
+    return response == null ? new ArrayList<>()
+        : ((QueryTimeSeriesResponse) response).getTimeSeries();
+  }
+
   public String processMetadataInStringQuery()
       throws InterruptedException, ProcessorException {
     Set<String> groupIdSet = router.getAllGroupId();
 
     List<String> metadataList = new ArrayList<>(groupIdSet.size());
-    List<SingleQPTask> taskList = new ArrayList<>();
+
+    BatchResult batchResult = new BatchResult(true, new StringBuilder(), new int[groupIdSet.size()]);
+    Map<String, List<Integer>> planIndexMap = new HashMap<>();
+    Map<String, SingleQPTask> subTaskMap = new HashMap<>();
+
+    int index = 0;
     for (String groupId : groupIdSet) {
       QueryMetadataInStringRequest request = new QueryMetadataInStringRequest(groupId,
           getReadMetadataConsistencyLevel());
       SingleQPTask task = new SingleQPTask(false, request);
-      taskList.add(task);
 
       LOGGER.debug("Execute show metadata in string statement for group {}.", groupId);
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute show metadata in string statement locally for group {} by sending request to local node.", groupId);
+        LOGGER.debug(
+            "Execute show metadata in string statement locally for group {} by sending request to local node.",
+            groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
-      }
-      try {
-        asyncSendNonQuerySingleTask(task, holder, 0);
-      } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
+      subTaskMap.put(groupId, task);
+      planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(index++);
     }
-    for (int i = 0; i < taskList.size(); i++) {
-      SingleQPTask task = taskList.get(i);
-      task.await();
-      BasicResponse response = task.getResponse();
+
+    BatchQPTask batchTask = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
+    currentTask.set(batchTask);
+    batchTask.executeQueryMetadataBy(this, "show metadata in string");
+    batchTask.await();
+
+    for (SingleQPTask subTask : subTaskMap.values()) {
+      BasicResponse response = subTask.getResponse();
       if (response == null || !response.isSuccess()) {
-        throw new ProcessorException();
+        String errorMessage = "response is null";
+        if (response != null && response.getErrorMsg() != null) {
+          errorMessage = response.getErrorMsg();
+        }
+        throw new ProcessorException(
+            "Execute show metadata in string statement fail because " + errorMessage);
       }
-      metadataList.add(((QueryMetadataInStringResponse)response).getMetadata());
+      metadataList.add(((QueryMetadataInStringResponse) response).getMetadata());
     }
     return combineMetadataInStringList(metadataList);
   }
 
   public Metadata processMetadataQuery()
-      throws InterruptedException, ProcessorException, PathErrorException {
+      throws InterruptedException, ProcessorException {
     Set<String> groupIdSet = router.getAllGroupId();
 
     Metadata[] metadatas = new Metadata[groupIdSet.size()];
@@ -199,15 +228,46 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute query metadata statement locally for group {} by sending request to local node.", groupId);
+        LOGGER.debug(
+            "Execute query metadata statement locally for group {} by sending request to local node.",
+            groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
       try {
-        asyncSendNonQuerySingleTask(task, holder, 0);
+        LOGGER.debug("Send query metadata task for group {} to node {}.", groupId, holder);
+        asyncSendSingleTask(task, 0);
       } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        boolean success = false;
+        while (!success) {
+          PeerId nextNode = null;
+          try {
+            nextNode = RaftUtils.getPeerIDInOrder(groupId);
+            if (holder.equals(nextNode)) {
+              break;
+            }
+            LOGGER
+                .debug("Previous task fail, then send query metadata task for group {} to node {}.",
+                    groupId, nextNode);
+            task.resetTask();
+            task.setTargetNode(nextNode);
+            task.setTaskState(TaskState.INITIAL);
+            asyncSendSingleTask(task, 0);
+            LOGGER.debug("Query metadata task for group {} to node {} succeed.", groupId, nextNode);
+            success = true;
+          } catch (RaftConnectionException e1) {
+            LOGGER.debug("Query metadata task for group {} to node {} fail.", groupId, nextNode);
+          }
+        }
+        LOGGER.debug("The final result for query metadata task is {}", success);
+        if (!success) {
+          throw new ProcessorException(String
+              .format(
+                  "Can not query metadata in all nodes of group<%s>, please check cluster status.",
+                  groupId));
+        }
       }
     }
     for (int i = 0; i < taskList.size(); i++) {
@@ -219,9 +279,10 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         if (response != null && response.getErrorMsg() != null) {
           errorMessage = response.getErrorMsg();
         }
-        throw new ProcessorException("Execute query metadata statement false because " + errorMessage);
+        throw new ProcessorException(
+            "Execute query metadata statement fail because " + errorMessage);
       }
-      metadatas[i] = ((QueryMetadataResponse)response).getMetadata();
+      metadatas[i] = ((QueryMetadataResponse) response).getMetadata();
     }
     return Metadata.combineMetadatas(metadatas);
   }
@@ -242,20 +303,32 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute get series type for {} statement locally for group {} by sending request to local node.", path, groupId);
+        LOGGER.debug(
+            "Execute get series type for {} statement locally for group {} by sending request to local node.",
+            path, groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
       try {
-        dataType = querySeriesType(task, holder);
+        LOGGER.debug("Send get series type for {} task for group {} to node {}.", path, groupId,
+            holder);
+        dataType = querySeriesType(task, path, groupId);
       } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        throw new ProcessorException(e.getMessage());
       }
     }
     return dataType;
   }
 
+  private TSDataType querySeriesType(SingleQPTask task, String path, String groupId)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "get series type for " + path, groupId);
+    return response == null ? null
+        : ((QuerySeriesTypeResponse) response).getDataType();
+  }
+
   /**
    * Handle show timeseries <path> statement
    */
@@ -291,30 +364,28 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     PeerId holder;
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-      LOGGER.debug("Execute get paths for {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      LOGGER.debug(
+          "Execute get paths for {} statement locally for group {} by sending request to local node.",
+          pathList, groupId);
       holder = this.server.getServerId();
     } else {
-      holder = RaftUtils.getRandomPeerID(groupId);
+      holder = RaftUtils.getPeerIDInOrder(groupId);
     }
+    task.setTargetNode(holder);
     try {
-      res.addAll(queryPaths(task, holder));
+      LOGGER
+          .debug("Send get paths for {} task for group {} to node {}.", pathList, groupId, holder);
+      res.addAll(queryPaths(task, pathList, groupId));
     } catch (RaftConnectionException e) {
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     }
   }
 
-  private List<List<String>> queryTimeSeries(SingleQPTask task, PeerId leader)
+  private List<String> queryPaths(SingleQPTask task, List<String> pathList, String groupId)
       throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "get paths for " + pathList, groupId);
     return response == null ? new ArrayList<>()
-        : ((QueryTimeSeriesResponse) response).getTimeSeries();
-  }
-
-  private TSDataType querySeriesType(SingleQPTask task, PeerId leader)
-      throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response == null ? null
-        : ((QuerySeriesTypeResponse) response).getDataType();
+        : ((QueryPathsResponse) response).getPaths();
   }
 
   /**
@@ -328,15 +399,11 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         ClusterConfig.METADATA_GROUP_ID, getReadMetadataConsistencyLevel());
     SingleQPTask task = new SingleQPTask(false, request);
     MetadataRaftHolder metadataHolder = (MetadataRaftHolder) server.getMetadataHolder();
-    if (getReadMetadataConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (getReadMetadataConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryStorageGroupResponse response;
-      try {
-        response = QueryStorageGroupResponse
-            .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
-      } catch (final PathErrorException e) {
-        response = QueryStorageGroupResponse.createErrorResponse(e.getMessage());
-      }
-      task.run(response);
+      response = QueryStorageGroupResponse
+          .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
+      task.receive(response);
     } else {
       ((RaftService) metadataHolder.getService()).getNode()
           .readIndex(reqContext, new ReadIndexClosure() {
@@ -345,16 +412,12 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
             public void run(Status status, long index, byte[] reqCtx) {
               QueryStorageGroupResponse response;
               if (status.isOk()) {
-                try {
-                  response = QueryStorageGroupResponse
-                      .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
-                } catch (final PathErrorException e) {
-                  response = QueryStorageGroupResponse.createErrorResponse(e.getMessage());
-                }
+                response = QueryStorageGroupResponse
+                    .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
               } else {
                 response = QueryStorageGroupResponse.createErrorResponse(status.getErrorMsg());
               }
-              task.run(response);
+              task.receive(response);
             }
           });
     }
@@ -362,13 +425,6 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     return ((QueryStorageGroupResponse) task.getResponse()).getStorageGroups();
   }
 
-  private List<String> queryPaths(SingleQPTask task, PeerId leader)
-      throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response == null ? new ArrayList<>()
-        : ((QueryPathsResponse) response).getPaths();
-  }
-
   /**
    * Combine multiple metadata in String format into single String
    *
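
    The consistency-level dispatch in processStorageGroupQuery() above is the pattern to
    note: a WEAK read answers straight from the local state machine, while stronger levels
    go through jraft's readIndex so the read is linearizable. The same calls as the patch,
    condensed to the two branches:

      // WEAK: serve from the local FSM without consulting the raft quorum.
      if (getReadMetadataConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
        task.receive(QueryStorageGroupResponse
            .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups()));
      } else {
        // Stronger levels: readIndex confirms quorum before serving the read.
        ((RaftService) metadataHolder.getService()).getNode()
            .readIndex(reqContext, new ReadIndexClosure() {
              @Override
              public void run(Status status, long index, byte[] reqCtx) {
                task.receive(status.isOk()
                    ? QueryStorageGroupResponse.createSuccessResponse(
                        metadataHolder.getFsm().getAllStorageGroups())
                    : QueryStorageGroupResponse.createErrorResponse(status.getErrorMsg()));
              }
            });
      }
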
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
index 43edd67..8ede57d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
@@ -26,9 +26,11 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.Future;
 import java.util.concurrent.locks.ReentrantLock;
-import org.apache.iotdb.cluster.concurrent.pool.QPTaskManager;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.executor.AbstractQPExecutor;
 import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
@@ -38,7 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Execute batch statement task. It's thread-safe.
+ * Execute batch statement tasks. It's thread-safe.
  */
 public class BatchQPTask extends MultiQPTask {
 
@@ -46,40 +48,33 @@ public class BatchQPTask extends MultiQPTask {
   private static final Logger LOGGER = LoggerFactory.getLogger(BatchQPTask.class);
 
   /**
-   * Record the index of physical plans in a data group. The index means the position in batchResult
+   * Record the indices of physical plans in each data group. An index is the plan's position in the result array.
    * String: group id
    */
   private Map<String, List<Integer>> planIndexMap;
 
   /**
-   * Batch result
-   */
-  private int[] batchResult;
-
-  /**
-   * Mark if the batch is all successful.
+   * Batch result array; each element marks a result type defined in BatchResult
    */
-  private boolean isAllSuccessful;
+  private int[] resultArray;
 
   /**
-   * Batch error message.
+   * Batch result
    */
-  private String batchErrorMessage;
+  private BatchResult batchResult;
 
   /**
    * Lock to update result
    */
   private ReentrantLock lock = new ReentrantLock();
 
-  private NonQueryExecutor executor;
-
+  private AbstractQPExecutor executor;
 
-  public BatchQPTask(int taskNum, BatchResult batchResult, Map<String, SingleQPTask> taskMap,
+  public BatchQPTask(int taskNum, BatchResult result, Map<String, SingleQPTask> taskMap,
       Map<String, List<Integer>> planIndexMap) {
     super(false, taskNum, TaskType.BATCH);
-    this.batchResult = batchResult.getResult();
-    this.isAllSuccessful = batchResult.isAllSuccessful();
-    this.batchErrorMessage = batchResult.getBatchErrorMessage();
+    this.resultArray = result.getResultArray();
+    this.batchResult = result;
     this.taskMap = taskMap;
     this.planIndexMap = planIndexMap;
     this.taskThreadMap = new HashMap<>();
@@ -91,7 +86,7 @@ public class BatchQPTask extends MultiQPTask {
    * @param basicResponse response from receiver
    */
   @Override
-  public void run(BasicResponse basicResponse) {
+  public void receive(BasicResponse basicResponse) {
     lock.lock();
     try {
       String groupId = basicResponse.getGroupId();
@@ -99,23 +94,55 @@ public class BatchQPTask extends MultiQPTask {
       List<Integer> indexList = planIndexMap.get(groupId);
       for (int i = 0; i < indexList.size(); i++) {
         if (i >= results.size()) {
-          batchResult[indexList.get(i)] = Statement.EXECUTE_FAILED;
+          resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+          batchResult.addBatchErrorMessage(indexList.get(i), basicResponse.getErrorMsg());
         } else {
-          batchResult[indexList.get(i)] =
-              results.get(i) ? Statement.SUCCESS_NO_INFO : Statement.EXECUTE_FAILED;
+          if (results.get(i)) {
+            resultArray[indexList.get(i)] = Statement.SUCCESS_NO_INFO;
+          } else {
+            resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+            batchResult.addBatchErrorMessage(indexList.get(i), basicResponse.getErrorMsg());
+          }
         }
       }
       if (!basicResponse.isSuccess()) {
-        isAllSuccessful = false;
-        batchErrorMessage = basicResponse.getErrorMsg();
+        batchResult.setAllSuccessful(false);
       }
+    } catch (Exception ex) {
+      LOGGER.error("Execute batch statement occurs error.", ex);
     } finally {
       lock.unlock();
     }
     taskCountDownLatch.countDown();
   }
 
-  public void execute(NonQueryExecutor executor) {
+  public void executeQueryMetadataBy(QueryMetadataExecutor executor, String taskInfo) {
+    this.executor = executor;
+
+    for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
+      String groupId = entry.getKey();
+      SingleQPTask subTask = entry.getValue();
+      Future<?> taskThread;
+      taskThread = QPTaskThreadManager.getInstance()
+          .submit(() -> executeRpcSubQueryMetadataTask(subTask, taskInfo, groupId));
+      taskThreadMap.put(groupId, taskThread);
+    }
+  }
+
+  /**
+   * Execute an RPC sub task for a query-metadata request
+   */
+  private void executeRpcSubQueryMetadataTask(SingleQPTask subTask, String taskInfo, String groupId) {
+    try {
+      executor.syncHandleSingleTask(subTask, taskInfo, groupId);
+      this.receive(subTask.getResponse());
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Async handle sub {} task failed.", taskInfo);
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+    }
+  }
+
+  public void executeBy(NonQueryExecutor executor) {
     this.executor = executor;
 
     for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
@@ -123,12 +150,13 @@ public class BatchQPTask extends MultiQPTask {
       SingleQPTask subTask = entry.getValue();
       Future<?> taskThread;
       if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
-        taskThread = QPTaskManager.getInstance()
+        taskThread = QPTaskThreadManager.getInstance()
             .submit(() -> executeLocalSubTask(subTask, groupId));
       } else {
-        PeerId leader = RaftUtils.getLeaderPeerID(groupId);
-        taskThread = QPTaskManager.getInstance()
-            .submit(() -> executeRpcSubTask(subTask, leader, groupId));
+        PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
+        subTask.setTargetNode(leader);
+        taskThread = QPTaskThreadManager.getInstance()
+            .submit(() -> executeRpcSubTask(subTask, groupId));
       }
       taskThreadMap.put(groupId, taskThread);
     }
@@ -139,40 +167,24 @@ public class BatchQPTask extends MultiQPTask {
    */
   private void executeLocalSubTask(QPTask subTask, String groupId) {
     try {
-      executor.handleNonQueryRequestLocally(groupId, subTask);
-      this.run(subTask.getResponse());
+      ((NonQueryExecutor) executor).handleNonQueryRequestLocally(groupId, subTask);
+      this.receive(subTask.getResponse());
     } catch (InterruptedException e) {
       LOGGER.error("Handle sub task locally failed.");
-      this.run(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
     }
   }
 
   /**
    * Execute RPC sub task
    */
-  private void executeRpcSubTask(SingleQPTask subTask, PeerId leader, String groupId) {
+  private void executeRpcSubTask(SingleQPTask subTask, String groupId) {
     try {
-      executor.asyncHandleNonQueryTask(subTask, leader);
-      this.run(subTask.getResponse());
+      executor.syncHandleSingleTask(subTask, "execute sub non-query", groupId);
+      this.receive(subTask.getResponse());
     } catch (RaftConnectionException | InterruptedException e) {
       LOGGER.error("Async handle sub task failed.");
-      this.run(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
     }
   }
-
-  public boolean isAllSuccessful() {
-    return isAllSuccessful;
-  }
-
-  public void setAllSuccessful(boolean allSuccessful) {
-    isAllSuccessful = allSuccessful;
-  }
-
-  public String getBatchErrorMessage() {
-    return batchErrorMessage;
-  }
-
-  public void setBatchErrorMessage(String batchErrorMessage) {
-    this.batchErrorMessage = batchErrorMessage;
-  }
 }
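
    receive() above is the fan-in point of the batch: each sub-task thread merges its
    per-plan results into the shared result array under a lock and then counts down the
    latch the coordinator awaits. A minimal standalone sketch of that merge pattern (plan
    results reduced to a boolean; the constants stand in for Statement.SUCCESS_NO_INFO
    and Statement.EXECUTE_FAILED):

      import java.util.List;
      import java.util.concurrent.CountDownLatch;
      import java.util.concurrent.locks.ReentrantLock;

      final class ResultMergerSketch {

        private static final int SUCCESS = 1;  // stand-in for SUCCESS_NO_INFO
        private static final int FAILED = -1;  // stand-in for EXECUTE_FAILED

        private final ReentrantLock lock = new ReentrantLock();
        private final CountDownLatch latch;
        private final int[] results;

        ResultMergerSketch(int subTaskCount, int planCount) {
          this.latch = new CountDownLatch(subTaskCount);
          this.results = new int[planCount];
        }

        // Called once per sub-task with the indices of the plans it handled.
        void receive(List<Integer> planIndices, boolean success) {
          lock.lock();
          try {
            for (int index : planIndices) {
              results[index] = success ? SUCCESS : FAILED;
            }
          } finally {
            lock.unlock();
          }
          latch.countDown(); // one count per sub-task, mirroring BatchQPTask
        }

        int[] await() throws InterruptedException {
          latch.await(); // block until every sub-task has reported
          return results;
        }
      }
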
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
similarity index 75%
rename from spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
index fc9f177..f861f55 100644
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
@@ -16,14 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.exception;
+package org.apache.iotdb.cluster.qp.task;
 
-public class QueryOperatorException extends LogicalOptimizeException {
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  private static final long serialVersionUID = 8581594261924961899L;
+public class DataQueryTask extends SingleQPTask {
 
-  public QueryOperatorException(String msg) {
-    super(msg);
+  public DataQueryTask(boolean isSyncTask,
+      BasicRequest request) {
+    super(isSyncTask, request);
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
index 96a517a..f3182c9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
@@ -18,6 +18,7 @@
  */
 package org.apache.iotdb.cluster.qp.task;
 
+import com.alipay.sofa.jraft.entity.PeerId;
 import java.util.concurrent.CountDownLatch;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
@@ -36,6 +37,11 @@ public abstract class QPTask {
   protected BasicRequest request;
 
   /**
+   * The target peer of this task
+   */
+  protected PeerId targetNode;
+
+  /**
    * Whether it's a synchronization task or not.
    */
   boolean isSyncTask;
@@ -78,7 +84,7 @@ public abstract class QPTask {
    *
    * @param basicResponse response from receiver
    */
-  public abstract void run(BasicResponse basicResponse);
+  public abstract void receive(BasicResponse basicResponse);
 
   public boolean isSyncTask() {
     return isSyncTask;
@@ -94,6 +100,7 @@ public abstract class QPTask {
 
   public void resetTask() {
     this.taskCountDownLatch = new CountDownLatch(taskNum);
+    this.taskState = TaskState.INITIAL;
   }
 
   public TaskState getTaskState() {
@@ -122,11 +129,44 @@ public abstract class QPTask {
   }
 
   public enum TaskState {
-    INITIAL, REDIRECT, FINISH, EXCEPTION
+
+    /**
+     * Initial state
+     */
+    INITIAL,
+
+    /**
+     * Redirect leader
+     */
+    REDIRECT,
+
+    /**
+     * Task finish
+     */
+    FINISH,
+
+    /**
+     * An exception occurred on the remote node
+     */
+    EXCEPTION,
+
+    /**
+     * Can not connect to remote node
+     */
+    RAFT_CONNECTION_EXCEPTION
   }
 
   public enum TaskType {
-    SINGLE, BATCH
+
+    /**
+     * Single task
+     */
+    SINGLE,
+
+    /**
+     * Batch task
+     */
+    BATCH
   }
 
   /**
@@ -137,4 +177,12 @@ public abstract class QPTask {
   }
 
   public abstract void shutdown();
+
+  public PeerId getTargetNode() {
+    return targetNode;
+  }
+
+  public void setTargetNode(PeerId targetNode) {
+    this.targetNode = targetNode;
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
index 805834e..16ddf60 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
@@ -20,16 +20,12 @@ package org.apache.iotdb.cluster.qp.task;
 
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Process single task.
+ * Process a task for a single raft group; used for all operations except data queries.
  */
 public class SingleQPTask extends QPTask {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(SingleQPTask.class);
-
   private static final int TASK_NUM = 1;
 
   public SingleQPTask(boolean isSyncTask, BasicRequest request) {
@@ -41,11 +37,11 @@ public class SingleQPTask extends QPTask {
    * Process response. If it's necessary to redirect leader, redo the task.
    */
   @Override
-  public void run(BasicResponse response) {
+  public void receive(BasicResponse response) {
     if(taskState != TaskState.EXCEPTION) {
       this.response = response;
       if(response == null){
-        LOGGER.error("Response is null");
+        this.taskState = TaskState.RAFT_CONNECTION_EXCEPTION;
       } else if (response.isRedirected()) {
         this.taskState = TaskState.REDIRECT;
       } else {
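
    The receive() method above maps a response to a task state; restated as a sketch
    (not the actual class):

      //   response == null        -> RAFT_CONNECTION_EXCEPTION (node unreachable)
      //   response.isRedirected() -> REDIRECT (retry against the reported leader)
      //   otherwise               -> FINISH
      TaskState stateOf(BasicResponse response) {
        if (response == null) {
          return TaskState.RAFT_CONNECTION_EXCEPTION;
        }
        return response.isRedirected() ? TaskState.REDIRECT : TaskState.FINISH;
      }
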
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java
new file mode 100644
index 0000000..699db3e
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.common;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * <code>ClusterNullableBatchData</code> is a custom data structure used in cluster query
+ * processing for fill and group-by queries; its list of TimeValuePair entries may contain
+ * <code>null</code> values.
+ */
+public class ClusterNullableBatchData extends BatchData {
+
+  private List<TimeValuePair> timeValuePairList;
+  private int index;
+
+  public ClusterNullableBatchData() {
+    this.timeValuePairList = new ArrayList<>();
+    this.index = 0;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return index < timeValuePairList.size();
+  }
+
+  @Override
+  public void next() {
+    index++;
+  }
+
+  @Override
+  public long currentTime() {
+    rangeCheckForTime(index);
+    return timeValuePairList.get(index).getTimestamp();
+  }
+
+  @Override
+  public Object currentValue() {
+    if (index < length()) {
+      return timeValuePairList.get(index).getValue() == null ? null
+          : timeValuePairList.get(index).getValue().getValue();
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public int length() {
+    return timeValuePairList.size();
+  }
+
+  public TimeValuePair getCurrentTimeValuePair() {
+    return index < length() ? timeValuePairList.get(index) : null;
+  }
+
+  public void addTimeValuePair(TimeValuePair timeValuePair){
+    timeValuePairList.add(timeValuePair);
+  }
+}
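
    A hypothetical usage of the new class, showing why it exists: a remote node may
    report no value for a fill or group-by slot, and the null entry survives iteration
    instead of being dropped. The TimeValuePair and TsPrimitiveType calls below are
    assumed from the IoTDB utility classes of this codebase:

      ClusterNullableBatchData batch = new ClusterNullableBatchData();
      batch.addTimeValuePair(new TimeValuePair(1L,
          TsPrimitiveType.getByType(TSDataType.INT32, 7))); // slot with a value
      batch.addTimeValuePair(new TimeValuePair(2L, null));  // slot with no value
      while (batch.hasNext()) {
        long time = batch.currentTime();
        Object value = batch.currentValue(); // null for the empty slot
        batch.next();
      }
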
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
index f3e4eaf..5a06ca8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
@@ -141,9 +141,6 @@ public class ClusterDataSetWithTimeGenerator extends QueryDataSet {
         }
       }
     }
-    if (cachedBatchTimestamp != null && cachedBatchTimestamp.hasNext()) {
-      return true;
-    }
-    return false;
+    return cachedBatchTimestamp != null && cachedBatchTimestamp.hasNext();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java
new file mode 100644
index 0000000..599439a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.dataset;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.query.aggregation.AggreResultData;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.read.common.Field;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+/**
+ * Handles group-by queries that carry only a global time filter.
+ */
+public class ClusterGroupByDataSetWithOnlyTimeFilter extends GroupByWithOnlyTimeFilterDataSet {
+
+  private ClusterRpcSingleQueryManager queryManager;
+  private List<IPointReader> readersOfSelectedSeries;
+
+  /**
+   * constructor.
+   */
+  public ClusterGroupByDataSetWithOnlyTimeFilter(long jobId,
+      List<Path> paths, long unit, long origin,
+      List<Pair<Long, Long>> mergedIntervals, ClusterRpcSingleQueryManager queryManager) {
+    super(jobId, paths, unit, origin, mergedIntervals);
+    this.queryManager = queryManager;
+    this.readersOfSelectedSeries = new ArrayList<>();
+  }
+
+
+  /**
+   * init reader and aggregate function.
+   */
+  @Override
+  public void initGroupBy(QueryContext context, List<String> aggres, IExpression expression)
+      throws FileNodeManagerException, PathErrorException, ProcessorException, IOException {
+    initAggreFuction(aggres);
+
+    /** add query tokens for select series that can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+    if (expression != null) {
+      timeFilter = ((GlobalTimeExpression) expression).getFilter();
+    }
+
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
+    // track the select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      Path path = selectedSeries.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
+        readersOfSelectedSeries.add(reader);
+        selectSeriesReaderIndex.put(groupId, index + 1);
+      } else {
+        readersOfSelectedSeries.add(null);
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(selectedSeries.get(i), context);
+
+        // sequence reader for sealed tsfile, unsealed tsfile, memory
+        SequenceDataReader sequenceReader = new SequenceDataReader(
+            queryDataSource.getSeqDataSource(),
+            timeFilter, context, false);
+
+        // unseq reader for all chunk groups in unSeqFile, memory
+        PriorityMergeReader unSeqMergeReader = SeriesReaderFactory.getInstance()
+            .createUnSeqMergeReader(queryDataSource.getOverflowSeriesDataSource(), timeFilter);
+
+        sequenceReaderList.add(sequenceReader);
+        unSequenceReaderList.add(unSeqMergeReader);
+      }
+    }
+  }
+
+  @Override
+  public RowRecord next() throws IOException {
+    if (!hasCachedTimeInterval) {
+      throw new IOException("need to call hasNext() before calling next() "
+          + "in GroupByWithOnlyTimeFilterDataSet.");
+    }
+    hasCachedTimeInterval = false;
+    RowRecord record = new RowRecord(startTime);
+    for (int i = 0; i < functions.size(); i++) {
+      IPointReader reader = readersOfSelectedSeries.get(i);
+      if (reader != null) {
+        TimeValuePair timeValuePair = reader.next();
+        if (timeValuePair == null) {
+          record.addField(new Field(null));
+        } else {
+          record.addField(getField(timeValuePair.getValue().getValue(), dataTypes.get(i)));
+        }
+      } else {
+        AggreResultData res;
+        try {
+          res = nextSeries(i);
+        } catch (ProcessorException e) {
+          throw new IOException(e);
+        }
+        if (res == null) {
+          record.addField(new Field(null));
+        } else {
+          record.addField(getField(res));
+        }
+      }
+    }
+    return record;
+  }
+}
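
The containsKey/getOrDefault pattern in initGroupBy above recurs throughout this commit, so a
stripped-down sketch of the dispatch idiom may help (generic placeholder types, not part of the
commit): each data group keeps an ordered list of remote readers, and a per-group counter pairs
the i-th selected path of a group with that group's i-th reader; paths without a remote group
fall back to local handling.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class GroupReaderDispatchSketch {
      /** Pairs each path's group id with the next unused reader of that group. */
      static <R> List<R> dispatch(List<String> groupIdPerPath,
          Map<String, List<R>> readersByGroup) {
        Map<String, Integer> nextIndex = new HashMap<>();
        List<R> result = new ArrayList<>();
        for (String groupId : groupIdPerPath) {
          if (readersByGroup.containsKey(groupId)) {
            int index = nextIndex.getOrDefault(groupId, 0);
            result.add(readersByGroup.get(groupId).get(index));
            nextIndex.put(groupId, index + 1);
          } else {
            result.add(null); // null marks "handle this series locally"
          }
        }
        return result;
      }
    }
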
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
similarity index 53%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
index f7ffa29..89ed1b9 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
@@ -16,69 +16,80 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iotdb.db.query.dataset.groupby;
+package org.apache.iotdb.cluster.query.dataset;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import java.util.Set;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.aggregation.AggregateFunction;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
-import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
-import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
-import org.apache.iotdb.db.query.timegenerator.EngineTimeGenerator;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithValueFilterDataSet;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.common.RowRecord;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 import org.apache.iotdb.tsfile.utils.Pair;
 
-public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
+public class ClusterGroupByDataSetWithTimeGenerator extends GroupByWithValueFilterDataSet {
 
-  private List<EngineReaderByTimeStamp> allDataReaderList;
-  private TimeGenerator timestampGenerator;
-  /**
-   * cached timestamp for next group by partition.
-   */
-  private long timestamp;
-  /**
-   * if this object has cached timestamp for next group by partition.
-   */
-  private boolean hasCachedTimestamp;
+  private ClusterRpcSingleQueryManager queryManager;
 
-  /**
-   * group by batch calculation size.
-   */
-  private int timeStampFetchSize;
+  private List<TSDataType> selectSeriesDataTypes;
 
   /**
    * constructor.
    */
-  public GroupByWithValueFilterDataSet(long jobId, List<Path> paths, long unit, long origin,
-      List<Pair<Long, Long>> mergedIntervals) {
+  public ClusterGroupByDataSetWithTimeGenerator(long jobId,
+      List<Path> paths, long unit, long origin,
+      List<Pair<Long, Long>> mergedIntervals, ClusterRpcSingleQueryManager queryManager) {
     super(jobId, paths, unit, origin, mergedIntervals);
-    this.allDataReaderList = new ArrayList<>();
-    this.timeStampFetchSize = 10 * IoTDBDescriptor.getInstance().getConfig().getFetchSize();
+    this.queryManager = queryManager;
+    selectSeriesDataTypes = new ArrayList<>();
   }
 
   /**
    * init reader and aggregate function.
    */
+  @Override
   public void initGroupBy(QueryContext context, List<String> aggres, IExpression expression)
       throws FileNodeManagerException, PathErrorException, ProcessorException, IOException {
     initAggreFuction(aggres);
 
-    QueryResourceManager.getInstance().beginQueryOfGivenExpression(context.getJobId(), expression);
-    QueryResourceManager
-        .getInstance().beginQueryOfGivenQueryPaths(context.getJobId(), selectedSeries);
-    this.timestampGenerator = new EngineTimeGenerator(expression, context);
-    this.allDataReaderList = SeriesReaderFactory
-        .getByTimestampReadersOfSelectedPaths(selectedSeries, context);
+    /** add query tokens for filter series that can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), expression, deviceIdSet);
+
+    /** add query tokens for select series that can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    this.timestampGenerator = new ClusterTimeGenerator(expression, context, queryManager);
+    this.allDataReaderList = ClusterSeriesReaderFactory
+        .createReadersByTimestampOfSelectedPaths(selectedSeries, context, queryManager,
+            selectSeriesDataTypes);
   }
 
   @Override
@@ -92,7 +103,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       function.init();
     }
 
-    long[] timestampArray = new long[timeStampFetchSize];
+    long[] timestampArray = new long[timestampFetchSize];
     int timeArrayLength = 0;
     if (hasCachedTimestamp) {
       if (timestamp < endTime) {
@@ -107,6 +118,8 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       // construct timestamp array
       timeArrayLength = constructTimeArrayForOneCal(timestampArray, timeArrayLength);
 
+      fetchSelectDataFromRemoteNode(timeArrayLength, timestampArray);
+
       // cal result using timestamp array
       for (int i = 0; i < selectedSeries.size(); i++) {
         functions.get(i).calcAggregationUsingTimestamps(
@@ -121,6 +134,9 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       }
     }
 
+    // fetch select series data from remote node
+    fetchSelectDataFromRemoteNode(timeArrayLength, timestampArray);
+
     if (timeArrayLength > 0) {
       // cal result using timestamp array
       for (int i = 0; i < selectedSeries.size(); i++) {
@@ -132,6 +148,28 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
   }
 
   /**
+   * Fetch batch data of all remote select series for the given batch of timestamps.
+   * @param timeArrayLength number of valid timestamps in the array
+   * @param timestampArray timestamp array
+   */
+  private void fetchSelectDataFromRemoteNode(int timeArrayLength, long[] timestampArray)
+      throws IOException {
+    if (timeArrayLength != 0) {
+      List<Long> batchTimestamp = new ArrayList<>();
+      for (int i = 0; i < timeArrayLength; i++) {
+        batchTimestamp.add(timestampArray[i]);
+      }
+
+      try {
+        queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+      } catch (RaftConnectionException e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  /**
    * construct an array of timestamps for one batch of a group by partition calculating.
    *
    * @param timestampArray timestamp array
@@ -140,7 +178,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
    */
   private int constructTimeArrayForOneCal(long[] timestampArray, int timeArrayLength)
       throws IOException {
-    for (int cnt = 1; cnt < timeStampFetchSize && timestampGenerator.hasNext(); cnt++) {
+    for (int cnt = 1; cnt < timestampFetchSize && timestampGenerator.hasNext(); cnt++) {
       timestamp = timestampGenerator.next();
       if (timestamp < endTime) {
         timestampArray[timeArrayLength++] = timestamp;
@@ -151,10 +189,4 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
     }
     return timeArrayLength;
   }
-
-  private RowRecord constructRowRecord() {
-    RowRecord record = new RowRecord(startTime);
-    functions.forEach(function -> record.addField(getField(function.getResult())));
-    return record;
-  }
 }
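
Both initGroupBy implementations above register query tokens only for the series a node reads
itself. A self-contained sketch of that set-difference step (plain strings instead of Path
objects, not part of the commit):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class LocalRemoteSplitSketch {
      public static void main(String[] args) {
        List<String> selectedSeries =
            new ArrayList<>(Arrays.asList("root.a.s1", "root.b.s1", "root.c.s1"));
        // series that remote data groups read on the coordinator's behalf
        Set<String> remoteQuerySeries = new HashSet<>(Arrays.asList("root.b.s1"));
        List<String> localQuerySeries = new ArrayList<>(selectedSeries);
        localQuerySeries.removeAll(remoteQuerySeries);
        System.out.println(localQuerySeries); // [root.a.s1, root.c.s1]
      }
    }
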
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
new file mode 100644
index 0000000..808eab8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.query.aggregation.AggreResultData;
+import org.apache.iotdb.db.query.aggregation.AggregateFunction;
+import org.apache.iotdb.db.query.aggregation.impl.LastAggrFunc;
+import org.apache.iotdb.db.query.aggregation.impl.MaxTimeAggrFunc;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.AggreResultDataPointReader;
+import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
+import org.apache.iotdb.db.query.factory.AggreFuncFactory;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
+
+/**
+ * Handles aggregation queries and constructs the result dataset in the cluster.
+ */
+public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
+
+  private ClusterRpcSingleQueryManager queryManager;
+
+
+  public ClusterAggregateEngineExecutor(List<Path> selectedSeries, List<String> aggres,
+      IExpression expression, ClusterRpcSingleQueryManager queryManager) {
+    super(selectedSeries, aggres, expression);
+    this.queryManager = queryManager;
+  }
+
+  @Override
+  public QueryDataSet executeWithoutTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, IOException, PathErrorException, ProcessorException {
+    Filter timeFilter = expression != null ? ((GlobalTimeExpression) expression).getFilter() : null;
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
+
+    List<Path> paths = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    // track the select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      Path path = selectedSeries.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
+        readers.add(reader);
+        dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
+      } else {
+        paths.add(path);
+        // construct AggregateFunction
+        TSDataType tsDataType = MManager.getInstance()
+            .getSeriesType(path.getFullPath());
+        AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), tsDataType);
+        function.init();
+
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(selectedSeries.get(i), context);
+
+        // sequence reader for sealed tsfile, unsealed tsfile, memory
+        SequenceDataReader sequenceReader;
+        if (function instanceof MaxTimeAggrFunc || function instanceof LastAggrFunc) {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, true);
+        } else {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, false);
+        }
+
+        // unseq reader for all chunk groups in unSeqFile, memory
+        PriorityMergeReader unSeqMergeReader = SeriesReaderFactory.getInstance()
+            .createUnSeqMergeReader(queryDataSource.getOverflowSeriesDataSource(), timeFilter);
+
+        AggreResultData aggreResultData = aggregateWithoutTimeGenerator(function,
+            sequenceReader, unSeqMergeReader, timeFilter);
+
+        dataTypes.add(aggreResultData.getDataType());
+        readers.add(new AggreResultDataPointReader(aggreResultData));
+      }
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
+
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, readers);
+  }
+
+  /**
+   * execute aggregate function with value filter.
+   *
+   * @param context query context.
+   */
+  @Override
+  public QueryDataSet executeWithTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+
+    /** add query tokens for select series that can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    /** add query tokens for filter series that can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), expression, deviceIdSet);
+
+    ClusterTimeGenerator timestampGenerator;
+    List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    // origin data type of select paths
+    List<TSDataType> originDataTypes = new ArrayList<>();
+    try {
+      timestampGenerator = new ClusterTimeGenerator(expression, context,
+          queryManager);
+      readersOfSelectedSeries = ClusterSeriesReaderFactory
+          .createReadersByTimestampOfSelectedPaths(selectedSeries, context,
+              queryManager, originDataTypes);
+    } catch (IOException ex) {
+      throw new FileNodeManagerException(ex);
+    }
+
+    List<AggregateFunction> aggregateFunctions = new ArrayList<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      TSDataType type = originDataTypes.get(i);
+      AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), type);
+      function.init();
+      aggregateFunctions.add(function);
+    }
+    List<AggreResultData> aggreResultDataList = aggregateWithTimeGenerator(aggregateFunctions,
+        timestampGenerator,
+        readersOfSelectedSeries);
+
+    List<IPointReader> resultDataPointReaders = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    for (AggreResultData resultData : aggreResultDataList) {
+      dataTypes.add(resultData.getDataType());
+      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
+    }
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  }
+
+  /**
+   * Calculate the aggregate result with a value filter.
+   */
+  @Override
+  protected List<AggreResultData> aggregateWithTimeGenerator(
+      List<AggregateFunction> aggregateFunctions,
+      TimeGenerator timestampGenerator,
+      List<EngineReaderByTimeStamp> readersOfSelectedSeries)
+      throws IOException {
+
+    while (timestampGenerator.hasNext()) {
+
+      // generate a batch of timestamps for aggregation
+      long[] timeArray = new long[aggregateFetchSize];
+      List<Long> batchTimestamp = new ArrayList<>();
+      int timeArrayLength = 0;
+      for (int cnt = 0; cnt < aggregateFetchSize; cnt++) {
+        if (!timestampGenerator.hasNext()) {
+          break;
+        }
+        long time = timestampGenerator.next();
+        timeArray[timeArrayLength++] = time;
+        batchTimestamp.add(time);
+      }
+
+      // fetch all remote select series data by timestamp list.
+      if (!batchTimestamp.isEmpty()) {
+        try {
+          queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+        } catch (RaftConnectionException e) {
+          throw new IOException(e);
+        }
+      }
+
+      // calculate a partial aggregate result for this batch of timestamps
+      for (int i = 0; i < readersOfSelectedSeries.size(); i++) {
+        aggregateFunctions.get(i).calcAggregationUsingTimestamps(timeArray, timeArrayLength,
+            readersOfSelectedSeries.get(i));
+      }
+    }
+
+    List<AggreResultData> aggreResultDataArrayList = new ArrayList<>();
+    for (AggregateFunction function : aggregateFunctions) {
+      aggreResultDataArrayList.add(function.getResult());
+    }
+    return aggreResultDataArrayList;
+  }
+}
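
aggregateWithTimeGenerator above drains the timestamp generator in fixed-size batches,
prefetches remote select series data for each batch, and only then feeds the batch to every
aggregate function. A runnable sketch of that loop shape (a plain long iterator stands in for
the time generator, and a print stands in for the remote prefetch; not part of the commit):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.PrimitiveIterator;
    import java.util.stream.LongStream;

    public class BatchedAggregationSketch {
      public static void main(String[] args) {
        PrimitiveIterator.OfLong timestamps = LongStream.range(0, 10).iterator();
        int fetchSize = 4; // plays the role of aggregateFetchSize
        while (timestamps.hasNext()) {
          long[] timeArray = new long[fetchSize];
          int length = 0;
          List<Long> batch = new ArrayList<>();
          while (length < fetchSize && timestamps.hasNext()) {
            long t = timestamps.nextLong();
            timeArray[length++] = t;
            batch.add(t);
          }
          // in the executor: queryManager.fetchBatchDataByTimestampForAllSelectPaths(batch)
          System.out.println("prefetch " + batch + ", then aggregate " + length + " timestamps");
        }
      }
    }
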
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
index fed8c0d..0cdd457 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
@@ -22,17 +22,14 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import org.apache.iotdb.cluster.query.dataset.ClusterDataSetWithTimeGenerator;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
-import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
-import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
@@ -71,15 +68,19 @@ public class ClusterExecutorWithTimeGenerator {
 
     /** add query token for query series which can handle locally **/
     List<Path> localQuerySeries = new ArrayList<>(queryExpression.getSelectedSeries());
-    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> selectSeriesGroupEntity.getSelectPaths()
+            .forEach(path -> remoteQuerySeries.add(path)));
     localQuerySeries.removeAll(remoteQuerySeries);
     QueryResourceManager.getInstance()
         .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
 
     /** add query token for filter series which can handle locally **/
     Set<String> deviceIdSet = new HashSet<>();
-    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
-      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
       remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
     }
     QueryResourceManager.getInstance()
@@ -88,33 +89,18 @@ public class ClusterExecutorWithTimeGenerator {
 
     ClusterTimeGenerator timestampGenerator;
     List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    /** Get data type of select paths **/
+    List<TSDataType> dataTypes = new ArrayList<>();
     try {
       timestampGenerator = new ClusterTimeGenerator(queryExpression.getExpression(), context,
           queryManager);
       readersOfSelectedSeries = ClusterSeriesReaderFactory
           .createReadersByTimestampOfSelectedPaths(queryExpression.getSelectedSeries(), context,
-              queryManager);
-    } catch (IOException ex) {
+              queryManager, dataTypes);
+    } catch (IOException | PathErrorException ex) {
       throw new FileNodeManagerException(ex);
     }
 
-    /** Get data type of select paths **/
-    List<TSDataType> dataTypes = new ArrayList<>();
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
-        .getSelectSeriesReaders();
-    for (Path path : queryExpression.getSelectedSeries()) {
-      try {
-        if (selectSeriesReaders.containsKey(path)) {
-          dataTypes.add(selectSeriesReaders.get(path).getDataType());
-        } else {
-          dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
-        }
-      } catch (PathErrorException e) {
-        throw new FileNodeManagerException(e);
-      }
-
-    }
-
     EngineReaderByTimeStamp[] readersOfSelectedSeriesArray = new EngineReaderByTimeStamp[readersOfSelectedSeries
         .size()];
     int index = 0;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
index 65bd87b..95e5f1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
@@ -20,15 +20,19 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
-import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
@@ -37,7 +41,7 @@ import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGenerator {
+public class ClusterExecutorWithoutTimeGenerator extends AbstractExecutorWithoutTimeGenerator {
 
   /**
    * Query expression
@@ -62,7 +66,7 @@ public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGene
    * Execute query without filter or with only global time filter.
    */
   public QueryDataSet execute(QueryContext context)
-      throws FileNodeManagerException {
+      throws FileNodeManagerException, PathErrorException {
 
     Filter timeFilter = null;
     if (queryExpression.getExpression() != null) {
@@ -72,15 +76,22 @@ public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGene
     List<IPointReader> readersOfSelectedSeries = new ArrayList<>();
     List<TSDataType> dataTypes = new ArrayList<>();
 
-    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> paths = new ArrayList<>();
+    // track the select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : queryExpression.getSelectedSeries()) {
 
-      if (selectPathReaders.containsKey(path)) {
-        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
         readersOfSelectedSeries.add(reader);
         dataTypes.add(reader.getDataType());
-
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         IPointReader reader = createSeriesReader(context, path, dataTypes, timeFilter);
         readersOfSelectedSeries.add(reader);
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
similarity index 50%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
index 83c5fa9..608a479 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
@@ -16,13 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iotdb.db.query.executor;
+package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -30,6 +34,7 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.IFillEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -37,55 +42,63 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class FillEngineExecutor {
+public class ClusterFillEngineExecutor implements IFillEngineExecutor {
 
-  private long jobId;
   private List<Path> selectedSeries;
   private long queryTime;
   private Map<TSDataType, IFill> typeIFillMap;
+  private ClusterRpcSingleQueryManager queryManager;
+
 
-  public FillEngineExecutor(long jobId, List<Path> selectedSeries, long queryTime,
-      Map<TSDataType, IFill> typeIFillMap) {
-    this.jobId = jobId;
+  public ClusterFillEngineExecutor(List<Path> selectedSeries, long queryTime,
+      Map<TSDataType, IFill> typeIFillMap, ClusterRpcSingleQueryManager queryManager) {
     this.selectedSeries = selectedSeries;
     this.queryTime = queryTime;
     this.typeIFillMap = typeIFillMap;
+    this.queryManager = queryManager;
   }
 
-  /**
-   * execute fill.
-   *
-   * @param context query context
-   */
+  @Override
   public QueryDataSet execute(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException {
-    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedSeries);
-
+    List<Path> paths = new ArrayList<>();
     List<IFill> fillList = new ArrayList<>();
     List<TSDataType> dataTypeList = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap =
+        queryManager.getSelectSeriesGroupEntityMap();
+    // track the select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : selectedSeries) {
-      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
-          .getQueryDataSource(path, context);
-      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      dataTypeList.add(dataType);
-      IFill fill = null;
-      if (!typeIFillMap.containsKey(dataType)) {
-        fill = new PreviousFill(dataType, queryTime, 0);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader =
+            selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
+        readers.add(reader);
+        dataTypeList.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
-        fill = typeIFillMap.get(dataType).copy(path);
+        // register the locally handled path so its query token is acquired below
+        paths.add(path);
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(path, context);
+        TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+        dataTypeList.add(dataType);
+        IFill fill;
+        if (!typeIFillMap.containsKey(dataType)) {
+          fill = new PreviousFill(dataType, queryTime, 0);
+        } else {
+          fill = typeIFillMap.get(dataType).copy(path);
+        }
+        fill.setDataType(dataType);
+        fill.setQueryTime(queryTime);
+        fill.constructReaders(queryDataSource, context);
+        fillList.add(fill);
+        readers.add(fill.getFillResult());
       }
-      fill.setDataType(dataType);
-      fill.setQueryTime(queryTime);
-      fill.constructReaders(queryDataSource, context);
-      fillList.add(fill);
     }
 
-    List<IPointReader> readers = new ArrayList<>();
-    for (IFill fill : fillList) {
-      readers.add(fill.getFillResult());
-    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
 
     return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypeList, readers);
   }
-
 }
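
The fill fallback above defaults to a previous fill with a zero look-back bound whenever no
IFill is configured for a data type. A sketch of just that decision, reusing the constructor
call that appears in the diff (not part of the commit):

    import java.util.Map;
    import org.apache.iotdb.db.query.fill.IFill;
    import org.apache.iotdb.db.query.fill.PreviousFill;
    import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;

    public class FillFallbackSketch {
      /** Returns the configured fill for the type, or a default previous fill. */
      static IFill chooseFill(Map<TSDataType, IFill> typeIFillMap, TSDataType dataType,
          long queryTime) {
        if (!typeIFillMap.containsKey(dataType)) {
          return new PreviousFill(dataType, queryTime, 0);
        }
        // the real executor additionally calls copy(path) to bind the fill to a series
        return typeIFillMap.get(dataType);
      }
    }
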
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
index 4211528..3db7c6a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
@@ -23,13 +23,16 @@ import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.query.QueryType;
+import org.apache.iotdb.cluster.query.dataset.ClusterGroupByDataSetWithOnlyTimeFilter;
+import org.apache.iotdb.cluster.query.dataset.ClusterGroupByDataSetWithTimeGenerator;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
+import org.apache.iotdb.db.query.executor.AbstractQueryRouter;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -37,7 +40,10 @@ import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
+import org.apache.iotdb.tsfile.read.filter.TimeFilter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Pair;
 
@@ -45,7 +51,7 @@ import org.apache.iotdb.tsfile.utils.Pair;
  * Query entrance class of cluster query process. All query clause will be transformed to physical
  * plan, physical plan will be executed by ClusterQueryRouter.
  */
-public class ClusterQueryRouter implements IEngineQueryRouter {
+public class ClusterQueryRouter extends AbstractQueryRouter {
 
   /**
    * Consistency level of reading data
@@ -86,15 +92,43 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
         return engineExecutor.execute(context);
       }
     } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
-      throw new FileNodeManagerException(e);
+      throw new FileNodeManagerException(e.getMessage());
     }
   }
 
   @Override
   public QueryDataSet aggregate(List<Path> selectedSeries, List<String> aggres,
       IExpression expression, QueryContext context)
-      throws QueryFilterOptimizationException, FileNodeManagerException, IOException, PathErrorException, ProcessorException {
-    throw new UnsupportedOperationException();
+      throws FileNodeManagerException, PathErrorException, ProcessorException {
+
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+
+    try {
+      if (expression != null) {
+        IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+            .optimize(expression, selectedSeries);
+        // update the query expression of the origin query plan; later steps rely on it.
+        queryManager.getOriginQueryPlan().setExpression(optimizedExpression);
+
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, optimizedExpression, queryManager);
+        if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+          queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithoutTimeGenerator(context);
+        } else {
+          queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithTimeGenerator(context);
+        }
+      } else {
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, null, queryManager);
+        queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+        return engineExecutor.executeWithoutTimeGenerator(context);
+      }
+    } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   @Override
@@ -102,13 +136,76 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
       IExpression expression, long unit, long origin, List<Pair<Long, Long>> intervals,
       QueryContext context)
       throws ProcessorException, QueryFilterOptimizationException, FileNodeManagerException, PathErrorException, IOException {
-    throw new UnsupportedOperationException();
+
+    long jobId = context.getJobId();
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(jobId);
+
+    // check the legitimacy of the intervals
+    checkIntervals(intervals);
+
+    // merge intervals
+    List<Pair<Long, Long>> mergedIntervalList = mergeInterval(intervals);
+
+    // construct groupBy intervals filter
+    BinaryExpression intervalFilter = null;
+    for (Pair<Long, Long> pair : mergedIntervalList) {
+      BinaryExpression pairFilter = BinaryExpression
+          .and(new GlobalTimeExpression(TimeFilter.gtEq(pair.left)),
+              new GlobalTimeExpression(TimeFilter.ltEq(pair.right)));
+      if (intervalFilter != null) {
+        intervalFilter = BinaryExpression.or(intervalFilter, pairFilter);
+      } else {
+        intervalFilter = pairFilter;
+      }
+    }
+
+    // merge interval filter and filtering conditions after where statements
+    if (expression == null) {
+      expression = intervalFilter;
+    } else {
+      expression = BinaryExpression.and(expression, intervalFilter);
+    }
+
+    IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+        .optimize(expression, selectedSeries);
+    // update the query expression of the origin query plan; later steps rely on it.
+    queryManager.getOriginQueryPlan().setExpression(optimizedExpression);
+
+    try {
+      if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+        queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+        ClusterGroupByDataSetWithOnlyTimeFilter groupByEngine = new ClusterGroupByDataSetWithOnlyTimeFilter(
+            jobId, selectedSeries, unit, origin, mergedIntervalList, queryManager);
+        groupByEngine.initGroupBy(context, aggres, optimizedExpression);
+        return groupByEngine;
+      } else {
+        queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+        ClusterGroupByDataSetWithTimeGenerator groupByEngine = new ClusterGroupByDataSetWithTimeGenerator(
+            jobId, selectedSeries, unit, origin, mergedIntervalList, queryManager);
+        groupByEngine.initGroupBy(context, aggres, optimizedExpression);
+        return groupByEngine;
+      }
+    } catch (RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   @Override
   public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillType,
       QueryContext context) throws FileNodeManagerException, PathErrorException, IOException {
-    throw new UnsupportedOperationException();
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+    try {
+      queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+
+      ClusterFillEngineExecutor fillEngineExecutor = new ClusterFillEngineExecutor(fillPaths,
+          queryTime,
+          fillType, queryManager);
+      return fillEngineExecutor.execute(context);
+    } catch (IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   public int getReadDataConsistencyLevel() {
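
The interval-filter construction in groupBy above is worth seeing in isolation: each merged
pair [l, r] becomes (time >= l AND time <= r), and the pairs are OR-ed together. The sketch
below (not part of the commit) uses only tsfile classes that already appear in the diff.

    import java.util.Arrays;
    import java.util.List;
    import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression;
    import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
    import org.apache.iotdb.tsfile.read.filter.TimeFilter;
    import org.apache.iotdb.tsfile.utils.Pair;

    public class IntervalFilterSketch {
      public static void main(String[] args) {
        List<Pair<Long, Long>> mergedIntervalList =
            Arrays.asList(new Pair<>(0L, 10L), new Pair<>(20L, 30L));
        BinaryExpression intervalFilter = null;
        for (Pair<Long, Long> pair : mergedIntervalList) {
          BinaryExpression pairFilter = BinaryExpression.and(
              new GlobalTimeExpression(TimeFilter.gtEq(pair.left)),
              new GlobalTimeExpression(TimeFilter.ltEq(pair.right)));
          intervalFilter =
              intervalFilter == null ? pairFilter : BinaryExpression.or(intervalFilter, pairFilter);
        }
        // ((time >= 0 and time <= 10) or (time >= 20 and time <= 30))
        System.out.println(intervalFilter);
      }
    }
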
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
index ddfa5eb..a9ee032 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
@@ -20,18 +20,24 @@ package org.apache.iotdb.cluster.query.factory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
 import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestamp;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
 /**
@@ -39,28 +45,39 @@ import org.apache.iotdb.tsfile.read.common.Path;
  */
 public class ClusterSeriesReaderFactory {
 
+  private ClusterSeriesReaderFactory() {
+  }
+
   /**
-   * Construct ReaderByTimestamp , include sequential data and unsequential data.
+   * Construct ReaderByTimestamp readers, including sequential and unsequential data, and
+   * collect the data type of each series.
    *
    * @param paths selected series path
    * @param context query context
    * @return the list of EngineReaderByTimeStamp
    */
   public static List<EngineReaderByTimeStamp> createReadersByTimestampOfSelectedPaths(
-      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager)
-      throws IOException, FileNodeManagerException {
+      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager,
+      List<TSDataType> dataTypes)
+      throws IOException, FileNodeManagerException, PathErrorException {
 
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+    // track the select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
 
     for (Path path : paths) {
-
-      if (selectSeriesReaders.containsKey(path)) {
-        readersOfSelectedSeries.add(selectSeriesReaders.get(path));
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader =
+            selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
+        readersOfSelectedSeries.add(reader);
+        dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         /** can handle series query locally **/
         EngineReaderByTimeStamp readerByTimeStamp = createReaderByTimeStamp(path, context);
         readersOfSelectedSeries.add(readerByTimeStamp);
+        dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
       }
     }
     return readersOfSelectedSeries;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
index faece2b..f57c538 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
@@ -20,7 +20,9 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 
 import com.alipay.sofa.jraft.util.OnlyForTest;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -80,13 +82,23 @@ public class ClusterRpcQueryManager implements IClusterRpcQueryManager {
   public Map<String, Integer> getAllReadUsage() {
     Map<String, Integer> readerUsageMap = new HashMap<>();
     SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
-      for(String groupId:singleQueryManager.getDataGroupUsage()) {
+      for (String groupId : singleQueryManager.getDataGroupUsage()) {
         readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
       }
     });
     return readerUsageMap;
   }
 
+  @Override
+  public void close() throws RaftConnectionException {
+    Iterator<Map.Entry<String, ClusterRpcSingleQueryManager>> iterator =
+        SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<String, ClusterRpcSingleQueryManager> entry = iterator.next();
+      entry.getValue().releaseQueryResource();
+      iterator.remove();
+    }
+  }
+
   @OnlyForTest
   public static ConcurrentHashMap<Long, String> getJobIdMapTaskId() {
     return JOB_ID_MAP_TASK_ID;
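
close() drains the manager map with an explicit iterator so every entry can release its remote resources and be dropped in one pass; ClusterLocalQueryManager.close() later in this patch mirrors the same idiom on the query node. A self-contained sketch, with a placeholder resource type:

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class DrainSketch {
      interface Releasable { void release() throws Exception; }

      static void drain(ConcurrentHashMap<String, Releasable> managers) throws Exception {
        Iterator<Map.Entry<String, Releasable>> it = managers.entrySet().iterator();
        while (it.hasNext()) {
          it.next().getValue().release(); // free the entry's resources first
          it.remove();                    // then drop it through the iterator, keeping the traversal valid
        }
      }
    }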
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index d9a5859..c9dc701 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -37,24 +38,31 @@ import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeries
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.cluster.query.utils.ClusterRpcReaderUtils;
 import org.apache.iotdb.cluster.query.utils.QueryPlanPartitionUtils;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
-import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage all remote series reader resource in a query resource in coordinator node.
  */
 public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManager {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcSingleQueryManager.class);
   /**
    * Statistic all usage of local data group.
    */
@@ -82,27 +90,15 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
 
   // select path resource
   /**
-   * Query plans of select paths which are divided from queryPlan group by group id, it contains all
-   * group id ,including local data group if it involves.
+   * Select series group entities grouped by data group; key is group id (only contains remote group ids)
    */
-  private Map<String, QueryPlan> selectPathPlans = new HashMap<>();
-
-  /**
-   * Key is group id (only contains remote group id), value is all select series in group id.
-   */
-  private Map<String, List<Path>> selectSeriesByGroupId = new HashMap<>();
-
-  /**
-   * Series reader of select paths (only contains remote series), key is series path , value is
-   * reader
-   */
-  private Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = new HashMap<>();
+  private Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = new HashMap<>();
 
   // filter path resource
   /**
-   * Filter group entity group by data group, key is group id(only contain remote group id)
+   * Filter series group entities grouped by data group; key is group id (only contains remote group ids)
    */
-  private Map<String, FilterGroupEntity> filterGroupEntityMap = new HashMap<>();
+  private Map<String, FilterSeriesGroupEntity> filterSeriesGroupEntityMap = new HashMap<>();
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
@@ -134,48 +130,61 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    * group
    */
   private void initSeriesReader(int readDataConsistencyLevel)
-      throws IOException, RaftConnectionException {
+      throws RaftConnectionException, IOException {
     // Init all series with data group of select series,if filter series has the same data group, init them together.
-    for (Entry<String, QueryPlan> entry : selectPathPlans.entrySet()) {
+    Iterator<Map.Entry<String, SelectSeriesGroupEntity>> selectIterator = selectSeriesGroupEntityMap
+        .entrySet().iterator();
+    while (selectIterator.hasNext()) {
+      Entry<String, SelectSeriesGroupEntity> entry = selectIterator.next();
       String groupId = entry.getKey();
-      QueryPlan queryPlan = entry.getValue();
+      SelectSeriesGroupEntity selectEntity = entry.getValue();
+      QueryPlan queryPlan = selectEntity.getQueryPlan();
       if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
-        queryNodes.put(groupId, randomPeer);
+        LOGGER.debug("Init series reader for group id {} from remote node.", groupId);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
-        List<Filter> filterList = null;
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-          allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-          filterList = filterGroupEntity.getFilters();
+        List<Filter> filterList = new ArrayList<>();
+        if (filterSeriesGroupEntityMap.containsKey(groupId)) {
+          FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+          allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+          filterList = filterSeriesGroupEntity.getFilters();
         }
+        /** create request **/
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
       } else {
+        LOGGER.debug("Init series reader for group id {} locally.", groupId);
         dataGroupUsage.add(groupId);
-        selectSeriesByGroupId.remove(groupId);
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.remove(groupId);
-        }
+        selectIterator.remove();
+        filterSeriesGroupEntityMap.remove(groupId);
       }
     }
 
     //Init series reader with data groups of filter series, which don't exist in data groups list of select series.
-    for (Entry<String, FilterGroupEntity> entry : filterGroupEntityMap.entrySet()) {
+    Iterator<Map.Entry<String, FilterSeriesGroupEntity>> filterIterator = filterSeriesGroupEntityMap
+        .entrySet().iterator();
+    while (filterIterator.hasNext()) {
+      Entry<String, FilterSeriesGroupEntity> entry = filterIterator.next();
       String groupId = entry.getKey();
-      if (!selectPathPlans.containsKey(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
+      if (!selectSeriesGroupEntityMap.containsKey(groupId) && !QPExecutorUtils
+          .canHandleQueryByGroupId(groupId)) {
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
-        FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-        allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-        List<Filter> filterList = filterGroupEntity.getFilters();
+        FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+        allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+        List<Filter> filterList = filterSeriesGroupEntity.getFilters();
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
+      } else if (!selectSeriesGroupEntityMap.containsKey(groupId)) {
+        dataGroupUsage.add(groupId);
+        filterIterator.remove();
       }
     }
   }
@@ -185,6 +194,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleInitReaderResponse(String groupId, Map<PathType, QueryPlan> allQueryPlan,
       InitSeriesReaderResponse response) {
+    LOGGER.debug("Handle init reader response of group id {}", groupId);
     /** create cluster series reader **/
     if (allQueryPlan.containsKey(PathType.SELECT_PATH)) {
       QueryPlan plan = allQueryPlan.get(PathType.SELECT_PATH);
@@ -195,7 +205,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterSelectSeriesReader seriesReader = new ClusterSelectSeriesReader(groupId, seriesPath,
             dataType, this);
-        selectSeriesReaders.put(seriesPath, seriesReader);
+        selectSeriesGroupEntityMap.get(groupId).addSelectSeriesReader(seriesReader);
       }
     }
     if (allQueryPlan.containsKey(PathType.FILTER_PATH)) {
@@ -207,41 +217,44 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterFilterSeriesReader seriesReader = new ClusterFilterSeriesReader(groupId, seriesPath,
             dataType, this);
-        if (!filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
-        }
-        filterGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
+        filterSeriesGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
       }
     }
   }
 
   @Override
   public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
-    List<String> fetchDataSeries = new ArrayList<>();
-    Map<String, List<Path>> seriesByGroupId;
-    Map<Path, ClusterSelectSeriesReader> seriesReaders;
-    seriesByGroupId = selectSeriesByGroupId;
-    seriesReaders = selectSeriesReaders;
-    if (seriesByGroupId.containsKey(groupId)) {
-      List<Path> allFilterSeries = seriesByGroupId.get(groupId);
-      for (Path series : allFilterSeries) {
-        if (seriesReaders.get(series).enableFetchData()) {
-          fetchDataSeries.add(series.getFullPath());
-        }
+    List<Integer> fetchDataSeriesIndexs = new ArrayList<>();
+    List<Path> fetchDataSeries = new ArrayList<>();
+    List<Path> selectSeries = selectSeriesGroupEntityMap.get(groupId).getSelectPaths();
+    List<ClusterSelectSeriesReader> seriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeries.size(); i++) {
+      if (seriesReaders.get(i).enableFetchData()) {
+        fetchDataSeriesIndexs.add(i);
+        fetchDataSeries.add(selectSeries.get(i));
       }
     }
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.SELECT_PATH,
-            fetchDataSeries,
+    LOGGER.debug("Fetch data for paths {} of group id {} from node {}", fetchDataSeries, groupId,
+        queryNodes.get(groupId));
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeriesIndexs,
             queryRounds++);
-    handleFetchDataResponseForSelectPaths(fetchDataSeries, response);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
+
+    handleFetchDataResponseForSelectPaths(groupId, fetchDataSeriesIndexs, response);
   }
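
Select-path fetches now carry reader indexes instead of full path strings: only readers whose buffers have drained (enableFetchData()) are polled, and the index list keeps the request positionally aligned with the batch list in the response. A self-contained sketch of the index-picking half (readyAt stands in for the readers' enableFetchData checks):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.IntPredicate;

    public class FetchByIndexSketch {
      // Returns the indexes of the readers that need data; batch k of the
      // remote response then belongs to reader wanted.get(k).
      static List<Integer> pickIndexes(int readerCount, IntPredicate readyAt) {
        List<Integer> wanted = new ArrayList<>();
        for (int i = 0; i < readerCount; i++) {
          if (readyAt.test(i)) {
            wanted.add(i); // the request ships indexes, not path strings
          }
        }
        return wanted;
      }
    }

That positional contract is exactly what handleFetchDataResponseForSelectPaths relies on below when it redistributes the returned batches.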
 
   @Override
-  public void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException {
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.FILTER_PATH, null,
-            queryRounds++);
+  public void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException {
+    LOGGER.debug("Fetch Data for filter paths {} of group id {} from node {}",
+        filterSeriesGroupEntityMap.get(groupId).getFilterPaths(), groupId, queryNodes.get(groupId));
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.FILTER_PATH, null, queryRounds++);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
+
     handleFetchDataResponseForFilterPaths(groupId, response);
   }
 
@@ -249,41 +262,41 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   @Override
   public void fetchBatchDataByTimestampForAllSelectPaths(List<Long> batchTimestamp)
       throws RaftConnectionException {
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      List<String> fetchDataFilterSeries = new ArrayList<>();
-      entry.getValue().forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
-      QuerySeriesDataByTimestampResponse response = ClusterRpcReaderUtils
-          .fetchBatchDataByTimestamp(groupId, queryNodes.get(groupId), taskId, queryRounds++,
-              batchTimestamp, fetchDataFilterSeries);
-      handleFetchDataByTimestampResponseForSelectPaths(fetchDataFilterSeries, response);
+      BasicRequest request = QuerySeriesDataByTimestampRequest
+          .createRequest(groupId, queryRounds++, taskId, batchTimestamp);
+      QuerySeriesDataByTimestampResponse response = (QuerySeriesDataByTimestampResponse) ClusterRpcReaderUtils
+          .handleQueryRequest(request, queryNodes.get(groupId), 0);
+      handleFetchDataByTimestampResponseForSelectPaths(groupId, response);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataByTimestampResponseForSelectPaths(List<String> fetchDataSeries,
+  private void handleFetchDataByTimestampResponseForSelectPaths(String groupId,
       BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeriesReaders.size(); i++) {
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
-          .addBatchData(batchData, true);
+      selectSeriesReaders.get(i).addBatchData(batchData, true);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataResponseForSelectPaths(List<String> fetchDataSeries,
-      BasicQueryDataResponse response) {
+  private void handleFetchDataResponseForSelectPaths(String groupId,
+      List<Integer> selectSeriesIndexs, BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeriesIndexs.size(); i++) {
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
+      selectSeriesReaders.get(selectSeriesIndexs.get(i))
           .addBatchData(batchData, batchData.length() < CLUSTER_CONF.getBatchReadSize());
     }
   }
@@ -293,10 +306,11 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleFetchDataResponseForFilterPaths(String groupId,
       QuerySeriesDataResponse response) {
-    FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-    List<Path> fetchDataSeries = filterGroupEntity.getFilterPaths();
+    FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+    List<Path> fetchDataSeries = filterSeriesGroupEntity.getFilterPaths();
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    List<ClusterFilterSeriesReader> filterReaders = filterGroupEntity.getFilterSeriesReaders();
+    List<ClusterFilterSeriesReader> filterReaders = filterSeriesGroupEntity
+        .getFilterSeriesReaders();
     boolean remoteDataFinish = true;
     for (int i = 0; i < batchDataList.size(); i++) {
       if (batchDataList.get(i).length() != 0) {
@@ -313,11 +327,6 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   }
 
   @Override
-  public QueryPlan getSelectPathQueryPlan(String fullPath) {
-    return selectPathPlans.get(fullPath);
-  }
-
-  @Override
   public void setDataGroupReaderNode(String groupId, PeerId readerNode) {
     queryNodes.put(groupId, readerNode);
   }
@@ -332,7 +341,8 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     for (Entry<String, PeerId> entry : queryNodes.entrySet()) {
       String groupId = entry.getKey();
       PeerId queryNode = entry.getValue();
-      ClusterRpcReaderUtils.releaseRemoteQueryResource(groupId, queryNode, taskId);
+      BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
+      ClusterRpcReaderUtils.handleQueryRequest(request, queryNode, 0);
     }
   }
 
@@ -356,60 +366,19 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     return queryRounds;
   }
 
-  public void setQueryRounds(long queryRounds) {
-    this.queryRounds = queryRounds;
-  }
-
   public QueryPlan getOriginQueryPlan() {
     return originQueryPlan;
   }
 
-  public void setOriginQueryPlan(QueryPlan queryPlan) {
-    this.originQueryPlan = queryPlan;
-  }
-
-  public Map<String, PeerId> getQueryNodes() {
-    return queryNodes;
-  }
-
-  public void setQueryNodes(
-      Map<String, PeerId> queryNodes) {
-    this.queryNodes = queryNodes;
-  }
-
-  public Map<String, QueryPlan> getSelectPathPlans() {
-    return selectPathPlans;
-  }
-
-  public void setSelectPathPlans(
-      Map<String, QueryPlan> selectPathPlans) {
-    this.selectPathPlans = selectPathPlans;
-  }
-
-  public Map<String, List<Path>> getSelectSeriesByGroupId() {
-    return selectSeriesByGroupId;
-  }
-
-  public void setSelectSeriesByGroupId(
-      Map<String, List<Path>> selectSeriesByGroupId) {
-    this.selectSeriesByGroupId = selectSeriesByGroupId;
-  }
-
-  public Map<Path, ClusterSelectSeriesReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
-  }
-
-  public void setSelectSeriesReaders(
-      Map<Path, ClusterSelectSeriesReader> selectSeriesReaders) {
-    this.selectSeriesReaders = selectSeriesReaders;
+  public void setQueryNode(String groupID, PeerId peerId) {
+    this.queryNodes.put(groupID, peerId);
   }
 
-  public Map<String, FilterGroupEntity> getFilterGroupEntityMap() {
-    return filterGroupEntityMap;
+  public Map<String, SelectSeriesGroupEntity> getSelectSeriesGroupEntityMap() {
+    return selectSeriesGroupEntityMap;
   }
 
-  public void setFilterGroupEntityMap(
-      Map<String, FilterGroupEntity> filterGroupEntityMap) {
-    this.filterGroupEntityMap = filterGroupEntityMap;
+  public Map<String, FilterSeriesGroupEntity> getFilterSeriesGroupEntityMap() {
+    return filterSeriesGroupEntityMap;
   }
 }
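
Taken together, the single-query manager now owns the whole remote lifecycle: initialize per-group readers under a read consistency level, fetch in rounds, then release every node it touched. A hedged fragment of that flow using the request factories this patch introduces (not compilable on its own; error handling elided, and manager/queryNode stand for the instances used above):

    BasicRequest init = InitSeriesReaderRequest.createInitialQueryRequest(
        groupId, taskId, readDataConsistencyLevel, allQueryPlan, filterList);
    ClusterRpcReaderUtils.createClusterSeriesReader(groupId, init, manager);

    // ... readers pull data on demand via fetchBatchDataForSelectPaths(groupId) ...

    BasicRequest close = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
    ClusterRpcReaderUtils.handleQueryRequest(close, queryNode, 0);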
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
similarity index 97%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
index 326af11..19407a0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
@@ -28,7 +28,7 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 /**
  * Filter entities of a data group, concluding QueryPlan, filters, all filter paths and filter readers
  */
-public class FilterGroupEntity {
+public class FilterSeriesGroupEntity {
 
   /**
    * Group id
@@ -62,7 +62,7 @@ public class FilterGroupEntity {
    */
   private List<ClusterFilterSeriesReader> filterSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public FilterSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
     this.filterPaths = new ArrayList<>();
     this.filters = new ArrayList<>();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
index b8e4f5d..0917631 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
@@ -66,4 +66,9 @@ public interface IClusterRpcQueryManager {
    * Get all read usage count group by data group id, key is group id, value is usage count
    */
   Map<String, Integer> getAllReadUsage();
+
+  /**
+   * Close this manager: release every single-query manager's remote resources and drop it
+   */
+  void close() throws RaftConnectionException;
 }
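
The new close() gives a node a way to tear down every in-flight coordinator query at shutdown instead of leaving cleanup to per-query timers. A small hypothetical usage sketch (the getInstance() singleton accessor is assumed here, not shown in this patch):

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      try {
        ClusterRpcQueryManager.getInstance().close();
      } catch (RaftConnectionException e) {
        // best effort on shutdown; remote readers will still expire via their query timers
      }
    }));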
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
index c4aec9c..19d8f25 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
@@ -53,11 +53,11 @@ public interface IClusterRpcSingleQueryManager {
   void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException;
 
   /**
-   * Fetch data for filter path.
+   * Fetch data for all filter paths.
    *
    * @param groupId data group id
    */
-  void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException;
+  void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException;
 
   /**
    * Fetch batch data for all select paths by batch timestamp. If target data can be fetched, skip
@@ -69,13 +69,6 @@ public interface IClusterRpcSingleQueryManager {
       throws RaftConnectionException;
 
   /**
-   * Get query plan of select path
-   *
-   * @param fullPath Timeseries full path in select paths
-   */
-  QueryPlan getSelectPathQueryPlan(String fullPath);
-
-  /**
    * Set reader node of a data group
    *
    * @param groupId data group id
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
similarity index 54%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
index 326af11..1de26bd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
@@ -20,16 +20,14 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 /**
- * Filter entities of a data group, concluding QueryPlan, filters, all filter paths and filter readers
+ * Select series entities of a data group, including the QueryPlan, all select paths and series readers
  */
-public class FilterGroupEntity {
-
+public class SelectSeriesGroupEntity {
   /**
    * Group id
    */
@@ -41,32 +39,24 @@ public class FilterGroupEntity {
   private QueryPlan queryPlan;
 
   /**
-   * Filters of filter path.
-   */
-  private List<Filter> filters;
-
-  /**
    *
-   * all filter series
+   * all select series
    * <p>
-   * Note: It may contain multiple series in a complicated tree
-   * for example: select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or time = 12,
-   * filter tree: <code>[[[[root.vehicle.d0.s0:time == 12] || [root.vehicle.d0.s1:time == 12]] || [root.vehicle.d1.s2:time == 12]] || [root.vehicle.d1.s3:time == 12]]</code>
+   * Note: it may contain the same series multiple times in a query,
+   * for example: select sum(s0), max(s0) from root.vehicle.d0 where s0 > 10
    * </p>
    */
-  private List<Path> filterPaths;
-
+  private List<Path> selectPaths;
 
   /**
   * Series reader of select paths (only contains remote series)
    */
-  private List<ClusterFilterSeriesReader> filterSeriesReaders;
+  private List<ClusterSelectSeriesReader> selectSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public SelectSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
-    this.filterPaths = new ArrayList<>();
-    this.filters = new ArrayList<>();
-    this.filterSeriesReaders = new ArrayList<>();
+    this.selectPaths = new ArrayList<>();
+    this.selectSeriesReaders = new ArrayList<>();
   }
 
   public String getGroupId() {
@@ -85,27 +75,19 @@ public class FilterGroupEntity {
     this.queryPlan = queryPlan;
   }
 
-  public List<Filter> getFilters() {
-    return filters;
-  }
-
-  public void addFilter(Filter filter) {
-    this.filters.add(filter);
-  }
-
-  public List<Path> getFilterPaths() {
-    return filterPaths;
+  public List<Path> getSelectPaths() {
+    return selectPaths;
   }
 
-  public void addFilterPaths(Path filterPath) {
-    this.filterPaths.add(filterPath);
+  public void addSelectPaths(Path selectPath) {
+    this.selectPaths.add(selectPath);
   }
 
-  public List<ClusterFilterSeriesReader> getFilterSeriesReaders() {
-    return filterSeriesReaders;
+  public List<ClusterSelectSeriesReader> getSelectSeriesReaders() {
+    return selectSeriesReaders;
   }
 
-  public void addFilterSeriesReader(ClusterFilterSeriesReader filterSeriesReader) {
-    this.filterSeriesReaders.add(filterSeriesReader);
+  public void addSelectSeriesReader(ClusterSelectSeriesReader selectSeriesReader) {
+    this.selectSeriesReaders.add(selectSeriesReader);
   }
 }
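
SelectSeriesGroupEntity keeps selectPaths and selectSeriesReaders as parallel lists, so correctness depends on callers adding a path and its reader in matching order. A self-contained sketch of that invariant, with a single combined add (a possible hardening, not what the entity above exposes):

    import java.util.ArrayList;
    import java.util.List;

    public class ParallelListsSketch<P, R> {
      private final List<P> paths = new ArrayList<>();
      private final List<R> readers = new ArrayList<>();

      // One entry point keeps the two lists index-aligned by construction.
      public void add(P path, R reader) {
        paths.add(path);
        readers.add(reader);
      }

      public R readerFor(int pathIndex) {
        return readers.get(pathIndex); // safe: sizes always match
      }
    }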
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
index fe3ac52..e6149f2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
@@ -21,7 +21,9 @@ package org.apache.iotdb.cluster.query.manager.querynode;
 import com.alipay.sofa.jraft.util.OnlyForTest;
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
@@ -53,7 +55,7 @@ public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
 
   @Override
   public InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
     long jobId = QueryResourceManager.getInstance().assignJobId();
     String taskId = request.getTaskId();
     TASK_ID_MAP_JOB_ID.put(taskId, jobId);
@@ -113,6 +115,16 @@ public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
     return readerUsageMap;
   }
 
+  @Override
+  public void close() throws FileNodeManagerException {
+    Iterator<Entry<Long, ClusterLocalSingleQueryManager>> iterator =
+        SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<Long, ClusterLocalSingleQueryManager> entry = iterator.next();
+      entry.getValue().close();
+      iterator.remove();
+    }
+  }
+
   @OnlyForTest
   public static ConcurrentHashMap<String, Long> getTaskIdMapJobId() {
     return TASK_ID_MAP_JOB_ID;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index 559575a..097f24d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -24,21 +24,24 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ScheduledFuture;
-import org.apache.iotdb.cluster.concurrent.pool.QueryTimerManager;
+import org.apache.iotdb.cluster.concurrent.pool.QueryTimerThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
-import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.IClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFillSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterGroupBySelectSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderEntity;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -46,11 +49,16 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
 import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
-import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -67,11 +75,17 @@ import org.slf4j.LoggerFactory;
 
 public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryManager {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterLocalSingleQueryManager.class);
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(ClusterLocalSingleQueryManager.class);
 
   private String groupId;
 
   /**
+   * Mark whether this manager has initialized or not.
+   */
+  private boolean isInit = false;
+
+  /**
    * Timer of Query, if the time is up, close query resource.
    */
   private ScheduledFuture<?> queryTimer;
@@ -87,14 +101,19 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   private long queryRound = -1;
 
   /**
-   * Key is series full path, value is reader of select series
+   * Select reader entity
+   */
+  private ClusterSelectSeriesBatchReaderEntity selectReaderEntity;
+
+  /**
+   * Select reader entity of a group by query; it handles group by queries with only a time filter
    */
-  private Map<String, AbstractClusterBatchReader> selectSeriesReaders = new HashMap<>();
+  private ClusterGroupBySelectSeriesBatchReaderEntity groupBySelectReaderEntity;
 
   /**
-   * Filter reader
+   * Filter reader entity
    */
-  private IClusterFilterSeriesBatchReader filterReader;
+  private ClusterFilterSeriesBatchReaderEntity filterReaderEntity;
 
   /**
    * Key is series full path, value is data type of series
@@ -113,30 +132,33 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    */
   public ClusterLocalSingleQueryManager(long jobId) {
     this.jobId = jobId;
-    queryTimer = QueryTimerManager.getInstance()
+    queryTimer = QueryTimerThreadManager.getInstance()
         .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
   }
 
   @Override
   public InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
+    if (isInit) {
+      throw new IOException(String
+          .format("ClusterLocalSingleQueryManager has already initialized. Job id = %s", jobId));
+    }
+    isInit = true;
     this.groupId = request.getGroupID();
     InitSeriesReaderResponse response = new InitSeriesReaderResponse(groupId);
     QueryContext context = new QueryContext(jobId);
     Map<PathType, QueryPlan> queryPlanMap = request.getAllQueryPlan();
     if (queryPlanMap.containsKey(PathType.SELECT_PATH)) {
+      selectReaderEntity = new ClusterSelectSeriesBatchReaderEntity();
       QueryPlan plan = queryPlanMap.get(PathType.SELECT_PATH);
       if (plan instanceof GroupByPlan) {
-        throw new UnsupportedOperationException();
+        handleGroupBySeriesReader(plan, context, response);
       } else if (plan instanceof AggregationPlan) {
-        throw new UnsupportedOperationException();
+        handleAggreSeriesReader(plan, context, response);
+      } else if (plan instanceof FillQueryPlan) {
+        handleFillSeriesReader(plan, context, response);
       } else {
-        if (plan.getExpression() == null
-            || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
-          handleSelectReaderWithoutTimeGenerator(plan, context, response);
-        } else {
-          handleSelectReaderWithTimeGenerator(plan, context, response);
-        }
+        handleSelectSeriesReader(plan, context, response);
       }
     }
     if (queryPlanMap.containsKey(PathType.FILTER_PATH)) {
@@ -147,22 +169,145 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   }
 
   /**
-   * Handle filter series reader
+   * Handle fill series reader
    *
-   * @param plan filter series query plan
+   * @param queryPlan fill query plan
    */
-  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
-      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
-      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
-    QueryDataSet queryDataSet = queryProcessExecutor
-        .processQuery(plan, context);
-    List<Path> paths = plan.getPaths();
+  private void handleFillSeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) queryPlan;
+
+    List<Path> selectedPaths = queryPlan.getPaths();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    Map<TSDataType, IFill> typeIFillMap = fillQueryPlan.getFillType();
+    for (Path path : selectedPaths) {
+      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+          .getQueryDataSource(path, context);
+      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+      dataTypes.add(dataType);
+      IFill fill;
+      if (!typeIFillMap.containsKey(dataType)) {
+        fill = new PreviousFill(dataType, fillQueryPlan.getQueryTime(), 0);
+      } else {
+        fill = typeIFillMap.get(dataType).copy(path);
+      }
+      fill.setDataType(dataType);
+      fill.setQueryTime(fillQueryPlan.getQueryTime());
+      fill.constructReaders(queryDataSource, context);
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterFillSelectSeriesBatchReader(dataType, fill.getFillResult()));
+      dataTypeMap.put(path.getFullPath(), dataType);
+    }
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
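
The fill handler picks a strategy per series: the plan's fill map is keyed by data type, and any type without an entry falls back to a PreviousFill. Restated as one expression, with variables as in the method above (a fragment, not standalone code):

    IFill fill = typeIFillMap.containsKey(dataType)
        ? typeIFillMap.get(dataType).copy(path)                        // plan-specified fill, bound to this path
        : new PreviousFill(dataType, fillQueryPlan.getQueryTime(), 0); // default, arguments as in the hunk above
    fill.setDataType(dataType);
    fill.setQueryTime(fillQueryPlan.getQueryTime());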
+
+
+  /**
+   * Handle group by series reader
+   *
+   * @param queryPlan group by query plan
+   */
+  private void handleGroupBySeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException, QueryFilterOptimizationException {
+    if (queryPlan.getExpression() == null
+        || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleGroupBySeriesReaderWithoutTimeGenerator(queryPlan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(queryPlan, context, response);
+    }
+  }
+
+
+  /**
+   * Handle group by series reader without value filter
+   *
+   * @param queryPlan group by query plan
+   */
+  private void handleGroupBySeriesReaderWithoutTimeGenerator(QueryPlan queryPlan,
+      QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException, QueryFilterOptimizationException {
+    QueryDataSet queryDataSet = queryProcessExecutor.processQuery(queryPlan, context);
+    List<Path> paths = queryDataSet.getPaths();
     List<TSDataType> dataTypes = queryDataSet.getDataTypes();
     for (int i = 0; i < paths.size(); i++) {
       dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
     }
-    response.getSeriesDataTypes().put(pathType, dataTypes);
-    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+    groupBySelectReaderEntity = new ClusterGroupBySelectSeriesBatchReaderEntity(paths, dataTypes,
+        (GroupByWithOnlyTimeFilterDataSet) queryDataSet);
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle aggregation series reader
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+    if (queryPlan.getExpression() == null
+        || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleAggreSeriesReaderWithoutTimeGenerator(queryPlan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(queryPlan, context, response);
+    }
+  }
+
+  /**
+   * Handle aggregation series reader without value filter
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReaderWithoutTimeGenerator(QueryPlan queryPlan,
+      QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+    AggregationPlan aggregationPlan = (AggregationPlan) queryPlan;
+
+    List<Path> selectedPaths = aggregationPlan.getPaths();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
+        selectedPaths, aggregationPlan.getAggregations(), aggregationPlan.getExpression());
+
+    List<IPointReader> readers = engineExecutor.constructAggreReadersWithoutTimeGenerator(context);
+
+    List<TSDataType> dataTypes = engineExecutor.getDataTypes();
+
+    for (int i = 0; i < selectedPaths.size(); i++) {
+      Path path = selectedPaths.get(i);
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity.addReaders(
+          new ClusterSelectSeriesBatchReader(dataTypes.get(i), readers.get(i)));
+      dataTypeMap.put(path.getFullPath(), dataTypes.get(i));
+    }
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle select series query
+   *
+   * @param plan query plan
+   * @param context query context
+   * @param response response for coordinator node
+   */
+  private void handleSelectSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, IOException, PathErrorException {
+    if (plan.getExpression() == null
+        || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleSelectReaderWithoutTimeGenerator(plan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(plan, context, response);
+    }
   }
 
   /**
@@ -185,16 +330,35 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
         .beginQueryOfGivenQueryPaths(context.getJobId(), plan.getPaths());
     for (int i = 0; i < paths.size(); i++) {
       String fullPath = paths.get(i).getFullPath();
-      IPointReader reader = ExecutorWithoutTimeGenerator
+      IPointReader reader = AbstractExecutorWithoutTimeGenerator
           .createSeriesReader(context, paths.get(i), dataTypes, timeFilter);
-      selectSeriesReaders
-          .put(fullPath, new ClusterBatchReaderWithoutTimeGenerator(dataTypes.get(i), reader));
+      selectReaderEntity.addPath(fullPath);
+      selectReaderEntity.addReaders(new ClusterSelectSeriesBatchReader(dataTypes.get(i), reader));
       dataTypeMap.put(fullPath, dataTypes.get(i));
     }
     response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
   }
 
   /**
+   * Handle filter series reader
+   *
+   * @param plan filter series query plan
+   */
+  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException, ClassNotFoundException {
+    QueryDataSet queryDataSet = queryProcessExecutor.processQuery(plan, context);
+    List<Path> paths = plan.getPaths();
+    List<TSDataType> dataTypes = queryDataSet.getDataTypes();
+    for (int i = 0; i < paths.size(); i++) {
+      dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
+    }
+    response.getSeriesDataTypes().put(pathType, dataTypes);
+    filterReaderEntity = new ClusterFilterSeriesBatchReaderEntity(queryDataSet, paths,
+        request.getFilterList());
+  }
+
+  /**
    * Handle select series query with value filter
    *
    * @param plan plan query plan
@@ -211,8 +375,9 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       EngineReaderByTimeStamp readerByTimeStamp = ClusterSeriesReaderFactory
           .createReaderByTimeStamp(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      selectSeriesReaders
-          .put(path.getFullPath(), new ClusterBatchReaderByTimestamp(readerByTimeStamp, dataType));
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
       dataTypeMap.put(path.getFullPath(), dataType);
       dataTypeList.add(dataType);
     }
@@ -228,10 +393,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
       PathType pathType = request.getPathType();
-      List<String> paths = request.getSeriesPaths();
       List<BatchData> batchDataList;
       if (pathType == PathType.SELECT_PATH) {
-        batchDataList = readSelectSeriesBatchData(paths);
+        // check whether it's a group by query with only time filter
+        batchDataList =
+            groupBySelectReaderEntity != null ? groupBySelectReaderEntity.nextBatchList()
+                : readSelectSeriesBatchData(request.getSeriesPathIndexs());
       } else {
         batchDataList = readFilterSeriesBatchData();
       }
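
queryRound is what makes fetches retry-safe: the coordinator stamps each request with an increasing round, and the query node only advances its readers on a new round, replaying the cached batches otherwise. A self-contained sketch of the idea with placeholder names:

    import java.util.List;
    import java.util.function.Supplier;

    public class RoundReplaySketch<T> {
      private long lastRound = -1; // matches the manager's initial queryRound
      private List<T> cached;

      // nextBatches actually consumes data from the underlying readers.
      public List<T> fetch(long round, Supplier<List<T>> nextBatches) {
        if (round != lastRound) {  // new round: really advance the readers
          lastRound = round;
          cached = nextBatches.get();
        }                          // same round: a retry, so replay without consuming
        return cached;
      }
    }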
@@ -247,13 +414,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       throws IOException {
     resetQueryTimer();
     QuerySeriesDataByTimestampResponse response = new QuerySeriesDataByTimestampResponse(groupId);
-    List<String> fetchDataSeries = request.getFetchDataSeries();
     long targetQueryRounds = request.getQueryRounds();
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
+      List<IClusterSelectSeriesBatchReader> readers = selectReaderEntity.getAllReaders();
       List<BatchData> batchDataList = new ArrayList<>();
-      for (String series : fetchDataSeries) {
-        AbstractClusterBatchReader reader = selectSeriesReaders.get(series);
+      for (IClusterSelectSeriesBatchReader reader : readers) {
         batchDataList.add(reader.nextBatch(request.getBatchTimestamp()));
       }
       cachedBatchDataResult = batchDataList;
@@ -265,19 +431,20 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   @Override
   public void resetQueryTimer() {
     queryTimer.cancel(false);
-    queryTimer = QueryTimerManager.getInstance()
+    queryTimer = QueryTimerThreadManager.getInstance()
         .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
   }
 
   /**
-   * Read batch data of select series
+   * Read batch data of select series by series index
    *
-   * @param paths all series to query
+   * @param seriesIndexs indexes of the series to query
    */
-  private List<BatchData> readSelectSeriesBatchData(List<String> paths) throws IOException {
+  private List<BatchData> readSelectSeriesBatchData(List<Integer> seriesIndexs) throws IOException {
     List<BatchData> batchDataList = new ArrayList<>();
-    for (String fullPath : paths) {
-      batchDataList.add(selectSeriesReaders.get(fullPath).nextBatch());
+    for (int index : seriesIndexs) {
+      IClusterSelectSeriesBatchReader reader = selectReaderEntity.getReaderByIndex(index);
+      batchDataList.add(reader.nextBatch());
     }
     return batchDataList;
   }
@@ -288,7 +455,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    * @return batch data of all filter series
    */
   private List<BatchData> readFilterSeriesBatchData() throws IOException {
-    return filterReader.nextBatchList();
+    return filterReaderEntity.nextBatchList();
   }
 
   public String getGroupId() {
@@ -309,12 +476,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     return queryRound;
   }
 
-  public Map<String, AbstractClusterBatchReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
+  public ClusterSelectSeriesBatchReaderEntity getSelectReaderEntity() {
+    return selectReaderEntity;
   }
 
-  public IClusterFilterSeriesBatchReader getFilterReader() {
-    return filterReader;
+  public ClusterFilterSeriesBatchReaderEntity getFilterReaderEntity() {
+    return filterReaderEntity;
   }
 
   public Map<String, TSDataType> getDataTypeMap() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
index cc0f103..42374d5 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
@@ -42,7 +42,7 @@ public interface IClusterLocalQueryManager {
    * @param request request for query data from coordinator node
    */
   InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * Read batch data of all querying series in request and set response.
@@ -54,8 +54,8 @@ public interface IClusterLocalQueryManager {
 
   /**
    * Read batch data of select series by batch timestamp which is used in query with value filter
-   *  @param request request of querying select paths
    *
+   * @param request request of querying select paths
    */
   QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
       QuerySeriesDataByTimestampRequest request) throws IOException;
@@ -79,4 +79,9 @@ public interface IClusterLocalQueryManager {
    * Get all read usage count group by data group id, key is group id, value is usage count
    */
   Map<String, Integer> getAllReadUsage();
+
+  /**
+   * Close this manager: close every single-query manager and release its query resources
+   */
+  void close() throws FileNodeManagerException;
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
index 318772f..1d89c5c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
@@ -40,18 +40,19 @@ public interface IClusterLocalSingleQueryManager {
 
   /**
    * Initially create corresponding series readers.
+   *
    * @param request request of querying series data
    */
   InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * <p>
    * Read batch data If query round in cache is equal to target query round, it means that batch
    * data in query node transfer to coordinator fail and return cached batch data.
    * </p>
-   *  @param request request of querying series data
    *
+   * @param request request of querying series data
    */
   QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
       throws IOException;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
index 72c7c70..c0012a1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
@@ -20,9 +20,9 @@ package org.apache.iotdb.cluster.query.reader.coordinatornode;
 
 import java.io.IOException;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.utils.ClusterTimeValuePairUtils;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.utils.TimeValuePair;
-import org.apache.iotdb.db.utils.TimeValuePairUtils;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
@@ -63,11 +63,14 @@ public abstract class AbstractClusterPointReader implements IPointReader {
   @Override
   public TimeValuePair next() throws IOException {
     if (hasNext()) {
-      TimeValuePair timeValuePair = TimeValuePairUtils.getCurrentTimeValuePair(currentBatchData);
+      TimeValuePair timeValuePair = ClusterTimeValuePairUtils
+          .getCurrentTimeValuePair(currentBatchData);
       currentTimeValuePair = timeValuePair;
       currentBatchData.next();
       return timeValuePair;
     }
     return null;
   }
+
+  public abstract void addBatchData(BatchData batchData, boolean remoteDataFinish);
 }
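
addBatchData is the producer half of these coordinator-side point readers: each RPC response appends a BatchData plus a remoteDataFinish flag, and the update step (see ClusterFilterSeriesReader below) drains the queue, fetching again only while the remote side has more. A self-contained sketch of that queue discipline:

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class BatchQueueSketch<B> {
      private final Deque<B> queue = new ArrayDeque<>();
      private boolean remoteDataFinish = false;

      public void addBatchData(B batch, boolean finish) { // producer: RPC response handler
        queue.addLast(batch);
        remoteDataFinish = finish;
      }

      // Consumer: mirrors updateCurrentBatchData(); fetchMore triggers another remote round.
      public B nextBatch(Runnable fetchMore) {
        if (queue.isEmpty() && !remoteDataFinish) {
          fetchMore.run();
        }
        return queue.isEmpty() ? null : queue.removeFirst();
      }
    }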
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
index 805d3af..9d60ae2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
@@ -83,7 +83,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
   @Override
   protected void updateCurrentBatchData() throws RaftConnectionException {
     if (batchDataList.isEmpty() && !remoteDataFinish) {
-      queryManager.fetchBatchDataForFilterPaths(groupId);
+      queryManager.fetchBatchDataForAllFilterPaths(groupId);
     }
     if (!batchDataList.isEmpty()) {
       currentBatchData = batchDataList.removeFirst();
@@ -95,14 +95,6 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     //Do nothing
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -111,14 +103,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
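
The addBatchData(BatchData, boolean) hook is how remotely fetched batches reach a reader. A hedged sketch of the assumed coordinator-side handshake (the response accessors are assumptions, not the patch's API):

    // Sketch: append each fetched batch to its reader and record whether the
    // remote query node has exhausted its data for these series.
    List<BatchData> fetched = response.getBatchDataList();
    for (int i = 0; i < readers.size(); i++) {
      readers.get(i).addBatchData(fetched.get(i), response.isRemoteDataFinish());
    }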
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
index 0a507d5..c640b53 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
@@ -119,14 +119,6 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     batchDataList = null;
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -135,27 +127,12 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
   }
 
-  public boolean isRemoteDataFinish() {
-    return remoteDataFinish;
-  }
-
-  public void setRemoteDataFinish(boolean remoteDataFinish) {
-    this.remoteDataFinish = remoteDataFinish;
-  }
-
   /**
   * Check whether this series needs to fetch data from the remote query node
    */
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
similarity index 57%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
index 218d68b..a581d07 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
@@ -19,18 +19,27 @@
 package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import java.util.List;
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
-/**
- * Batch reader for filter series which is used in query node.
- */
-public interface IClusterFilterSeriesBatchReader {
+public class ClusterFillSelectSeriesBatchReader extends ClusterSelectSeriesBatchReader {
 
-  boolean hasNext() throws IOException;
+  public ClusterFillSelectSeriesBatchReader(
+      TSDataType dataType,
+      IPointReader reader) {
+    super(dataType, reader);
+  }
 
-  /**
-   * Get next batch data of all filter series.
-   */
-  List<BatchData> nextBatchList() throws IOException;
+  @Override
+  public BatchData nextBatch() throws IOException {
+    if (hasNext()) {
+      ClusterNullableBatchData batchData = new ClusterNullableBatchData();
+      batchData.addTimeValuePair(reader.next());
+      return batchData;
+    } else {
+      return new ClusterNullableBatchData();
+    }
+  }
 }
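
Note the fill semantics here: each nextBatch() call yields at most one time-value pair (the fill result at the query timestamp), wrapped in a ClusterNullableBatchData so an empty result can still be transferred. A hedged usage sketch:

    // Illustrative: a fill batch carries zero or one points.
    BatchData batch = fillReader.nextBatch();
    if (batch.hasNext()) {
      TimeValuePair filled = ClusterTimeValuePairUtils.getCurrentTimeValuePair(batch);
      // consume the fill value here
    } else {
      // no fill value exists for this series at the query time
    }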
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
similarity index 88%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
index 6690999..ddcb35d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
@@ -33,9 +32,9 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
 /**
- * Batch reader for all filter paths.
+ * Batch reader entity for all filter paths.
  */
-public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatchReader {
+public class ClusterFilterSeriesBatchReaderEntity implements IClusterSeriesBatchReaderEntity {
 
   private List<Path> allFilterPath;
 
@@ -45,7 +44,7 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterFilterSeriesBatchReader(QueryDataSet queryDataSet, List<Path> allFilterPath,
+  public ClusterFilterSeriesBatchReaderEntity(QueryDataSet queryDataSet, List<Path> allFilterPath,
       List<Filter> filters) {
     this.queryDataSet = queryDataSet;
     this.allFilterPath = allFilterPath;
@@ -69,12 +68,12 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
       batchDataList.add(new BatchData(dataTypeList.get(i), true));
     }
     int dataPointCount = 0;
-    while(true){
-      if(!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()){
+    while (true) {
+      if (!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()) {
         break;
       }
-      if(hasNext() && addTimeValuePair(batchDataList, dataTypeList)){
-          dataPointCount++;
+      if (hasNext() && addTimeValuePair(batchDataList, dataTypeList)) {
+        dataPointCount++;
       }
     }
     return batchDataList;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java
new file mode 100644
index 0000000..30ecf1b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.querynode;
+
+import static org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader.CLUSTER_CONF;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.cluster.query.utils.ClusterTimeValuePairUtils;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Field;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+
+/**
+ * Batch reader entity for select paths in group by query with only time filter.
+ */
+public class ClusterGroupBySelectSeriesBatchReaderEntity implements
+    IClusterSeriesBatchReaderEntity {
+
+  private List<Path> paths;
+  private List<TSDataType> dataTypes;
+
+  private GroupByWithOnlyTimeFilterDataSet queryDataSet;
+
+  public ClusterGroupBySelectSeriesBatchReaderEntity(
+      List<Path> paths,
+      List<TSDataType> dataTypes,
+      GroupByWithOnlyTimeFilterDataSet queryDataSet) {
+    this.paths = paths;
+    this.dataTypes = dataTypes;
+    this.queryDataSet = queryDataSet;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return queryDataSet.hasNext();
+  }
+
+  @Override
+  public List<BatchData> nextBatchList() throws IOException {
+    List<BatchData> batchDataList = new ArrayList<>(paths.size());
+    for (int i = 0; i < paths.size(); i++) {
+      batchDataList.add(new ClusterNullableBatchData());
+    }
+    int dataPointCount = 0;
+    while (true) {
+      if (!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()) {
+        break;
+      }
+      dataPointCount++;
+      RowRecord rowRecord = queryDataSet.next();
+      long time = rowRecord.getTimestamp();
+      List<Field> fieldList = rowRecord.getFields();
+      for (int j = 0; j < paths.size(); j++) {
+        ClusterNullableBatchData batchData = (ClusterNullableBatchData) batchDataList.get(j);
+        Object value = fieldList.get(j).getObjectValue(dataTypes.get(j));
+        batchData.addTimeValuePair(fieldList.get(j).toString().equals("null") ? null
+            : ClusterTimeValuePairUtils.getTimeValuePair(time, value, dataTypes.get(j)));
+      }
+    }
+    return batchDataList;
+  }
+}
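
To make the batch layout concrete: index i of the list returned by nextBatchList() lines up with paths.get(i), and a null pair inside a ClusterNullableBatchData marks an interval whose aggregate is null. An illustrative consumer, not part of the patch:

    List<BatchData> batches = readerEntity.nextBatchList();
    for (int i = 0; i < batches.size(); i++) {
      BatchData column = batches.get(i); // column i corresponds to paths.get(i)
      while (column.hasNext()) {
        // may be null for intervals with a null aggregate
        TimeValuePair pair = ClusterTimeValuePairUtils.getCurrentTimeValuePair(column);
        column.next();
      }
    }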
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
similarity index 83%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
index f3d443f..b3c05d8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
@@ -21,7 +21,6 @@ package org.apache.iotdb.cluster.query.reader.querynode;
 import java.io.IOException;
 import java.util.List;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.utils.TimeValuePair;
@@ -31,21 +30,22 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader without time generator for cluster which is used in query node.
  */
-public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReader implements
+    IClusterSelectSeriesBatchReader {
 
   /**
    * Data type
    */
-  private TSDataType dataType;
+  protected TSDataType dataType;
 
   /**
    * Point reader
    */
-  private IPointReader reader;
+  protected IPointReader reader;
 
-  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+  static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterBatchReaderWithoutTimeGenerator(
+  public ClusterSelectSeriesBatchReader(
       TSDataType dataType, IPointReader reader) {
     this.dataType = dataType;
     this.reader = reader;
@@ -81,7 +81,7 @@ public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatch
   @Override
   public BatchData nextBatch(List<Long> batchTime) throws IOException {
     throw new IOException(
-        "nextBatch(List<Long> batchTime) in ClusterBatchReaderWithoutTimeGenerator is an empty method.");
+        "nextBatch(List<Long> batchTime) in ClusterSelectSeriesBatchReader is an empty method.");
   }
 
   public TSDataType getDataType() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
index b8c36eb..fc6fe31 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
@@ -27,7 +27,8 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader by timestamp for cluster which is used in query node.
  */
-public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReaderByTimestamp implements
+    IClusterSelectSeriesBatchReader {
 
   /**
    * Reader
@@ -39,7 +40,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
    */
   private TSDataType dataType;
 
-  public ClusterBatchReaderByTimestamp(
+  public ClusterSelectSeriesBatchReaderByTimestamp(
       EngineReaderByTimeStamp readerByTimeStamp,
       TSDataType dataType) {
     this.readerByTimeStamp = readerByTimeStamp;
@@ -54,7 +55,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
   @Override
   public BatchData nextBatch() throws IOException {
     throw new UnsupportedOperationException(
-        "nextBatch() in ClusterBatchReaderByTimestamp is an empty method.");
+        "nextBatch() in ClusterSelectSeriesBatchReaderByTimestamp is an empty method.");
   }
 
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
index 218d68b..7150ffa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
@@ -18,19 +18,46 @@
  */
 package org.apache.iotdb.cluster.query.reader.querynode;
 
-import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Batch reader for filter series which is used in query node.
+ * Batch reader entity for all select paths.
  */
-public interface IClusterFilterSeriesBatchReader {
+public class ClusterSelectSeriesBatchReaderEntity {
 
-  boolean hasNext() throws IOException;
+  /**
+   * All select paths
+   */
+  private List<String> paths;
 
   /**
-   * Get next batch data of all filter series.
+   * All select readers
    */
-  List<BatchData> nextBatchList() throws IOException;
+  private List<IClusterSelectSeriesBatchReader> readers;
+
+  public ClusterSelectSeriesBatchReaderEntity() {
+    paths = new ArrayList<>();
+    readers = new ArrayList<>();
+  }
+
+  public void addPath(String path) {
+    this.paths.add(path);
+  }
+
+  public void addReaders(IClusterSelectSeriesBatchReader reader) {
+    this.readers.add(reader);
+  }
+
+  public List<IClusterSelectSeriesBatchReader> getAllReaders() {
+    return readers;
+  }
+
+  public IClusterSelectSeriesBatchReader getReaderByIndex(int index) {
+    return readers.get(index);
+  }
+
+  public List<String> getAllPaths() {
+    return paths;
+  }
 }
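
A short sketch of the intended registration pattern on the query node; createReaderFor(...) is hypothetical and stands in for whatever builds the per-series reader:

    ClusterSelectSeriesBatchReaderEntity entity = new ClusterSelectSeriesBatchReaderEntity();
    for (Path path : selectPaths) {
      entity.addPath(path.getFullPath());
      entity.addReaders(createReaderFor(path)); // yields an IClusterSelectSeriesBatchReader
    }
    // later, column i is served by entity.getReaderByIndex(i)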
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
index b0a86bd..87a8329 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
@@ -26,7 +26,7 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * Cluster batch reader, which provides another method to get batch data by batch timestamp.
  */
-public abstract class AbstractClusterBatchReader implements IBatchReader {
+public interface IClusterSelectSeriesBatchReader extends IBatchReader {
 
   /**
    * Get batch data by batch time
@@ -34,6 +34,6 @@ public abstract class AbstractClusterBatchReader implements IBatchReader {
    * @param batchTime valid batch timestamp
    * @return corresponding batch data
    */
-  public abstract BatchData nextBatch(List<Long> batchTime) throws IOException;
+  BatchData nextBatch(List<Long> batchTime) throws IOException;
 
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
similarity index 87%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
index 218d68b..80e72b6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
@@ -23,14 +23,14 @@ import java.util.List;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Batch reader for filter series which is used in query node.
+ * Batch reader entity for series, used in query node.
  */
-public interface IClusterFilterSeriesBatchReader {
+public interface IClusterSeriesBatchReaderEntity {
 
   boolean hasNext() throws IOException;
 
   /**
-   * Get next batch data of all filter series.
+   * Get next batch data of all series.
    */
   List<BatchData> nextBatchList() throws IOException;
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
index 639dce8..2b3ab18 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
@@ -25,7 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
@@ -65,7 +65,7 @@ public class ClusterNodeConstructor extends AbstractNodeConstructor {
    * Init filter series reader
    */
   private void init(ClusterRpcSingleQueryManager queryManager) {
-    Map<String, FilterGroupEntity> filterGroupEntityMap = queryManager.getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = queryManager.getFilterSeriesGroupEntityMap();
     filterGroupEntityMap.forEach(
         (key, value) -> filterSeriesReadersByGroupId.put(key, value.getFilterSeriesReaders()));
     filterSeriesReadersByGroupId.forEach((key, value) -> filterSeriesReaderIndex.put(key, 0));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index c3df421..bab0a67 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -19,86 +19,61 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import com.alipay.sofa.jraft.entity.PeerId;
-import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.DataQueryTask;
 import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
-import org.apache.iotdb.cluster.query.PathType;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
-import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utils for cluster reader which needs to acquire data from remote query node.
  */
 public class ClusterRpcReaderUtils {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcReaderUtils.class);
+
   /**
    * Count limit to redo a task
    */
   private static final int TASK_MAX_RETRY = ClusterDescriptor.getInstance().getConfig()
       .getQpTaskRedoCount();
 
-  /**
-   * Create cluster series reader
-   *
-   * @param peerId query node to fetch data
-   * @param readDataConsistencyLevel consistency level of read data
-   * @param taskId task id assigned by coordinator node
-   */
-  public static BasicResponse createClusterSeriesReader(String groupId, PeerId peerId,
-      int readDataConsistencyLevel, Map<PathType, QueryPlan> allQueryPlan, String taskId,
-      List<Filter> filterList)
-      throws IOException, RaftConnectionException {
-
-    /** handle request **/
-    BasicRequest request = InitSeriesReaderRequest
-        .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
-            allQueryPlan, filterList);
-    return handleQueryRequest(request, peerId, 0);
-  }
-
-  public static QuerySeriesDataResponse fetchBatchData(String groupID, PeerId peerId, String taskId,
-      PathType pathType, List<String> fetchDataSeries, long queryRounds)
-      throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataRequest
-        .createFetchDataRequest(groupID, taskId, pathType, fetchDataSeries, queryRounds);
-    return (QuerySeriesDataResponse) handleQueryRequest(request, peerId, 0);
-  }
-
-  public static QuerySeriesDataByTimestampResponse fetchBatchDataByTimestamp(String groupId,
-      PeerId peerId, String taskId, long queryRounds, List<Long> batchTimestamp,
-      List<String> fetchDataSeries)
-      throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataByTimestampRequest
-        .createRequest(groupId, queryRounds, taskId, batchTimestamp, fetchDataSeries);
-    return (QuerySeriesDataByTimestampResponse) handleQueryRequest(request, peerId, 0);
+  private ClusterRpcReaderUtils() {
   }
 
   /**
-   * Release remote query resources
-   *
-   * @param groupId data group id
-   * @param peerId target query node
-   * @param taskId unique task id
+   * Create cluster series reader
    */
-  public static void releaseRemoteQueryResource(String groupId, PeerId peerId, String taskId)
+  public static BasicResponse createClusterSeriesReader(String groupId, BasicRequest request,
+      ClusterRpcSingleQueryManager manager)
       throws RaftConnectionException {
 
-    BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
-    handleQueryRequest(request, peerId, 0);
+    List<PeerId> peerIdList = RaftUtils
+        .getPeerIDList(groupId, Server.getInstance(), Router.getInstance());
+    int randomPeerIndex = RaftUtils.getRandomInt(peerIdList.size());
+    BasicResponse response;
+    for (int i = 0; i < peerIdList.size(); i++) {
+      PeerId peerId = peerIdList.get((i + randomPeerIndex) % peerIdList.size());
+      try {
+        response = handleQueryRequest(request, peerId, 0);
+        manager.setQueryNode(groupId, peerId);
+        LOGGER.debug("Init series reader in Node<{}> of group<{}> success.", peerId, groupId);
+        return response;
+      } catch (RaftConnectionException e) {
+        LOGGER.debug("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
+      }
+    }
+    throw new RaftConnectionException(
+        String.format("Can not init series reader in all nodes of group<%s>, please check cluster status.", groupId));
   }
 
   /**
@@ -109,7 +84,7 @@ public class ClusterRpcReaderUtils {
    * @param taskRetryNum retry num of the request
    * @return Response from remote query node
    */
-  private static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
+  public static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
       int taskRetryNum)
       throws RaftConnectionException {
     if (taskRetryNum > TASK_MAX_RETRY) {
@@ -117,10 +92,20 @@ public class ClusterRpcReaderUtils {
           String.format("Query request retries reach the upper bound %s",
               TASK_MAX_RETRY));
     }
-    NodeAsClient nodeAsClient = RaftUtils.getRaftNodeAsClient();
-    QueryTask queryTask = nodeAsClient.syncHandleRequest(request, peerId);
-    if (queryTask.getState() == TaskState.FINISH) {
-      return queryTask.getBasicResponse();
+    DataQueryTask dataQueryTask = new DataQueryTask(true, request);
+    dataQueryTask.setTargetNode(peerId);
+    RaftNodeAsClientManager.getInstance().produceQPTask(dataQueryTask);
+    try {
+      dataQueryTask.await();
+    } catch (InterruptedException e) {
+      throw new RaftConnectionException(
+          String.format("Can not connect to remote node {%s} for query", peerId));
+    }
+    if (dataQueryTask.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
+      throw new RaftConnectionException(
+          String.format("Can not connect to remote node {%s} for query", peerId));
+    } else if (dataQueryTask.getTaskState() == TaskState.FINISH) {
+      return dataQueryTask.getResponse();
     } else {
       return handleQueryRequest(request, peerId, taskRetryNum + 1);
     }
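
The peer-selection loop in createClusterSeriesReader starts at a random index and wraps around, spreading query load across the group while still trying every node before failing. The pattern in isolation, reduced to a generic sketch (tryInit is hypothetical):

    int start = ThreadLocalRandom.current().nextInt(peers.size());
    for (int i = 0; i < peers.size(); i++) {
      PeerId candidate = peers.get((start + i) % peers.size());
      try {
        return tryInit(candidate);   // per-node attempt, may throw
      } catch (RaftConnectionException e) {
        // fall through and try the next peer
      }
    }
    throw new RaftConnectionException("no reachable node in the group");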
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
new file mode 100644
index 0000000..3141f99
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.db.utils.TimeValuePairUtils;
+import org.apache.iotdb.db.utils.TsPrimitiveType;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.utils.Binary;
+
+public class ClusterTimeValuePairUtils {
+
+  private ClusterTimeValuePairUtils() {
+  }
+
+  /**
+   * get given data's current (time,value) pair.
+   *
+   * @param data -batch data
+   * @return -given data's (time,value) pair
+   */
+  public static TimeValuePair getCurrentTimeValuePair(BatchData data) {
+    if (data instanceof ClusterNullableBatchData) {
+      return ((ClusterNullableBatchData) data).getCurrentTimeValuePair();
+    } else {
+      return TimeValuePairUtils.getCurrentTimeValuePair(data);
+    }
+  }
+
+  /**
+   * Get (time,value) pair according to data type
+   */
+  public static TimeValuePair getTimeValuePair(long time, Object v, TSDataType dataType) {
+    switch (dataType) {
+      case INT32:
+        return new TimeValuePair(time, new TsPrimitiveType.TsInt((int) v));
+      case INT64:
+        return new TimeValuePair(time, new TsPrimitiveType.TsLong((long) v));
+      case FLOAT:
+        return new TimeValuePair(time, new TsPrimitiveType.TsFloat((float) v));
+      case DOUBLE:
+        return new TimeValuePair(time, new TsPrimitiveType.TsDouble((double) v));
+      case TEXT:
+        return new TimeValuePair(time, new TsPrimitiveType.TsBinary((Binary) v));
+      case BOOLEAN:
+        return new TimeValuePair(time, new TsPrimitiveType.TsBoolean((boolean) v));
+      default:
+        throw new UnSupportedDataTypeException(String.valueOf(v));
+    }
+  }
+}
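
A usage illustration: converting one field of a RowRecord back into a TimeValuePair, which is what ClusterGroupBySelectSeriesBatchReaderEntity does per column (the DOUBLE type is only an example):

    long time = rowRecord.getTimestamp();
    Object value = field.getObjectValue(TSDataType.DOUBLE);
    TimeValuePair pair =
        ClusterTimeValuePairUtils.getTimeValuePair(time, value, TSDataType.DOUBLE);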
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
index 0024138..e6577ee 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
@@ -26,7 +26,7 @@ import static org.apache.iotdb.tsfile.read.expression.ExpressionType.TRUE;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.expression.TrueExpression;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
@@ -46,17 +46,15 @@ public class ExpressionUtils {
    * Get all series path of expression group by group id
    */
   public static void getAllExpressionSeries(IExpression expression,
-      Map<String, FilterGroupEntity> filterGroupEntityMap)
+      Map<String, FilterSeriesGroupEntity> filterGroupEntityMap)
       throws PathErrorException {
     if (expression.getType() == ExpressionType.SERIES) {
       Path path = ((SingleSeriesExpression) expression).getSeriesPath();
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!filterGroupEntityMap.containsKey(groupId)) {
-        filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
-      }
-      FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-      filterGroupEntity.addFilterPaths(path);
-      filterGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
+      filterGroupEntityMap.putIfAbsent(groupId, new FilterSeriesGroupEntity(groupId));
+      FilterSeriesGroupEntity filterSeriesGroupEntity = filterGroupEntityMap.get(groupId);
+      filterSeriesGroupEntity.addFilterPaths(path);
+      filterSeriesGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
     } else if (expression.getType() == OR || expression.getType() == AND) {
       getAllExpressionSeries(((IBinaryExpression) expression).getLeft(), filterGroupEntityMap);
       getAllExpressionSeries(((IBinaryExpression) expression).getRight(), filterGroupEntityMap);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index 4f7a5fe..2b09492 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -19,21 +19,23 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
-import org.apache.iotdb.cluster.utils.hash.Router;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 /**
  * Utils for splitting query plan to several sub query plans by group id.
@@ -45,11 +47,36 @@ public class QueryPlanPartitionUtils {
   }
 
   /**
-   * Split query plan with no filter or with only global time filter by group id
+   * Split a query plan that has no filter, has only a global time filter, or is a fill query, by group id
    */
-  public static void splitQueryPlanWithoutValueFilter(ClusterRpcSingleQueryManager singleQueryManager)
+  public static void splitQueryPlanWithoutValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
-    splitQueryPlanBySelectPath(singleQueryManager);
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof FillQueryPlan) {
+      splitFillPlan(singleQueryManager);
+    } else if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlanBySelectPath(singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlanBySelectPath(singleQueryManager);
+    } else {
+      splitQueryPlanBySelectPath(singleQueryManager);
+    }
+  }
+
+  /**
+   * Split query plan with filter.
+   */
+  public static void splitQueryPlanWithValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlanWithFilter(singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlanWithFilter(singleQueryManager);
+    } else {
+      splitQueryPlanWithFilter(singleQueryManager);
+    }
   }
 
   /**
@@ -58,61 +85,39 @@ public class QueryPlanPartitionUtils {
   private static void splitQueryPlanBySelectPath(ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
-    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    // split query plan by select path
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> selectPaths = queryPlan.getPaths();
     for (Path path : selectPaths) {
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!selectSeriesByGroupId.containsKey(groupId)) {
-        selectSeriesByGroupId.put(groupId, new ArrayList<>());
-      }
-      selectSeriesByGroupId.get(groupId).add(path);
+      selectGroupEntityMap.putIfAbsent(groupId, new SelectSeriesGroupEntity(groupId));
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
     }
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
-      String groupId = entry.getKey();
-      List<Path> paths = entry.getValue();
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setProposer(queryPlan.getProposer());
       subQueryPlan.setPaths(paths);
       subQueryPlan.setExpression(queryPlan.getExpression());
-      selectPathPlans.put(groupId, subQueryPlan);
+      entity.setQueryPlan(subQueryPlan);
     }
   }
 
+
   /**
-   * Split query plan with filter.
+   * Split query plan by filter paths
    */
-  public static void splitQueryPlanWithValueFilter(
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+  private static void splitQueryPlanByFilterPath(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    if (queryPlan instanceof GroupByPlan) {
-      splitGroupByPlan((GroupByPlan) queryPlan, singleQueryManager);
-    } else if (queryPlan instanceof AggregationPlan) {
-      splitAggregationPlan((AggregationPlan) queryPlan, singleQueryManager);
-    } else {
-      splitQueryPlan(queryPlan, singleQueryManager);
-    }
-  }
-
-  private static void splitGroupByPlan(GroupByPlan queryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) {
-    throw new UnsupportedOperationException();
-  }
-
-  private static void splitAggregationPlan(AggregationPlan aggregationPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) {
-    throw new UnsupportedOperationException();
-  }
-
-  private static void splitQueryPlan(QueryPlan queryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
-    splitQueryPlanBySelectPath(singleQueryManager);
     // split query plan by filter path
-    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager.getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = singleQueryManager
+        .getFilterSeriesGroupEntityMap();
     IExpression expression = queryPlan.getExpression();
     ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
-    for(FilterGroupEntity filterGroupEntity: filterGroupEntityMap.values()){
-      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : filterGroupEntityMap.values()) {
+      List<Path> filterSeriesList = filterSeriesGroupEntity.getFilterPaths();
       // create filter sub query plan
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setPaths(filterSeriesList);
@@ -121,7 +126,136 @@ public class QueryPlanPartitionUtils {
       if (subExpression.getType() != ExpressionType.TRUE) {
         subQueryPlan.setExpression(subExpression);
       }
-      filterGroupEntity.setQueryPlan(subQueryPlan);
+      filterSeriesGroupEntity.setQueryPlan(subQueryPlan);
     }
   }
+
+  /**
+   * Split a group-by plan by select paths
+   */
+  private static void splitGroupByPlanBySelectPath(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    GroupByPlan queryPlan = (GroupByPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = queryPlan.getPaths();
+    List<String> aggregations = queryPlan.getAggregations();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
+    for (int i = 0; i < selectPaths.size(); i++) {
+      String aggregation = aggregations.get(i);
+      Path path = selectPaths.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
+        selectAggregationByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+      selectAggregationByGroupId.get(groupId).add(aggregation);
+    }
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectGroupEntityMap.entrySet()) {
+      String groupId = entry.getKey();
+      SelectSeriesGroupEntity entity = entry.getValue();
+      List<Path> paths = entity.getSelectPaths();
+      GroupByPlan subQueryPlan = new GroupByPlan();
+      subQueryPlan.setIntervals(queryPlan.getIntervals());
+      subQueryPlan.setOrigin(queryPlan.getOrigin());
+      subQueryPlan.setUnit(queryPlan.getUnit());
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split a group-by plan with filter paths
+   */
+  private static void splitGroupByPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitGroupByPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
+  /**
+   * Split an aggregation plan by select paths
+   */
+  private static void splitAggregationPlanBySelectPath(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    AggregationPlan queryPlan = (AggregationPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = queryPlan.getPaths();
+    List<String> aggregations = queryPlan.getAggregations();
+    Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    for (int i = 0; i < selectPaths.size(); i++) {
+      Path path = selectPaths.get(i);
+      String aggregation = aggregations.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
+        selectAggregationByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectAggregationByGroupId.get(groupId).add(aggregation);
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+    }
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectGroupEntityMap.entrySet()) {
+      String groupId = entry.getKey();
+      SelectSeriesGroupEntity entity = entry.getValue();
+      List<Path> paths = entity.getSelectPaths();
+      AggregationPlan subQueryPlan = new AggregationPlan();
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split an aggregation plan with filter paths
+   */
+  private static void splitAggregationPlanWithFilter(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitAggregationPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
+  /**
+   * Split a fill plan, which contains only select paths.
+   */
+  private static void splitFillPlan(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = fillQueryPlan.getPaths();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    for (Path path : selectPaths) {
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      selectGroupEntityMap.putIfAbsent(groupId, new SelectSeriesGroupEntity(groupId));
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+    }
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
+      FillQueryPlan subQueryPlan = new FillQueryPlan();
+      subQueryPlan.setProposer(fillQueryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(fillQueryPlan.getExpression());
+      subQueryPlan.setQueryTime(fillQueryPlan.getQueryTime());
+      subQueryPlan.setFillType(new EnumMap<>(fillQueryPlan.getFillType()));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split query plan with filter
+   */
+  private static void splitQueryPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitQueryPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
 }
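
The splitting invariant behind all of these methods: every select path lands in exactly one SelectSeriesGroupEntity keyed by its group id, and for aggregation and group-by plans the aggregation list is re-sliced in step with the paths. The core grouping step as a hedged sketch:

    Map<String, List<Path>> pathsByGroup = new HashMap<>();
    Map<String, List<String>> aggsByGroup = new HashMap<>();
    for (int i = 0; i < selectPaths.size(); i++) {
      String groupId = QPExecutorUtils.getGroupIdByDevice(selectPaths.get(i).getDevice());
      pathsByGroup.computeIfAbsent(groupId, k -> new ArrayList<>()).add(selectPaths.get(i));
      aggsByGroup.computeIfAbsent(groupId, k -> new ArrayList<>()).add(aggregations.get(i));
    }
    // each (pathsByGroup[g], aggsByGroup[g]) pair then seeds one sub-plan for group g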
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
index bab1536..d0690cd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@ -22,7 +22,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
+import org.apache.iotdb.cluster.qp.task.DataQueryTask;
 
 /**
  * Handle the request and process the result as a client with the current node
@@ -31,19 +31,9 @@ public interface NodeAsClient {
 
   /**
    * Asynchronous processing requests
-   *  @param leader leader node of the target group
    * @param qpTask single QPTask to be executed
    */
-  void asyncHandleRequest(BasicRequest request, PeerId leader,
-      SingleQPTask qpTask) throws RaftConnectionException;
-
-  /**
-   * Synchronous processing requests
-   * @param peerId leader node of the target group
-   *
-   */
-  QueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
-      throws RaftConnectionException;
+  void asyncHandleRequest(SingleQPTask qpTask) throws RaftConnectionException;
 
   /**
    * Shut down client
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
index 19f1343..cf41ae6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
@@ -18,26 +18,22 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.impl;
 
-import com.alipay.remoting.InvokeCallback;
 import com.alipay.remoting.exception.RemotingException;
-import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.option.CliOptions;
 import com.alipay.sofa.jraft.rpc.impl.cli.BoltCliClientService;
 import java.util.LinkedList;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import org.apache.iotdb.cluster.concurrent.pool.NodeAsClientThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
-import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.db.exception.ProcessorException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,30 +51,20 @@ public class RaftNodeAsClientManager {
   private static final int TASK_TIMEOUT_MS = CLUSTER_CONFIG.getQpTaskTimeout();
 
   /**
-   * Max valid number of @NodeAsClient usage, represent the number can run simultaneously
-   * at the same time
-   */
-  private static final int MAX_VALID_CLIENT_NUM = CLUSTER_CONFIG.getMaxNumOfInnerRpcClient();
-
-  /**
    * Max request number in queue
    */
-  private static final int MAX_QUEUE_CLIENT_NUM = CLUSTER_CONFIG.getMaxNumOfInnerRpcClient();
-
-  /**
-   * RaftNodeAsClient list
-   */
-  private final LinkedList<RaftNodeAsClient> clientList = new LinkedList<>();
+  private static final int MAX_QUEUE_TASK_NUM = CLUSTER_CONFIG.getMaxQueueNumOfQPTask();
 
   /**
-   * Number of clients in use
+   * Node as client thread pool manager
    */
-  private AtomicInteger clientNumInUse = new AtomicInteger(0);
+  private static final NodeAsClientThreadManager THREAD_POOL_MANAGER = NodeAsClientThreadManager
+      .getInstance();
 
   /**
-   * Number of requests for clients in queue
+   * QPTask queue list
    */
-  private int queueClientNum = 0;
+  private final LinkedList<SingleQPTask> taskQueue = new LinkedList<>();
 
   /**
    * Lock to update clientNumInUse
@@ -95,101 +81,100 @@ public class RaftNodeAsClientManager {
    */
   private volatile boolean isShuttingDown;
 
+  /**
+   * Mark whether manager init or not
+   */
+  private volatile boolean isInit;
+
   private RaftNodeAsClientManager() {
 
   }
 
   public void init() {
     isShuttingDown = false;
+    isInit = true;
+    taskQueue.clear();
+    for (int i = 0; i < CLUSTER_CONFIG.getConcurrentInnerRpcClientThread(); i++) {
+      THREAD_POOL_MANAGER.execute(() -> {
+        RaftNodeAsClient client = new RaftNodeAsClient();
+        while (true) {
+          consumeQPTask(client);
+          if (Thread.currentThread().isInterrupted()) {
+            break;
+          }
+        }
+        client.shutdown();
+      });
+    }
   }
 
   /**
-   * Try to get clientList, return null if num of queue clientList exceeds threshold.
+   * Produce qp task to be executed.
    */
-  public RaftNodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
+  public void produceQPTask(SingleQPTask qpTask) throws RaftConnectionException {
+    checkInit();
     resourceLock.lock();
     try {
-      if (queueClientNum >= MAX_QUEUE_CLIENT_NUM) {
+      checkInit();
+      checkShuttingDown();
+      if (taskQueue.size() >= MAX_QUEUE_TASK_NUM) {
         throw new RaftConnectionException(String
             .format("Raft inner rpc clients have reached the max numbers %s",
-                CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
-                    .getMaxQueueNumOfInnerRpcClient()));
-      }
-      queueClientNum++;
-      try {
-        while (true) {
-          checkShuttingDown();
-          if (clientNumInUse.get() < MAX_VALID_CLIENT_NUM) {
-            clientNumInUse.incrementAndGet();
-            return getClient();
-          }
-          resourceCondition.await();
-        }
-      } catch (InterruptedException e) {
-        throw new RaftConnectionException("An error occurred when trying to get NodeAsClient", e);
-      } finally {
-        queueClientNum--;
+                CLUSTER_CONFIG.getConcurrentInnerRpcClientThread() + CLUSTER_CONFIG
+                    .getMaxQueueNumOfQPTask()));
       }
+      taskQueue.addLast(qpTask);
+      resourceCondition.signal();
     } finally {
       resourceLock.unlock();
     }
   }
 
-  private void checkShuttingDown() throws RaftConnectionException {
-    if (isShuttingDown) {
-      throw new RaftConnectionException(
-          "Reject to provide RaftNodeAsClient client because cluster system is shutting down");
-    }
-  }
-
-  /**
-   * No-safe method, get client
-   */
-  private RaftNodeAsClient getClient() {
-    if (clientList.isEmpty()) {
-      return new RaftNodeAsClient();
-    } else {
-      return clientList.removeFirst();
+  public void checkInit() {
+    if (!isInit) {
+      init();
     }
   }
 
   /**
-   * Release usage of a client
+   * Consume qp task
    */
-  public void releaseClient(RaftNodeAsClient client) {
+  private void consumeQPTask(RaftNodeAsClient client) {
     resourceLock.lock();
     try {
-      clientNumInUse.decrementAndGet();
-      resourceCondition.signalAll();
-      clientList.addLast(client);
+      while (taskQueue.isEmpty()) {
+        if (Thread.currentThread().isInterrupted()) {
+          return;
+        }
+        resourceCondition.await();
+      }
+      client.asyncHandleRequest(taskQueue.removeFirst());
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOGGER.debug("Occur interruption when await for ResourceContidion", e);
     } finally {
       resourceLock.unlock();
     }
   }
 
-  public void shutdown() throws InterruptedException {
-    isShuttingDown = true;
-    while (clientNumInUse.get() != 0 && queueClientNum != 0) {
-      // wait until releasing all usage of clients.
-      resourceCondition.await();
-    }
-    while (!clientList.isEmpty()) {
-      clientList.removeFirst().shutdown();
+
+  private void checkShuttingDown() throws RaftConnectionException {
+    if (isShuttingDown) {
+      throw new RaftConnectionException(
+          "Reject to execute QPTask because cluster system is shutting down");
     }
   }
 
-  /**
-   * Get client number in use
-   */
-  public int getClientNumInUse() {
-    return clientNumInUse.get();
+  public void shutdown() throws ProcessorException {
+    isShuttingDown = true;
+    THREAD_POOL_MANAGER.close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
   }
 
   /**
-   * Get client number in queue
+   * Get the number of QP tasks in the queue.
    */
-  public int getClientNumInQueue() {
-    return queueClientNum;
+  public int getQPTaskNumInQueue() {
+    return taskQueue.size();
   }
 
   public static final RaftNodeAsClientManager getInstance() {
@@ -227,59 +212,21 @@ public class RaftNodeAsClientManager {
     }
 
     @Override
-    public void asyncHandleRequest(BasicRequest request, PeerId leader,
-        SingleQPTask qpTask)
-        throws RaftConnectionException {
-      LOGGER.debug("Node as client to send request to leader: {}", leader);
-      try {
-        boltClientService.getRpcClient()
-            .invokeWithCallback(leader.getEndpoint().toString(), request,
-                new InvokeCallback() {
-
-                  @Override
-                  public void onResponse(Object result) {
-                    BasicResponse response = (BasicResponse) result;
-                    releaseClient(RaftNodeAsClient.this);
-                    qpTask.run(response);
-                  }
-
-                  @Override
-                  public void onException(Throwable e) {
-                    LOGGER.error("Bolt rpc client occurs errors when handling Request", e);
-                    qpTask.setTaskState(TaskState.EXCEPTION);
-                    releaseClient(RaftNodeAsClient.this);
-                    qpTask.run(null);
-                  }
-
-                  @Override
-                  public Executor getExecutor() {
-                    return null;
-                  }
-                }, TASK_TIMEOUT_MS);
-      } catch (RemotingException | InterruptedException e) {
-        LOGGER.error(e.getMessage());
-        qpTask.setTaskState(TaskState.EXCEPTION);
-        releaseClient(RaftNodeAsClient.this);
-        qpTask.run(null);
-        throw new RaftConnectionException(e);
-      }
-    }
-
-    @Override
-    public QueryTask syncHandleRequest(BasicRequest request, PeerId peerId) {
+    public void asyncHandleRequest(SingleQPTask qpTask) {
+      LOGGER.debug("Node as client to send request to leader: {}", qpTask.getTargetNode());
       try {
         BasicResponse response = (BasicResponse) boltClientService.getRpcClient()
-            .invokeSync(peerId.getEndpoint().toString(), request, TASK_TIMEOUT_MS);
-        return new QueryTask(response, TaskState.FINISH);
+            .invokeSync(qpTask.getTargetNode().getEndpoint().toString(),
+                qpTask.getRequest(), TASK_TIMEOUT_MS);
+        qpTask.receive(response);
       } catch (RemotingException | InterruptedException e) {
-        return new QueryTask(null, TaskState.EXCEPTION);
-      } finally {
-        releaseClient(RaftNodeAsClient.this);
+        LOGGER.error(e.getMessage());
+        qpTask.receive(null);
       }
     }
 
     /**
-     * Shut down clientList
+     * Shut down this client.
      */
     @Override
     public void shutdown() {
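
A minimal, self-contained sketch of the lock/condition hand-off that produceQPTask and consumeQPTask implement above, with a plain Runnable standing in for SingleQPTask; the class and field names are illustrative only:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantLock;

    public class TaskQueueSketch {

      private final Deque<Runnable> taskQueue = new ArrayDeque<>();
      private final ReentrantLock resourceLock = new ReentrantLock();
      private final Condition resourceCondition = resourceLock.newCondition();
      private final int maxQueueTaskNum;

      public TaskQueueSketch(int maxQueueTaskNum) {
        this.maxQueueTaskNum = maxQueueTaskNum;
      }

      /** Producer: reject when the queue is full, otherwise enqueue and wake one consumer. */
      public void produce(Runnable task) {
        resourceLock.lock();
        try {
          if (taskQueue.size() >= maxQueueTaskNum) {
            throw new IllegalStateException("task queue reached its maximum size " + maxQueueTaskNum);
          }
          taskQueue.addLast(task);
          resourceCondition.signal();
        } finally {
          resourceLock.unlock();
        }
      }

      /** Consumer loop body: block until a task arrives, then run it outside the lock. */
      public void consumeOne() throws InterruptedException {
        Runnable task;
        resourceLock.lock();
        try {
          while (taskQueue.isEmpty()) {
            resourceCondition.await();
          }
          task = taskQueue.removeFirst();
        } finally {
          resourceLock.unlock();
        }
        task.run();
      }
    }

Unlike the patch above, this sketch runs the task after releasing the lock, so a slow handler cannot block producers; consumeQPTask as committed invokes asyncHandleRequest while still holding resourceLock.
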
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
index 90dc24a..a76d2a6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
@@ -16,25 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.request.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryMetricAsyncProcessor extends BasicAsyncUserProcessor<QueryMetricRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetricRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryMetricResponse response = QueryMetricResponse.createSuccessResponse(groupId,
+        RaftUtils.getReplicaMetric(request.getGroupID(), request.getMetric()));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryMetricRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
index de2d2ab..291da32 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
@@ -40,19 +40,16 @@ public class DataGroupNonQueryAsyncProcessor extends
   private static final Logger LOGGER = LoggerFactory
       .getLogger(DataGroupNonQueryAsyncProcessor.class);
 
-  public DataGroupNonQueryAsyncProcessor() {
-  }
-
   @Override
   public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
       DataGroupNonQueryRequest request) {
     LOGGER.debug("Handle data non query request");
 
-    /** Check if it's the leader **/
+    /* Check if it's the leader */
     String groupId = request.getGroupID();
     DataPartitionRaftHolder dataPartitionRaftHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
     if (!dataPartitionRaftHolder.getFsm().isLeader()) {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
       LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
 
       DataGroupNonQueryResponse response = DataGroupNonQueryResponse
@@ -61,7 +58,8 @@ public class DataGroupNonQueryAsyncProcessor extends
     } else {
       LOGGER.debug("Apply task to raft node");
 
-      /** Apply Task to Raft Node **/
+
+      /* Apply Task to Raft Node */
       BasicResponse response = DataGroupNonQueryResponse.createEmptyResponse(groupId);
       RaftService service = (RaftService) dataPartitionRaftHolder.getService();
       RaftUtils.executeRaftTaskForRpcProcessor(service, asyncContext, request, response);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
index 9f09bbb..95f9e32 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
@@ -49,7 +49,7 @@ public class MetaGroupNonQueryAsyncProcessor extends
     String groupId = request.getGroupID();
     MetadataRaftHolder metadataHolder = RaftUtils.getMetadataRaftHolder();
     if (!metadataHolder.getFsm().isLeader()) {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
       LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
 
       MetaGroupNonQueryResponse response = MetaGroupNonQueryResponse
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
index 894d9eb..8a388d3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
@@ -20,17 +20,21 @@ package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
 
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSeriesReaderRequest> {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(InitSeriesReaderSyncProcessor.class);
+
   @Override
   public Object handleRequest(BizContext bizContext, InitSeriesReaderRequest request)
       throws Exception {
@@ -47,7 +51,8 @@ public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSe
    * @param groupId group id
    */
   private void handleNullRead(int readConsistencyLevel, String groupId) throws ProcessorException {
-    if (readConsistencyLevel == ClusterConstant.STRONG_CONSISTENCY_LEVEL && !QPExecutorUtils
+    LOGGER.debug("Read data level is {}", readConsistencyLevel);
+    if (readConsistencyLevel == ClusterConsistencyLevel.STRONG.ordinal() && !QPExecutorUtils
         .checkDataGroupLeader(groupId)) {
       Status nullReadTaskStatus = Status.OK();
       RaftUtils.handleNullReadToDataGroup(nullReadTaskStatus, groupId);
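
The processors in this commit compare the int carried in the request against the enum's ordinal. A minimal sketch of that convention, with a stand-in enum mirroring the STRONG/WEAK constants used above (any further constants of ClusterConsistencyLevel are not shown in this commit):

    public class ConsistencyLevelSketch {

      // Stand-in mirroring the STRONG/WEAK constants used in this commit.
      enum ClusterConsistencyLevel { STRONG, WEAK }

      // The request carries the level as a plain int (the enum's ordinal).
      static boolean needsLeaderRead(int readConsistencyLevel) {
        return readConsistencyLevel == ClusterConsistencyLevel.STRONG.ordinal();
      }

      public static void main(String[] args) {
        int level = ClusterConsistencyLevel.STRONG.ordinal();
        System.out.println(needsLeaderRead(level)); // true: route the read through the leader
      }
    }

Encoding the level as an ordinal keeps the wire format a single int, at the cost of tying the protocol to the enum's declaration order.
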
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
index 90dc24a..4c8e599 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
@@ -22,7 +22,6 @@ import com.alipay.remoting.BizContext;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 
 public class QuerySeriesDataSyncProcessor extends
     BasicSyncUserProcessor<QuerySeriesDataRequest> {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
index 36e657c..a8fa1fa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -42,8 +42,8 @@ public class QueryMetadataAsyncProcessor extends
       QueryMetadataRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
-      QueryMetadataResponse response = null;
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
+      QueryMetadataResponse response;
       try {
         response = QueryMetadataResponse
             .createSuccessResponse(groupId, mManager.getMetadata());
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
index 8771eea..7a46a14 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QueryMetadataInStringAsyncProcessor extends
       QueryMetadataInStringRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryMetadataInStringResponse response = QueryMetadataInStringResponse
           .createSuccessResponse(groupId, mManager.getMetadataInString());
       response.addResult(true);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
index 8e1e47b..3736105 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QueryPathsAsyncProcessor extends BasicAsyncUserProcessor<QueryPaths
       QueryPathsRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryPathsResponse response = QueryPathsResponse
           .createEmptyResponse(groupId);
       try {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
index 9e4b1c7..c8df5a2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QuerySeriesTypeAsyncProcessor extends BasicAsyncUserProcessor<Query
       QuerySeriesTypeRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QuerySeriesTypeResponse response;
       try {
         response = QuerySeriesTypeResponse.createSuccessResponse(groupId, mManager.getSeriesType(request.getPath()));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
index 593f99d..d08cd1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -42,7 +42,7 @@ public class QueryTimeSeriesAsyncProcessor extends BasicAsyncUserProcessor<Query
       QueryTimeSeriesRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryTimeSeriesResponse response = QueryTimeSeriesResponse
           .createEmptyResponse(groupId);
       try {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
similarity index 50%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
index 90dc24a..5074d45 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
@@ -16,25 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryJobNumRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryJobNumResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryJobNumAsyncProcessor extends BasicAsyncUserProcessor<QueryJobNumRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryJobNumRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryJobNumResponse response = QueryJobNumResponse.createSuccessResponse(groupId,
+        RaftUtils.getLocalQueryJobNumMap());
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryJobNumRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
similarity index 50%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
index 90dc24a..9c5a2bf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
@@ -16,25 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryLeaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryLeaderResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryLeaderAsyncProcessor extends BasicAsyncUserProcessor<QueryLeaderRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryLeaderRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryLeaderResponse response = QueryLeaderResponse.createSuccessResponse(groupId,
+        RaftUtils.getLocalLeaderPeerID(groupId));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryLeaderRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java
new file mode 100644
index 0000000..c2dfef1
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+
+public class QueryMetricAsyncProcessor extends BasicAsyncUserProcessor<QueryMetricRequest> {
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetricRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryMetricResponse response = QueryMetricResponse.createSuccessResponse(groupId,
+        RaftUtils.getReplicaMetric(request.getGroupID(), request.getMetric()));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
+  }
+
+  @Override
+  public String interest() {
+    return QueryMetricRequest.class.getName();
+  }
+}
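
An async processor is matched to incoming requests by the class name returned from interest(). A sketch of wiring the new processor into a SOFA Bolt RpcServer, assuming the registration API used elsewhere in the cluster module; the port is illustrative:

    import com.alipay.remoting.rpc.RpcServer;
    import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryMetricAsyncProcessor;

    public class RegisterProcessorSketch {

      public static void main(String[] args) {
        RpcServer rpcServer = new RpcServer(8888); // illustrative port
        // Bolt dispatches incoming QueryMetricRequest instances to this processor
        // because interest() returns QueryMetricRequest.class.getName().
        rpcServer.registerUserProcessor(new QueryMetricAsyncProcessor());
        rpcServer.start();
      }
    }
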
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
index 90dc24a..615eaf6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
@@ -16,25 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryStatusRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryStatusResponse;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryStatusAsyncProcessor extends BasicAsyncUserProcessor<QueryStatusRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryStatusRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryStatusResponse response = QueryStatusResponse.createSuccessResponse(groupId,
+        true);
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryStatusRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
index dc15158..33a4d8e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
@@ -19,7 +19,6 @@
 package org.apache.iotdb.cluster.rpc.raft.request;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
old mode 100755
new mode 100644
similarity index 67%
rename from spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
index be36f35..eb81769
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
@@ -16,19 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.exception;
+package org.apache.iotdb.cluster.rpc.raft.request;
 
+import java.io.Serializable;
 
-/**
- * This exception is threw whiling meeting error in BasicOperator
- *
- */
-public class BasicOperatorException extends QueryProcessorException {
+public class QueryMetricRequest extends BasicQueryRequest implements Serializable {
 
-  private static final long serialVersionUID = -2163809754074237707L;
+  private String metric;
 
-  public BasicOperatorException(String msg) {
-    super(msg);
+  public QueryMetricRequest(String groupID, int readConsistencyLevel, String metric) {
+    super(groupID, readConsistencyLevel);
+    this.metric = metric;
   }
 
-}
+  public String getMetric() {
+    return metric;
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
index c974e2f..e28ac15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
@@ -18,10 +18,16 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.request.querydata;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
@@ -42,12 +48,12 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
   /**
    * Key is series type, value is query plan
    */
-  private Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+  private Map<PathType, byte[]> allQueryPlan = new EnumMap<>(PathType.class);
 
   /**
    * Represent all filter of leaf node in filter tree while executing a query with value filter.
    */
-  private List<Filter> filterList = new ArrayList<>();
+  private List<byte[]> filterList = new ArrayList<>();
 
 
   private InitSeriesReaderRequest(String groupID, String taskId) {
@@ -55,12 +61,17 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId, int readConsistencyLevel,
-      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList){
+  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId,
+      int readConsistencyLevel,
+      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList) throws IOException {
     InitSeriesReaderRequest request = new InitSeriesReaderRequest(groupId, taskId);
     request.setReadConsistencyLevel(readConsistencyLevel);
-    request.allQueryPlan = allQueryPlan;
-    request.filterList = filterList;
+    for (Entry<PathType, QueryPlan> entry : allQueryPlan.entrySet()) {
+      request.allQueryPlan.put(entry.getKey(), toByteArray(entry.getValue()));
+    }
+    for (Filter filter : filterList) {
+      request.filterList.add(toByteArray(filter));
+    }
     return request;
   }
 
@@ -72,20 +83,51 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public Map<PathType, QueryPlan> getAllQueryPlan() {
-    return allQueryPlan;
+  public Map<PathType, QueryPlan> getAllQueryPlan() throws IOException, ClassNotFoundException {
+    Map<PathType, QueryPlan> queryPlanMap = new EnumMap<>(PathType.class);
+    for (Entry<PathType, byte[]> entry : allQueryPlan.entrySet()) {
+      queryPlanMap.put(entry.getKey(), (QueryPlan) toObject(entry.getValue()));
+    }
+    return queryPlanMap;
   }
 
-  public void setAllQueryPlan(
-      Map<PathType, QueryPlan> allQueryPlan) {
-    this.allQueryPlan = allQueryPlan;
+  public List<Filter> getFilterList() throws IOException, ClassNotFoundException {
+    List<Filter> filters = new ArrayList<>();
+    for (byte[] filterBytes : filterList) {
+      filters.add((Filter) toObject(filterBytes));
+    }
+    return filters;
   }
 
-  public List<Filter> getFilterList() {
-    return filterList;
+  /**
+   * Convert an object to byte array
+   *
+   * @param obj object to serialize, which needs to implement Serializable
+   * @return byte array of object
+   */
+  private static byte[] toByteArray(Object obj) throws IOException {
+    try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
+        ObjectOutputStream oos = new ObjectOutputStream(bos)) {
+      oos.writeObject(obj);
+      oos.flush();
+      return bos.toByteArray();
+    }
   }
 
-  public void setFilterList(List<Filter> filterList) {
-    this.filterList = filterList;
+  /**
+   * Convert byte array back to Object
+   *
+   * @param bytes byte array of object
+   * @return object
+   */
+  private static Object toObject(byte[] bytes) throws IOException, ClassNotFoundException {
+    try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
+      return ois.readObject();
+    }
   }
 }
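
The request now ships QueryPlan and Filter objects as Java-serialized byte arrays, so both ends only exchange Serializable payloads. A minimal round-trip sketch of the same pattern, with a String standing in for the plan/filter objects:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    public class SerializationSketch {

      static byte[] toByteArray(Object obj) throws IOException {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(bos)) {
          oos.writeObject(obj);
          oos.flush();
          return bos.toByteArray();
        }
      }

      static Object toObject(byte[] bytes) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
          return ois.readObject();
        }
      }

      public static void main(String[] args) throws Exception {
        byte[] bytes = toByteArray("a serializable payload");
        System.out.println(toObject(bytes)); // prints: a serializable payload
      }
    }
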
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
index 351e6eb..cbcef15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
@@ -39,21 +39,16 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
    */
   private List<Long> batchTimestamp;
 
-  /**
-   * Series to fetch data from remote query node
-   */
-  private List<String> fetchDataSeries;
-
   private QuerySeriesDataByTimestampRequest(String groupID) {
     super(groupID);
   }
 
-  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds, String taskId, List<Long> batchTimestamp, List<String> fetchDataSeries){
+  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds,
+      String taskId, List<Long> batchTimestamp) {
     QuerySeriesDataByTimestampRequest request = new QuerySeriesDataByTimestampRequest(groupId);
     request.queryRounds = queryRounds;
     request.taskId = taskId;
     request.batchTimestamp = batchTimestamp;
-    request.fetchDataSeries = fetchDataSeries;
     return request;
   }
 
@@ -80,12 +75,4 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
   public void setBatchTimestamp(List<Long> batchTimestamp) {
     this.batchTimestamp = batchTimestamp;
   }
-
-  public List<String> getFetchDataSeries() {
-    return fetchDataSeries;
-  }
-
-  public void setFetchDataSeries(List<String> fetchDataSeries) {
-    this.fetchDataSeries = fetchDataSeries;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
index 554b8c1..e0fc23c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
@@ -46,9 +46,9 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   private PathType pathType;
 
   /**
-   * Key is series type, value is series list
+   * List of series path indexes.
    */
-  private List<String> seriesPaths = new ArrayList<>();
+  private List<Integer> seriesPathIndexs = new ArrayList<>();
 
   private QuerySeriesDataRequest(String groupID, String taskId) {
     super(groupID);
@@ -56,10 +56,10 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   }
 
   public static QuerySeriesDataRequest createFetchDataRequest(String groupId, String taskId,
-      PathType pathType, List<String> seriesPaths, long queryRounds) {
+      PathType pathType, List<Integer> seriesPathIndexs, long queryRounds) {
     QuerySeriesDataRequest request = new QuerySeriesDataRequest(groupId, taskId);
     request.pathType = pathType;
-    request.seriesPaths = seriesPaths;
+    request.seriesPathIndexs = seriesPathIndexs;
     request.queryRounds = queryRounds;
     return request;
   }
@@ -88,11 +88,7 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
     this.pathType = pathType;
   }
 
-  public List<String> getSeriesPaths() {
-    return seriesPaths;
-  }
-
-  public void setSeriesPaths(List<String> seriesPaths) {
-    this.seriesPaths = seriesPaths;
+  public List<Integer> getSeriesPathIndexs() {
+    return seriesPathIndexs;
   }
 }
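
After this change a fetch round refers to series by their position in the reader list registered through InitSeriesReaderRequest, instead of repeating full path strings. A sketch of building a follow-up fetch under that assumption; the chosen indexes and argument values are illustrative:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.iotdb.cluster.query.PathType;
    import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;

    public class FetchByIndexSketch {

      static QuerySeriesDataRequest nextRound(String groupId, String taskId, PathType pathType,
          long queryRounds) {
        // Indexes refer to positions in the series list sent with InitSeriesReaderRequest.
        List<Integer> seriesPathIndexs = Arrays.asList(0, 2);
        return QuerySeriesDataRequest
            .createFetchDataRequest(groupId, taskId, pathType, seriesPathIndexs, queryRounds);
      }
    }
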
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
index 9212258..61069e8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryJobNumRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8438291845259380829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryJobNumRequest(String groupID) {
+    super(groupID);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
index 9212258..a3a2c06 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryLeaderRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8438291563829380829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryLeaderRequest(String groupID) {
+    super(groupID);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
similarity index 62%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
index 9212258..4d0c0f6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
@@ -16,27 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryMetricRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8434915883945730829L;
 
-  private String name;
+  private String metric;
 
-  ThreadName(String name) {
-    this.name = name;
+  public QueryMetricRequest(String groupID, String metric) {
+    super(groupID);
+    this.metric = metric;
   }
 
-  public String getName() {
-    return name;
+  public String getMetric() {
+    return metric;
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
index 9212258..b88b08e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryStatusRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8434915883943829829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryStatusRequest(String groupID) {
+    super(groupID);
   }
-}
+}
\ No newline at end of file
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
similarity index 51%
copy from iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
index bf6a349..9c77792 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
@@ -16,26 +16,31 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.db.exception;
+package org.apache.iotdb.cluster.rpc.raft.response;
 
-public class BufferWriteProcessorException extends ProcessorException {
+import java.util.Map;
 
-  private static final long serialVersionUID = 6817880163296469038L;
+public class QueryMetricResponse extends BasicResponse {
 
-  public BufferWriteProcessorException() {
-    super();
-  }
+  private Map<String, Long> value;
 
-  public BufferWriteProcessorException(Exception pathExcp) {
-    super(pathExcp.getMessage());
+  private QueryMetricResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public BufferWriteProcessorException(String msg) {
-    super(msg);
+  public static QueryMetricResponse createSuccessResponse(String groupId, Map<String, Long> value) {
+    QueryMetricResponse response = new QueryMetricResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public BufferWriteProcessorException(Throwable throwable) {
-    super(throwable.getMessage());
+  public static QueryMetricResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetricResponse(groupId, false, null, errorMsg);
   }
 
+  public Map<String, Long> getValue() {
+    return value;
+  }
 }
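
Each of the new responses follows the same two-factory shape: createSuccessResponse carries the payload, createErrorResponse carries only the message. A sketch of a caller working with QueryMetricResponse; the endpoint key and metric value are illustrative:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;

    public class MetricResponseSketch {

      public static void main(String[] args) {
        Map<String, Long> metric = new HashMap<>();
        metric.put("127.0.0.1:8887", 42L); // node endpoint -> metric value (illustrative)
        QueryMetricResponse ok = QueryMetricResponse.createSuccessResponse("group-0", metric);
        System.out.println(ok.getValue());

        QueryMetricResponse err = QueryMetricResponse.createErrorResponse("group-0", "replica unreachable");
        System.out.println(err.getValue()); // null on error: check the error message first
      }
    }
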
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
index 9d86398..e9e858d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
 
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 /**
@@ -27,9 +29,12 @@ public class DataGroupNonQueryResponse extends BasicResponse {
 
   private static final long serialVersionUID = -8288044965888956717L;
 
+  private List<String> errorMsgList;
+
   private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
+    errorMsgList = new ArrayList<>();
   }
 
   public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
@@ -44,4 +49,11 @@ public class DataGroupNonQueryResponse extends BasicResponse {
     return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
   }
 
+  public List<String> getErrorMsgList() {
+    return errorMsgList;
+  }
+
+  public void addErrorMsg(String errorMsg) {
+    this.errorMsgList.add(errorMsg);
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
index 9d86398..c681390 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import java.util.Map;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryJobNumResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private Map<String, Integer> value;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryJobNumResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryJobNumResponse createSuccessResponse(String groupId, Map<String, Integer> value) {
+    QueryJobNumResponse response = new QueryJobNumResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryJobNumResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryJobNumResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public Map<String, Integer> getValue() {
+    return value;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
index 9d86398..ad536aa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryLeaderResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private PeerId leader;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryLeaderResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryLeaderResponse createSuccessResponse(String groupId, PeerId leader) {
+    QueryLeaderResponse response = new QueryLeaderResponse(groupId, false, null,
+        null);
+    response.leader = leader;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryLeaderResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryLeaderResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public PeerId getLeader() {
+    return leader;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
index 9d86398..2f847db 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import java.util.Map;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryMetricResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private Map<String, Long> value;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryMetricResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryMetricResponse createSuccessResponse(String groupId, Map<String, Long> value) {
+    QueryMetricResponse response = new QueryMetricResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryMetricResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetricResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public Map<String, Long> getValue() {
+    return value;
   }
-
 }
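
For illustration, the private-constructor/static-factory pattern shared by the querymetric responses above might be exercised as follows. This is a minimal sketch, not code from the commit; the handler class and the metric key are hypothetical.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;

    public class MetricHandlerSketch {

      // Success responses carry the metric map; failures carry only the
      // error message, mirroring the factory methods in the diff above.
      public QueryMetricResponse handle(String groupId) {
        try {
          Map<String, Long> metrics = new HashMap<>();
          metrics.put("replica-lag", 0L); // hypothetical metric entry
          return QueryMetricResponse.createSuccessResponse(groupId, metrics);
        } catch (Exception e) {
          return QueryMetricResponse.createErrorResponse(groupId, e.getMessage());
        }
      }
    }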
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
similarity index 54%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
index f4cb4b5..2044f5e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
@@ -16,34 +16,31 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.task;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
-import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-public class QueryTask {
-  private BasicResponse basicResponse;
-  private TaskState state;
+public class QueryStatusResponse extends BasicResponse {
 
-  public QueryTask(BasicResponse basicResponse,
-      TaskState state) {
-    this.basicResponse = basicResponse;
-    this.state = state;
-  }
+  private boolean status;
 
-  public BasicResponse getBasicResponse() {
-    return basicResponse;
+  private QueryStatusResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public void setBasicResponse(BasicResponse basicResponse) {
-    this.basicResponse = basicResponse;
+  public static QueryStatusResponse createSuccessResponse(String groupId, boolean status) {
+    QueryStatusResponse response = new QueryStatusResponse(groupId, false, null,
+        null);
+    response.status = status;
+    return response;
   }
 
-  public TaskState getState() {
-    return state;
+  public static QueryStatusResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryStatusResponse(groupId, false, null, errorMsg);
   }
 
-  public void setState(TaskState state) {
-    this.state = state;
+  public boolean getStatus() {
+    return status;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
new file mode 100644
index 0000000..01fe095
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.StartupException;
+import org.apache.iotdb.db.service.IService;
+import org.apache.iotdb.db.service.JMXService;
+import org.apache.iotdb.db.service.ServiceType;
+
+public class ClusterMonitor implements ClusterMonitorMBean, IService {
+
+  /**
+   * Original format = String.format("%s:%s=%s",
+   * IoTDBConstant.IOTDB_PACKAGE, IoTDBConstant.JMX_TYPE, getID().getJmxName()
+   */
+  public static final String MBEAN_NAME = "org.apache.iotdb.service:type=Cluster Monitor";
+
+  public static final ClusterMonitor INSTANCE = new ClusterMonitor();
+
+  public String getMbeanName() {
+    return MBEAN_NAME;
+  }
+
+  @Override
+  public void start() throws StartupException {
+    try {
+      JMXService.registerMBean(INSTANCE, MBEAN_NAME);
+    } catch (Exception e) {
+      String errorMessage = String
+          .format("Failed to start %s because of %s", this.getID().getName(),
+              e.getMessage());
+      throw new StartupException(errorMessage);
+    }
+  }
+
+  @Override
+  public void stop() {
+    JMXService.deregisterMBean(MBEAN_NAME);
+  }
+
+  @Override
+  public ServiceType getID() {
+    return ServiceType.CLUSTER_MONITOR_SERVICE;
+  }
+
+  @Override
+  public Map<Integer, String> getPhysicalRing() {
+    return RaftUtils.getPhysicalRing();
+  }
+
+  @Override
+  public Map<Integer, String> getVirtualRing() {
+    return RaftUtils.getVirtualRing();
+  }
+
+  @Override
+  public Map<String, String> getAllLeaders() {
+    Map<String, String> map = new HashMap<>();
+    RaftUtils.getGroupLeaderCache().entrySet().forEach(entry -> map.put(entry.getKey(), entry.getValue().toString()));
+    return map;
+  }
+
+  @Override
+  public String getDataPartitionOfSG(String sg) {
+    PeerId[] nodes = RaftUtils.getDataPartitionOfSG(sg);
+    StringBuilder builder = new StringBuilder();
+    builder.append(nodes[0].getIp()).append(" (leader)");
+    for (int i = 1; i < nodes.length; i++) {
+      builder.append(", ").append(nodes[i].getIp());
+    }
+    return builder.toString();
+  }
+
+  @Override
+  public Set<String> getAllStorageGroupsLocally() {
+    return RaftUtils.getAllStorageGroupsLocally();
+  }
+
+  @Override
+  public Map<String[], String[]> getDataPartitonOfNode(String ip) {
+    return RaftUtils.getDataPartitionOfNode(ip);
+  }
+
+  @Override
+  public Map<String[], String[]> getDataPartitonOfNode(String ip, int port) {
+    return RaftUtils.getDataPartitionOfNode(ip, port);
+  }
+
+  @Override
+  public Map<String, Map<String, Long>> getReplicaLagMap() {
+    return RaftUtils.getReplicaLagMap();
+  }
+
+  @Override
+  public Map<String, Map<String, Integer>> getQueryJobNumMap() {
+    return RaftUtils.getQueryJobNumMapForCluster();
+  }
+
+  @Override
+  public Map<String, Boolean> getStatusMap() {
+    return RaftUtils.getStatusMapForCluster();
+  }
+}
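
ClusterMonitor implements IService, so it can be wired into server startup like any other service. A minimal sketch, assuming the usual RegisterManager wiring used by other IoTDB services (the bootstrap class is hypothetical):

    import org.apache.iotdb.cluster.service.ClusterMonitor;
    import org.apache.iotdb.db.exception.StartupException;
    import org.apache.iotdb.db.service.RegisterManager;

    public class MonitorBootstrapSketch {

      private final RegisterManager registerManager = new RegisterManager();

      // Registering the singleton invokes ClusterMonitor.start(), which
      // publishes the MBean under ClusterMonitor.MBEAN_NAME.
      public void startMonitor() throws StartupException {
        registerManager.register(ClusterMonitor.INSTANCE);
      }
    }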
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
new file mode 100644
index 0000000..cca0820
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service;
+
+import java.util.Map;
+import java.util.Set;
+
+public interface ClusterMonitorMBean {
+
+  /**
+   * Get physical hash ring
+   *
+   * @return key: hash value, value: node ip
+   */
+  Map<Integer, String> getPhysicalRing();
+
+  /**
+   * Get virtual hash ring
+   *
+   * @return key: hash value, value: node ip
+   */
+  Map<Integer, String> getVirtualRing();
+
+  /**
+   * Get the current leader of each data partition
+   *
+   * @return key: group id, value: leader node ip
+   */
+  Map<String, String> getAllLeaders();
+
+  /**
+   * Get data partition information of the input storage group in String format. The node ips are
+   * split by ',', and the first ip is the current leader.
+   *
+   * @param sg input storage group path
+   * @return data partition information in String format
+   */
+  String getDataPartitionOfSG(String sg);
+
+  /**
+   * Get all storage groups
+   *
+   * @return Set of all storage groups
+   */
+  Set<String> getAllStorageGroupsLocally();
+
+  /**
+   * Get data partitions that the input node belongs to.
+   *
+   * @param ip node ip
+   * @param port node rpc port
+   * @return key: node ips of one data partition, value: storage group paths that belong to this
+   * data partition
+   */
+  Map<String[], String[]> getDataPartitonOfNode(String ip, int port);
+  Map<String[], String[]> getDataPartitonOfNode(String ip);
+
+  /**
+   * Get replica lag for metadata group and each data partition
+   *
+   * @return key: groupId, value: ip -> replica lag
+   */
+  Map<String, Map<String, Long>> getReplicaLagMap();
+
+  /**
+   * Get number of query jobs on each data partition for all nodes
+   *
+   * @return outer key: ip, inner key: groupId, value: number of query jobs
+   */
+  Map<String, Map<String, Integer>> getQueryJobNumMap();
+
+  /**
+   * Get status of all nodes
+   *
+   * @return key: node ip, value: live or not
+   */
+  Map<String, Boolean> getStatusMap();
+}
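
The nodetool commands further down consume this interface through a JMX proxy; a standalone client can do the same. A minimal sketch, assuming the default JMX endpoint 127.0.0.1:31999 (host, port, and output format are illustrative):

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;
    import org.apache.iotdb.cluster.service.ClusterMonitor;
    import org.apache.iotdb.cluster.service.ClusterMonitorMBean;

    public class StatusReaderSketch {

      public static void main(String[] args) throws Exception {
        JMXServiceURL url =
            new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:31999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection mbsc = connector.getMBeanServerConnection();
          ClusterMonitorMBean monitor = JMX.newMBeanProxy(
              mbsc, new ObjectName(ClusterMonitor.MBEAN_NAME), ClusterMonitorMBean.class);
          // Print liveness of every node, as getStatusMap() documents.
          monitor.getStatusMap().forEach(
              (ip, alive) -> System.out.println(ip + " -> " + (alive ? "live" : "down")));
        }
      }
    }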
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
index bfc74c1..505afd9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
 import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
@@ -66,9 +67,10 @@ public class TSServiceClusterImpl extends TSServiceImpl {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceClusterImpl.class);
 
-  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
-  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
   private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor(
+      queryMetadataExecutor);
+  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
 
   private IClusterRpcQueryManager queryManager = ClusterRpcQueryManager.getInstance();
 
@@ -102,7 +104,8 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   }
 
   @Override
-  protected TSDataType getSeriesType(String path) throws PathErrorException, InterruptedException, ProcessorException {
+  protected TSDataType getSeriesType(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
     return queryMetadataExecutor.processSeriesTypeQuery(path);
   }
 
@@ -123,17 +126,17 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       List<String> statements = req.getStatements();
       PhysicalPlan[] physicalPlans = new PhysicalPlan[statements.size()];
       int[] result = new int[statements.size()];
-      String batchErrorMessage = "";
+      StringBuilder batchErrorMessage = new StringBuilder();
       boolean isAllSuccessful = true;
 
-      /** find all valid physical plans **/
+      /* find all valid physical plans */
       for (int i = 0; i < statements.size(); i++) {
         try {
           PhysicalPlan plan = processor
               .parseSQLToPhysicalPlan(statements.get(i), zoneIds.get());
           plan.setProposer(username.get());
 
-          /** if meet a query, handle all requests before the query request. **/
+          /* if meet a query, handle all requests before the query request. */
           if (plan.isQuery()) {
             int[] resultTemp = new int[i];
             PhysicalPlan[] physicalPlansTemp = new PhysicalPlan[i];
@@ -143,8 +146,11 @@ public class TSServiceClusterImpl extends TSServiceImpl {
             physicalPlans = physicalPlansTemp;
             BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
             nonQueryExecutor.processBatch(physicalPlans, batchResult);
+            batchErrorMessage.append(String
+                .format(ERROR_MESSAGE_FORMAT_IN_BATCH, i,
+                    "statement is query :" + statements.get(i)));
             return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
-                "statement is query :" + statements.get(i), Arrays.stream(result).boxed().collect(
+                statements.get(i), Arrays.stream(result).boxed().collect(
                     Collectors.toList()));
           }
 
@@ -155,7 +161,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
                 plan.getOperatorType());
             result[i] = Statement.EXECUTE_FAILED;
             isAllSuccessful = false;
-            batchErrorMessage = errMessage;
+            batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
           } else {
             physicalPlans[i] = plan;
           }
@@ -165,19 +171,19 @@ public class TSServiceClusterImpl extends TSServiceImpl {
               e.getMessage());
           result[i] = Statement.EXECUTE_FAILED;
           isAllSuccessful = false;
-          batchErrorMessage = errMessage;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
         } catch (Exception e) {
           String errMessage = String.format("Fail to generate physcial plan" + "%s beacuse %s",
               statements.get(i), e.getMessage());
           result[i] = Statement.EXECUTE_FAILED;
           isAllSuccessful = false;
-          batchErrorMessage = errMessage;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
         }
       }
 
       BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
       nonQueryExecutor.processBatch(physicalPlans, batchResult);
-      batchErrorMessage = batchResult.batchErrorMessage;
+      // batchErrorMessage is the same builder held by batchResult, so no copy-back is needed
       isAllSuccessful = batchResult.isAllSuccessful;
 
       if (isAllSuccessful) {
@@ -185,7 +191,8 @@ public class TSServiceClusterImpl extends TSServiceImpl {
             "Execute batch statements successfully", Arrays.stream(result).boxed().collect(
                 Collectors.toList()));
       } else {
-        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS, batchErrorMessage,
+        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
+            batchErrorMessage.toString(),
             Arrays.stream(result).boxed().collect(
                 Collectors.toList()));
       }
@@ -198,16 +205,17 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   /**
    * Present batch results.
    */
-  public class BatchResult {
+  public static class BatchResult {
 
     private boolean isAllSuccessful;
-    private String batchErrorMessage;
-    private int[] result;
+    private StringBuilder batchErrorMessage;
+    private int[] resultArray;
 
-    private BatchResult(boolean isAllSuccessful, String batchErrorMessage, int[] result) {
+    public BatchResult(boolean isAllSuccessful, StringBuilder batchErrorMessage,
+        int[] resultArray) {
       this.isAllSuccessful = isAllSuccessful;
       this.batchErrorMessage = batchErrorMessage;
-      this.result = result;
+      this.resultArray = resultArray;
     }
 
     public boolean isAllSuccessful() {
@@ -218,20 +226,21 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       isAllSuccessful = allSuccessful;
     }
 
-    public String getBatchErrorMessage() {
+    public StringBuilder getBatchErrorMessage() {
       return batchErrorMessage;
     }
 
-    public void setBatchErrorMessage(String batchErrorMessage) {
-      this.batchErrorMessage = batchErrorMessage;
+    public void addBatchErrorMessage(int index, String batchErrorMessage) {
+      this.batchErrorMessage
+          .append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, index, batchErrorMessage));
     }
 
-    public int[] getResult() {
-      return result;
+    public int[] getResultArray() {
+      return resultArray;
     }
 
-    public void setResult(int[] result) {
-      this.result = result;
+    public void setResultArray(int[] resultArray) {
+      this.resultArray = resultArray;
     }
   }
 
@@ -243,38 +252,44 @@ public class TSServiceClusterImpl extends TSServiceImpl {
     statement = statement.toLowerCase().trim();
     try {
       if (Pattern.matches(ClusterConstant.SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
-        String[] splits = statement.split("\\s+");
-        int level = Integer.parseInt(splits[splits.length - 1]);
+        int level = parseConsistencyLevel(statement);
         queryMetadataExecutor.setReadMetadataConsistencyLevel(level);
         return true;
       } else if (Pattern
           .matches(ClusterConstant.SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
-        String[] splits = statement.split("\\s+");
-        int level = Integer.parseInt(splits[splits.length - 1]);
+        int level = parseConsistencyLevel(statement);
         queryDataExecutor.setReadDataConsistencyLevel(level);
         return true;
       } else {
         return false;
       }
-    } catch (ConsistencyLevelException e){
+    } catch (ConsistencyLevelException e) {
       throw new Exception(e.getMessage());
     }
   }
 
+  private int parseConsistencyLevel(String statement) throws ConsistencyLevelException {
+    String[] splits = statement.split("\\s+");
+    String levelName = splits[splits.length - 1].toLowerCase();
+    int level = ClusterConsistencyLevel.getLevel(levelName);
+    if (level == ClusterConsistencyLevel.UNSUPPORT_LEVEL) {
+      throw new ConsistencyLevelException(String.format("Consistency level %s not support", levelName));
+    }
+    return level;
+  }
+
   @Override
   protected boolean executeNonQuery(PhysicalPlan plan) throws ProcessorException {
     return nonQueryExecutor.processNonQuery(plan);
   }
 
-  /**
-   * It's unnecessary to do this check. It has benn checked in transforming query physical plan.
-   */
   @Override
-  public void checkFileLevelSet(List<Path> paths) throws PathErrorException {
+  protected void checkFileLevelSet(List<Path> paths) throws PathErrorException {
+    // It's unnecessary to do this check. It has been checked when transforming the query physical plan.
   }
 
   @Override
-  public void releaseQueryResource(TSCloseOperationReq req) throws Exception {
+  protected void releaseQueryResource(TSCloseOperationReq req) throws Exception {
     Map<Long, QueryContext> contextMap = contextMapLocal.get();
     if (contextMap == null) {
       return;
@@ -294,7 +309,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   }
 
   @Override
-  public QueryDataSet createNewDataSet(String statement, int fetchSize, TSFetchResultsReq req)
+  protected QueryDataSet createNewDataSet(String statement, int fetchSize, TSFetchResultsReq req)
       throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException,
       ProcessorException, IOException {
     PhysicalPlan physicalPlan = queryStatus.get().get(statement);
@@ -306,15 +321,26 @@ public class TSServiceClusterImpl extends TSServiceImpl {
     contextMapLocal.get().put(req.queryId, context);
 
     queryManager.addSingleQuery(jobId, (QueryPlan) physicalPlan);
-    QueryDataSet queryDataSet = processor.getExecutor().processQuery((QueryPlan) physicalPlan,
+    QueryDataSet queryDataSet = processor.getExecutor().processQuery(physicalPlan,
         context);
-    queryRet.get().put(statement, queryDataSet);
+    try {
+      queryRet.get().put(statement, queryDataSet);
+    } catch (Exception e) {
+      LOGGER.error("Fail to cache query data set of statement {}.", statement, e);
+    }
     return queryDataSet;
   }
+
+  @Override
+  public void handleClientExit() throws TException {
+    closeClusterService();
+    closeOperation(null);
+    closeSession(null);
+  }
+
   /**
    * Close cluster service
    */
-  @Override
   public void closeClusterService() {
     nonQueryExecutor.shutdown();
     queryMetadataExecutor.shutdown();
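
The parseConsistencyLevel helper added above takes the last whitespace-separated token of the statement as the level name and hands it to ClusterConsistencyLevel.getLevel. A minimal sketch of the tokenizing step in isolation (the statement text and class name are illustrative; the real level mapping is not reproduced here):

    public class ConsistencyLevelParseSketch {

      // Extract the level name from statements such as
      // "set read data consistency level to strong".
      static String levelNameOf(String statement) {
        String[] splits = statement.toLowerCase().trim().split("\\s+");
        return splits[splits.length - 1];
      }

      public static void main(String[] args) {
        System.out.println(levelNameOf("set read data consistency level to strong"));
        // prints: strong
      }
    }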
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java
new file mode 100644
index 0000000..21b02be
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+@Command(name = "host", description = "Print all data partitions information which specific host belongs to")
+public class Host extends NodeToolCmd {
+
+  private static final int DEFAULT_PORT = -1;
+
+  @Option(title = "ip", name = {"-i", "--ip"}, description = "Specify the host ip for accurate hosts information")
+  private String ip = "127.0.0.1";
+
+  @Option(title = "port", name = {"-p", "--port"}, description = "Specify the host port for accurate hosts information")
+  private int port = DEFAULT_PORT;
+
+  @Option(title = "sg_detail", name = {"-d", "--detail"}, description = "Show path of storage groups")
+  private boolean detail = false;
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String[], String[]> map;
+    if (port == DEFAULT_PORT) {
+      map = proxy.getDataPartitonOfNode(ip);
+    } else {
+      map = proxy.getDataPartitonOfNode(ip, port);
+    }
+
+    if (map == null) {
+      System.out.println("Can't find the input IP.");
+      return;
+    }
+
+    for (Entry<String[], String[]> entry : map.entrySet()) {
+      StringBuilder builder = new StringBuilder();
+      String[] ips = entry.getKey();
+      String[] sgs = entry.getValue();
+      builder.append('(');
+      for (int i = 0; i < ips.length; i++) {
+        builder.append(ips[i]).append(", ");
+      }
+      builder.delete(builder.length() - 2, builder.length());
+      builder.append(')');
+
+      builder.append("\t->\t");
+      if (detail) {
+        builder.append('(');
+        for (int i = 0; i < sgs.length; i++) {
+          builder.append(sgs[i]).append(", ");
+        }
+        if (sgs.length > 0) {
+          builder.delete(builder.length() - 2, builder.length());
+        }
+        builder.append(')');
+      } else {
+        builder.append(sgs.length);
+      }
+
+      System.out.println(builder.toString());
+    }
+  }
+}
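
An illustrative invocation of the command above; the IP and the printed partition are hypothetical:

    > nodetool host -i 192.168.130.1 -d
    (192.168.130.1, 192.168.130.2, 192.168.130.3)    ->    (root.sg1, root.sg2)

Without -d, only the number of storage groups in each partition is printed after the arrow.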
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
similarity index 50%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
index 3bb8d7d..1cc852b 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
@@ -16,22 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.iotdb.cluster.service.nodetool;
 
-package org.apache.iotdb.db.query.aggregation.impl;
+import io.airlift.airline.Command;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
 
-import org.apache.iotdb.db.query.aggregation.AggreResultData;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-
-public class SumAggrFunc extends MeanAggrFunc {
-
-  public SumAggrFunc(TSDataType seriesDataType) {
-    super(seriesDataType);
-  }
+@Command(name = "lag", description = "Print log lag for all groups of connected host")
+public class Lag extends NodeToolCmd {
 
   @Override
-  public AggreResultData getResult() {
-    resultData.setDoubleRet(sum);
-    resultData.setTimestamp(0);
-    return resultData;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Map<String, Long>> groupMap = proxy.getReplicaLagMap();
+    for (Entry<String, Map<String, Long>> entry : groupMap.entrySet()) {
+      if (entry.getValue() == null) {
+        continue;
+      }
+      System.out.println(entry.getKey() + ":");
+      entry.getValue().forEach((node, lag) -> System.out.println("\t" + node + "\t->\t" + lag));
+    }
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java
new file mode 100644
index 0000000..9d464b3
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import static java.lang.String.format;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.Lists;
+import io.airlift.airline.Cli;
+import io.airlift.airline.Help;
+import io.airlift.airline.Option;
+import io.airlift.airline.OptionType;
+import io.airlift.airline.ParseArgumentsMissingException;
+import io.airlift.airline.ParseArgumentsUnexpectedException;
+import io.airlift.airline.ParseCommandMissingException;
+import io.airlift.airline.ParseCommandUnrecognizedException;
+import io.airlift.airline.ParseOptionConversionException;
+import io.airlift.airline.ParseOptionMissingException;
+import io.airlift.airline.ParseOptionMissingValueException;
+import java.io.IOException;
+import java.util.List;
+import javax.management.JMX;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import org.apache.iotdb.cluster.service.ClusterMonitor;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+public class NodeTool {
+
+  public static void main(String... args) {
+    List<Class<? extends Runnable>> commands = Lists.newArrayList(
+        Help.class,
+        Ring.class,
+        StorageGroup.class,
+        Host.class,
+        Lag.class,
+        Query.class,
+        Status.class
+    );
+
+    Cli.CliBuilder<Runnable> builder = Cli.builder("nodetool");
+
+    builder.withDescription("Manage your IoTDB cluster")
+        .withDefaultCommand(Help.class)
+        .withCommands(commands);
+
+    Cli<Runnable> parser = builder.build();
+
+    int status = 0;
+    try {
+      Runnable parse = parser.parse(args);
+      parse.run();
+    } catch (IllegalArgumentException |
+        IllegalStateException |
+        ParseArgumentsMissingException |
+        ParseArgumentsUnexpectedException |
+        ParseOptionConversionException |
+        ParseOptionMissingException |
+        ParseOptionMissingValueException |
+        ParseCommandMissingException |
+        ParseCommandUnrecognizedException e) {
+      badUse(e);
+      status = 1;
+    } catch (Exception e) {
+      err(Throwables.getRootCause(e));
+      status = 2;
+    }
+
+    System.exit(status);
+  }
+
+  private static void badUse(Exception e) {
+    System.out.println("nodetool: " + e.getMessage());
+    System.out.println("See 'nodetool help' or 'nodetool help <command>'.");
+  }
+
+  private static void err(Throwable e) {
+    System.err.println("error: " + e.getMessage());
+    System.err.println("-- StackTrace --");
+    System.err.println(Throwables.getStackTraceAsString(e));
+  }
+
+  public abstract static class NodeToolCmd implements Runnable {
+
+    @Option(type = OptionType.GLOBAL, name = {"-h",
+        "--host"}, description = "Node hostname or ip address")
+    private String host = "127.0.0.1";
+
+    @Option(type = OptionType.GLOBAL, name = {"-p",
+        "--port"}, description = "Remote jmx agent port number")
+    private String port = "31999";
+
+    private static final String JMX_URL_FORMAT = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi";
+
+    @Override
+    public void run() {
+      try {
+        MBeanServerConnection mbsc = connect();
+        ObjectName name = new ObjectName(ClusterMonitor.MBEAN_NAME);
+        ClusterMonitorMBean clusterMonitorProxy = JMX
+            .newMBeanProxy(mbsc, name, ClusterMonitorMBean.class);
+        execute(clusterMonitorProxy);
+      } catch (MalformedObjectNameException e) {
+        e.printStackTrace();
+      }
+    }
+
+    protected abstract void execute(ClusterMonitorMBean probe);
+
+    private MBeanServerConnection connect() {
+      MBeanServerConnection mbsc = null;
+
+      try {
+        String jmxURL = String.format(JMX_URL_FORMAT, host, port);
+        JMXServiceURL serviceURL = new JMXServiceURL(jmxURL);
+        JMXConnector connector = JMXConnectorFactory.connect(serviceURL);
+        mbsc = connector.getMBeanServerConnection();
+      } catch (IOException e) {
+        Throwable rootCause = Throwables.getRootCause(e);
+        System.err.println(format("nodetool: Failed to connect to '%s:%s' - %s: '%s'.", host, port,
+            rootCause.getClass().getSimpleName(), rootCause.getMessage()));
+        System.exit(1);
+      }
+
+      return mbsc;
+    }
+  }
+}
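
Illustrative invocations, assuming the defaults above (JMX agent at 127.0.0.1:31999); the remote host is hypothetical:

    > nodetool ring
    > nodetool -h 192.168.130.1 -p 31999 status
    > nodetool help host

Argument errors exit with status 1 and unexpected failures with status 2, matching the catch blocks in main.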
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java
new file mode 100644
index 0000000..d0682fd
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+@Command(name = "query", description = "Print number of query jobs for all data partitions for all hosts")
+public class Query extends NodeToolCmd {
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Map<String, Integer>> queryNumMap = proxy.getQueryJobNumMap();
+    queryNumMap.forEach((ip, map) -> {
+      System.out.println(ip + ":");
+      if (map != null) {
+        map.forEach((groupId, num) -> System.out.println("\t" + groupId + "\t->\t" + num));
+      }
+    });
+    final int[] sum = {0};
+    queryNumMap.forEach((ip, map) -> {
+      if (map != null) {
+        map.forEach((groupId, num) -> sum[0] += num);
+      }
+    });
+    System.out.println("Total\t->\t" + sum[0]);
+  }
+}
\ No newline at end of file
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
similarity index 51%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
index 3bb8d7d..8dd96a0 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
@@ -16,22 +16,24 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.iotdb.cluster.service.nodetool;
 
-package org.apache.iotdb.db.query.aggregation.impl;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
 
-import org.apache.iotdb.db.query.aggregation.AggreResultData;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+@Command(name = "ring", description = "Print information about the hash ring")
+public class Ring extends NodeToolCmd {
 
-public class SumAggrFunc extends MeanAggrFunc {
-
-  public SumAggrFunc(TSDataType seriesDataType) {
-    super(seriesDataType);
-  }
+  @Option(title = "physical_ring", name = {"-p", "--physical"}, description = "Show physical nodes instead of virtual ones")
+  private boolean physical = false;
 
   @Override
-  public AggreResultData getResult() {
-    resultData.setDoubleRet(sum);
-    resultData.setTimestamp(0);
-    return resultData;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<Integer, String> map = physical ? proxy.getPhysicalRing() : proxy.getVirtualRing();
+    map.forEach((hash, ip) -> System.out.println(hash + "\t->\t" + ip));
   }
-}
+}
\ No newline at end of file
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
old mode 100755
new mode 100644
similarity index 59%
copy from spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
index 684e0e5..302a1c7
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
@@ -16,32 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.common;
+package org.apache.iotdb.cluster.service.nodetool;
 
-/**
- * This class is a superclass of all operator. 
- *
- */
-public abstract class Operator {
+import io.airlift.airline.Command;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
 
-  int tokenIntType;
-  String tokenSymbol;
-
-  Operator(int tokenIntType) {
-    this.tokenIntType = tokenIntType;
-    this.tokenSymbol = SQLConstant.tokenSymbol.get(tokenIntType);
-  }
-
-  public int getTokenIntType() {
-    return tokenIntType;
-  }
-
-  public String getTokenSymbol() {
-    return tokenSymbol;
-  }
+@Command(name = "status", description = "Print status of all hosts")
+public class Status extends NodeToolCmd {
 
   @Override
-  public String toString() {
-    return tokenSymbol;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Boolean> statusMap = proxy.getStatusMap();
+    statusMap.forEach((ip, status) -> System.out.println(ip + "\t->\t" + (status ? "on" : "off")));
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
new file mode 100644
index 0000000..e44aa64
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+
+@Command(name = "storagegroup", description = "Print all hosts information of specific storage group")
+public class StorageGroup extends NodeToolCmd {
+
+  @Option(title = "all storagegroup", name = {"-a", "--all"}, description = "Show hosts info of all storage groups")
+  private boolean showAll = false;
+
+  @Option(title = "storage group", name = {"-sg",
+      "--storagegroup"}, description = "Specify a storage group for accurate hosts information")
+  private String sg = null;
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Set<String> sgSet;
+    if (showAll) {
+      sgSet = proxy.getAllStorageGroupsLocally();
+    } else {
+      sgSet = new HashSet<>();
+      sgSet.add(sg);
+    }
+
+    if (!showAll && sg == null) {
+      System.out.println("Metadata\t->\t" + proxy.getDataPartitionOfSG(sg));
+    } else {
+      sgSet.forEach(sg -> System.out.println(sg + "\t->\t" + proxy.getDataPartitionOfSG(sg)));
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
index 809a01c..80dec3a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
@@ -45,10 +45,13 @@ public class QPExecutorUtils {
   private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
       CLUSTER_CONFIG.getPort());
 
-  private static final  MManager mManager = MManager.getInstance();
+  private static final MManager mManager = MManager.getInstance();
 
   private static final Server server = Server.getInstance();
 
+  private QPExecutorUtils() {
+  }
+
   /**
    * Get Storage Group Name by device name
    */
@@ -85,13 +88,8 @@ public class QPExecutorUtils {
     for (int i = 0; i < sgList.size(); i++) {
       String sg = sgList.get(i);
       String groupId = router.getGroupIdBySG(sg);
-      if (map.containsKey(groupId)) {
-        map.get(groupId).add(sg);
-      } else {
-        Set<String> set = new HashSet<>();
-        set.add(sg);
-        map.put(groupId, set);
-      }
+      map.putIfAbsent(groupId, new HashSet<>());
+      map.get(groupId).add(sg);
     }
     return map;
   }
@@ -102,9 +100,9 @@ public class QPExecutorUtils {
    */
   public static boolean canHandleNonQueryByGroupId(String groupId) {
     boolean canHandle = false;
-    if(groupId.equals(ClusterConfig.METADATA_GROUP_ID)){
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       canHandle = ((MetadataRaftHolder) (server.getMetadataHolder())).getFsm().isLeader();
-    }else {
+    } else {
       if (checkDataGroupLeader(groupId)) {
         canHandle = true;
       }
@@ -121,7 +119,7 @@ public class QPExecutorUtils {
   public static boolean checkDataGroupLeader(String groupId) {
     boolean isLeader = false;
     if (router.containPhysicalNodeByGroupId(groupId, localNode) && RaftUtils
-        .getPhysicalNodeFrom(RaftUtils.getLeaderPeerID(groupId)).equals(localNode)) {
+        .getPhysicalNodeFrom(RaftUtils.getLocalLeaderPeerID(groupId)).equals(localNode)) {
       isLeader = true;
     }
     return isLeader;
@@ -141,8 +139,7 @@ public class QPExecutorUtils {
    */
   public static String getGroupIdByDevice(String device) throws PathErrorException {
     String storageGroup = QPExecutorUtils.getStroageGroupByDevice(device);
-    String groupId = Router.getInstance().getGroupIdBySG(storageGroup);
-    return groupId;
+    return Router.getInstance().getGroupIdBySG(storageGroup);
   }
 
   /**
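
As an aside, the putIfAbsent-then-get pair introduced in the hunk above can be collapsed into a single computeIfAbsent call. A minimal sketch of the same grouping step under that variant (class and method names are illustrative):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class GroupBySketch {

      // Group storage groups by group id, creating each bucket on first use.
      static Map<String, Set<String>> group(Map<String, String> sgToGroupId) {
        Map<String, Set<String>> map = new HashMap<>();
        sgToGroupId.forEach((sg, groupId) ->
            map.computeIfAbsent(groupId, k -> new HashSet<>()).add(sg));
        return map;
      }
    }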
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index be6eea0..96e0363 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -23,12 +23,24 @@ import com.alipay.remoting.exception.CodecException;
 import com.alipay.remoting.serialization.SerializerManager;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import com.alipay.sofa.jraft.core.NodeImpl;
 import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.entity.Task;
 import com.alipay.sofa.jraft.util.Bits;
 import com.alipay.sofa.jraft.util.OnlyForTest;
+import com.codahale.metrics.Gauge;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -37,31 +49,41 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.MetadataStateManchine;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
 import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryJobNumRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryLeaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryStatusRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryJobNumResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryLeaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryStatusResponse;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.cluster.utils.hash.VirtualNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class RaftUtils {
 
-  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
-
   private static final Logger LOGGER = LoggerFactory.getLogger(RaftUtils.class);
   private static final Server server = Server.getInstance();
   private static final Router router = Router.getInstance();
   private static final AtomicInteger requestId = new AtomicInteger(0);
+  private static final ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
   /**
    * Raft as client manager.
    */
@@ -70,27 +92,128 @@ public class RaftUtils {
 
   /**
    * The cache will be updated in three cases: 1. When the @onLeaderStart() method of the state machine is
-   * called, the cache will be update. 2. When @getLeaderPeerID() in this class is called and cache
-   * don't have the key, it's will get random peer and update. 3. When @redirected of BasicRequest
-   * is true, the task will be retry and the cache will update.
+   * called, the cache will be updated. 2. When @getLocalLeaderPeerID() in this class is called and
+   * the cache doesn't have the key, a random peer will be chosen and cached. 3. When @redirected of
+   * BasicRequest is true, the task will be retried and the cache will be updated.
    */
   private static final ConcurrentHashMap<String, PeerId> groupLeaderCache = new ConcurrentHashMap<>();
 
+  private static ThreadLocal<Map<String, Integer>> nodeIndexMap = ThreadLocal.withInitial(() -> {
+    Map<String, Integer> map = new HashMap<>();
+    router.getAllGroupId().forEach(groupId -> {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      map.put(groupId, getRandomInt(physicalNodes.length));
+    });
+    return map;
+  });
+
   private RaftUtils() {
   }
 
   /**
+   * Get the peer ID of the given group in round-robin order
+   *
+   * @return node id
+   */
+  public static PeerId getPeerIDInOrder(String groupId) {
+    int index;
+    PeerId peerId;
+    int len;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      RaftService service = (RaftService) server.getMetadataHolder().getService();
+      List<PeerId> peerIdList = service.getPeerIdList();
+      len = peerIdList.size();
+      index = nodeIndexMap.get().getOrDefault(groupId, getRandomInt(peerIdList.size()));
+      peerId = peerIdList.get(index);
+    } else {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      len = physicalNodes.length;
+      index = nodeIndexMap.get().getOrDefault(groupId, getRandomInt(physicalNodes.length));
+      peerId = getPeerIDFrom(physicalNodes[index]);
+    }
+    nodeIndexMap.get().put(groupId, (index + 1) % len);
+
+    LOGGER.debug("Get node {} for group {}", peerId, groupId);
+
+    return peerId;
+  }
+
+  public static void updatePeerIDOrder(PeerId peerId, String groupId) {
+    int index = -1;
+    int len;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      RaftService service = (RaftService) server.getMetadataHolder().getService();
+      List<PeerId> peerIdList = service.getPeerIdList();
+      len = peerIdList.size();
+      index = peerIdList.indexOf(peerId);
+    } else {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      len = physicalNodes.length;
+      PhysicalNode node = getPhysicalNodeFrom(peerId);
+      for (int i = 0; i < physicalNodes.length; i++) {
+        if (physicalNodes[i].equals(node)) {
+          index = i;
+          break;
+        }
+      }
+    }
+
+    if (index == -1) {
+      LOGGER.warn(
+          "Fail to update order of node {} for group {}, because the group doesn't contain it.",
+          peerId, groupId);
+    } else {
+      LOGGER.debug("Update order of node {} for group {}, current index is {}", peerId, groupId,
+          index);
+      nodeIndexMap.get().put(groupId, (index + 1) % len);
+    }
+  }
+
+  /**
    * Get peer id to send request. If groupLeaderCache has the group id, then return the leader id
    * of the group. Otherwise, pick a random peer of the group.
    *
    * @return leader id
    */
-  public static PeerId getLeaderPeerID(String groupId) {
-    if (!groupLeaderCache.containsKey(groupId)) {
-      PeerId randomPeerId = getRandomPeerID(groupId);
-      groupLeaderCache.put(groupId, randomPeerId);
+  public static PeerId getLocalLeaderPeerID(String groupId) {
+    PeerId leader;
+    if (groupLeaderCache.containsKey(groupId)) {
+      leader = groupLeaderCache.get(groupId);
+    } else {
+      leader = getRandomPeerID(groupId);
+      groupLeaderCache.put(groupId, leader);
+    }
+    LOGGER.debug("Get local cached leader {} of group {}.", leader, groupId);
+    return leader;
+  }
+
+  /**
+   * Query the leader of the given group from the given remote node rather than the local cache.
+   *
+   * @return leader id
+   */
+  public static PeerId getLeaderPeerIDFromRemoteNode(PeerId peerId, String groupId) {
+    QueryLeaderRequest request = new QueryLeaderRequest(groupId);
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get leader of group {} from node {}.", groupId, peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      PeerId leader = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        leader = response == null ? null : ((QueryLeaderResponse) response).getLeader();
+      }
+      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, peerId);
+      return leader;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get leader of group {} from remote node {} because of {}.", groupId,
+          peerId, e.getMessage());
+      return null;
     }
-    return groupLeaderCache.get(groupId);
   }
 
   /**
@@ -100,18 +223,29 @@ public class RaftUtils {
     return getRandomPeerID(groupId, server, router);
   }
 
+  /**
+   * Get random peer id
+   */
   public static PeerId getRandomPeerID(String groupId, Server server, Router router) {
-    PeerId randomPeerId;
+    List<PeerId> peerIdList = getPeerIDList(groupId, server, router);
+    return peerIdList.get(getRandomInt(peerIdList.size()));
+  }
+
+  /**
+   * Get peer id list by groupid
+   */
+  public static List<PeerId> getPeerIDList(String groupId, Server server, Router router) {
+    List<PeerId> peerIdList = new ArrayList<>();
     if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       RaftService service = (RaftService) server.getMetadataHolder().getService();
-      List<PeerId> peerIdList = service.getPeerIdList();
-      randomPeerId = peerIdList.get(getRandomInt(peerIdList.size()));
+      peerIdList.addAll(service.getPeerIdList());
     } else {
       PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
-      PhysicalNode node = physicalNodes[getRandomInt(physicalNodes.length)];
-      randomPeerId = getPeerIDFrom(node);
+      for (PhysicalNode node : physicalNodes) {
+        peerIdList.add(getPeerIDFrom(node));
+      }
     }
-    return randomPeerId;
+    return peerIdList;
   }
 
   /**
@@ -140,16 +274,6 @@ public class RaftUtils {
     return peerIds;
   }
 
-  @Deprecated
-  public static int getIndexOfIpFromRaftNodeList(String ip, PeerId[] peerIds) {
-    for (int i = 0; i < peerIds.length; i++) {
-      if (peerIds[i].getIp().equals(ip)) {
-        return i;
-      }
-    }
-    return -1;
-  }
-
   public static PhysicalNode[] getPhysicalNodeArrayFrom(PeerId[] peerIds) {
     PhysicalNode[] physicalNodes = new PhysicalNode[peerIds.length];
     for (int i = 0; i < peerIds.length; i++) {
@@ -188,7 +312,7 @@ public class RaftUtils {
 
   @OnlyForTest
   public static void clearRaftGroupLeader() {
-	  groupLeaderCache.clear();
+    groupLeaderCache.clear();
   }
 
   /**
@@ -206,7 +330,7 @@ public class RaftUtils {
       if (!status.isOk()) {
         response.setErrorMsg(status.getErrorMsg());
       }
-      qpTask.run(response);
+      qpTask.receive(response);
     });
     task.setDone(closure);
     try {
@@ -236,8 +360,10 @@ public class RaftUtils {
       }
       asyncContext.sendResponse(response);
     });
-    LOGGER.debug(
-        String.format("Processor batch size() : %d", request.getPhysicalPlanBytes().size()));
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug(
+          String.format("Processor batch size() : %d", request.getPhysicalPlanBytes().size()));
+    }
     task.setDone(closure);
     try {
       task.setData(ByteBuffer
@@ -305,11 +431,12 @@ public class RaftUtils {
                 status.setCode(-1);
                 status.setErrorMsg(status.getErrorMsg());
               }
-              nullReadTask.run(response);
+              nullReadTask.receive(response);
             }
           });
       nullReadTask.await();
     } catch (InterruptedException e) {
+      LOGGER.warn("Exception {} occurs while handling null read to metadata group.", e);
       status.setCode(-1);
       status.setErrorMsg(e.getMessage());
     }
@@ -323,12 +450,13 @@ public class RaftUtils {
     handleNullReadToDataGroup(status, server, nullReadTask, groupId);
   }
 
-  private static void handleNullReadToDataGroup(Status status, Server server,
+  private static void handleNullReadToDataGroup(Status resultStatus, Server server,
       SingleQPTask nullReadTask, String groupId) {
     try {
       LOGGER.debug("Handle null-read in data group for reading.");
       final byte[] reqContext = RaftUtils.createRaftRequestContext();
-      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server.getDataPartitionHolder(groupId);
+      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server
+          .getDataPartitionHolder(groupId);
       ((RaftService) dataPartitionRaftHolder.getService()).getNode()
           .readIndex(reqContext, new ReadIndexClosure() {
             @Override
@@ -336,37 +464,354 @@ public class RaftUtils {
               BasicResponse response = DataGroupNonQueryResponse
                   .createEmptyResponse(groupId);
               if (!status.isOk()) {
-                status.setCode(-1);
-                status.setErrorMsg(status.getErrorMsg());
+                resultStatus.setCode(-1);
+                resultStatus.setErrorMsg(status.getErrorMsg());
               }
-              nullReadTask.run(response);
+              nullReadTask.receive(response);
             }
           });
       nullReadTask.await();
     } catch (InterruptedException e) {
-      status.setCode(-1);
-      status.setErrorMsg(e.getMessage());
+      resultStatus.setCode(-1);
+      resultStatus.setErrorMsg(e.getMessage());
     }
   }
 
-  public static Status createErrorStatus(String errorMsg){
+  public static Status createErrorStatus(String errorMsg) {
     Status status = new Status();
     status.setErrorMsg(errorMsg);
     status.setCode(-1);
     return status;
   }
 
+  public static Map<String, PeerId> getGroupLeaderCache() {
+    return groupLeaderCache;
+  }
+
+  public static Map<Integer, String> getPhysicalRing() {
+    SortedMap<Integer, PhysicalNode> hashNodeMap = router.getPhysicalRing();
+    Map<Integer, String> res = new LinkedHashMap<>();
+    hashNodeMap.forEach((key, value) -> res.put(key, value.getIp()));
+    return res;
+  }
+
+  public static Map<Integer, String> getVirtualRing() {
+    SortedMap<Integer, VirtualNode> hashNodeMap = router.getVirtualRing();
+    Map<Integer, String> res = new LinkedHashMap<>();
+    hashNodeMap.forEach((key, value) -> res.put(key, value.getPhysicalNode().getIp()));
+    return res;
+  }
+
   /**
-   * try to get raft rpc client
+   * Get all node information of the data group of the input storage group. The first node is the
+   * current leader.
+   *
+   * @param sg storage group ID. If null, return metadata group info
    */
-  public static NodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
-    NodeAsClient client = CLIENT_MANAGER.getRaftNodeAsClient();
-    if (client == null) {
-      throw new RaftConnectionException(String
-          .format("Raft inner rpc clients have reached the max numbers %s",
-              CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
-                  .getMaxQueueNumOfInnerRpcClient()));
-    }
-    return client;
+  public static PeerId[] getDataPartitionOfSG(String sg) {
+    return getDataPartitionOfSG(sg, server, router);
+  }
+
+  public static PeerId[] getDataPartitionOfSG(String sg, Server server, Router router) {
+    String groupId;
+    PeerId[] nodes;
+    if (sg == null) {
+      groupId = ClusterConfig.METADATA_GROUP_ID;
+      List<PeerId> peerIdList = ((RaftService) server.getMetadataHolder().getService())
+          .getPeerIdList();
+      nodes = peerIdList.toArray(new PeerId[peerIdList.size()]);
+    } else {
+      PhysicalNode[] group = router.routeGroup(sg);
+      groupId = router.getGroupID(group);
+      nodes = getPeerIdArrayFrom(group);
+    }
+
+    PeerId leader = null;
+    for (PeerId node : nodes) {
+      LOGGER.debug("Try to get leader of group {} from node {}.", groupId, node);
+      leader = getLeaderPeerIDFromRemoteNode(node, groupId);
+      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, node);
+      if (leader != null) {
+        break;
+      }
+    }
+
+    if (leader == null) {
+      LOGGER
+          .debug("Fail to get leader of group {} from all remote nodes, get it locally.", groupId);
+      leader = RaftUtils.getLocalLeaderPeerID(groupId);
+      LOGGER.debug("Get leader {} of group {} locally.", leader, groupId);
+    }
+
+    // move the leader (if one was resolved) to the front; comparing from
+    // nodes[i] avoids an NPE when no leader could be found at all
+    for (int i = 0; i < nodes.length; i++) {
+      if (nodes[i].equals(leader)) {
+        PeerId t = nodes[i];
+        nodes[i] = nodes[0];
+        nodes[0] = t;
+        break;
+      }
+    }
+    return nodes;
+  }
+
+  /**
+   * Get the data partitions that the input node belongs to.
+   *
+   * @param ip node ip
+   * @return key: node ips of one data partition, value: storage group paths that belong to this
+   * data partition
+   */
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip) {
+    return getDataPartitionOfNode(ip, config.getPort());
+  }
+
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port) {
+    return getDataPartitionOfNode(ip, port, server, router);
+  }
+
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port, Server server,
+      Router router) {
+    PhysicalNode[][] groups = router.getGroupsNodes(ip, port);
+    if (groups == null) {
+      return null;
+    }
+
+    Map<String, List<String>> groupSGMap = new LinkedHashMap<>();
+    for (int i = 0; i < groups.length; i++) {
+      groupSGMap.put(generateStringKey(groups[i]), new ArrayList<>());
+    }
+    Set<String> allSGList = ((MetadataStateManchine) ((RaftService) server.getMetadataHolder()
+        .getService()).getFsm()).getAllStorageGroups();
+    for (String sg : allSGList) {
+      String key = generateStringKey(router.routeGroup(sg));
+      if (groupSGMap.containsKey(key)) {
+        groupSGMap.get(key).add(sg);
+      }
+    }
+
+    String[][] groupIps = new String[groups.length][];
+    for (int i = 0; i < groups.length; i++) {
+      groupIps[i] = new String[groups[i].length];
+      for (int j = 0; j < groups[i].length; j++) {
+        groupIps[i][j] = groups[i][j].getIp();
+      }
+    }
+
+    Map<String[], String[]> res = new HashMap<>();
+    int index = 0;
+    for (Entry<String, List<String>> entry : groupSGMap.entrySet()) {
+      res.put(groupIps[index], entry.getValue().toArray(new String[entry.getValue().size()]));
+      index++;
+    }
+    return res;
+  }
+
+  private static String generateStringKey(PhysicalNode[] nodes) {
+    if (nodes == null || nodes.length == 0) {
+      return "";
+    }
+    // sort a copy so the caller's (possibly cached) group array is not mutated
+    PhysicalNode[] sorted = Arrays.copyOf(nodes, nodes.length);
+    Arrays.sort(sorted, Comparator.comparing(PhysicalNode::toString));
+    StringBuilder builder = new StringBuilder();
+    builder.append(sorted[0]);
+    for (int i = 1; i < sorted.length; i++) {
+      builder.append('#').append(sorted[i]);
+    }
+    return builder.toString();
+  }
+
+  /**
+   * Get the replica lag for the metadata group and each data partition.
+   *
+   * @return key: groupId, value: ip -> replica lag
+   */
+  public static Map<String, Map<String, Long>> getReplicaLagMap() {
+    return getReplicaMetricMap("log-lags");
+  }
+
+  public static Map<String, Map<String, Long>> getReplicaMetricMap(String metric) {
+    Map<String, Map<String, Long>> metricMap = new HashMap<>();
+    RaftService raftService = (RaftService) server.getMetadataHolder().getService();
+    metricMap.put(raftService.getGroupId(), getReplicaMetricFromRaftService(raftService, metric));
+
+    router.getAllGroupId()
+        .forEach(groupId -> metricMap.put(groupId, getReplicaMetric(groupId, metric)));
+    return metricMap;
+  }
+
+  public static Map<String, Long> getReplicaMetric(String groupId, String metric) {
+    if (server.getDataPartitionHolderMap().containsKey(groupId)) {
+      RaftService service = (RaftService) server.getDataPartitionHolder(groupId).getService();
+      return getReplicaMetricFromRaftService(service, metric);
+    } else {
+      LOGGER.debug("Current host does not contain group {}, all groups are {}.", groupId,
+          server.getDataPartitionHolderMap().keySet());
+      return getReplicaMetricFromRemoteNode(groupId, metric);
+    }
+  }
+
+  private static Map<String, Long> getReplicaMetricFromRaftService(RaftService service,
+      String metric) {
+    String groupId = service.getGroupId();
+    LOGGER.debug("Get replica metric {} for group {}.", metric, service.getGroupId());
+    NodeImpl node = (NodeImpl) service.getNode();
+    Map<String, Long> lagMap;
+    if (node.isLeader()) {
+      LOGGER.debug("Get metric locally.");
+      List<PeerId> nodes = service.getPeerIdList();
+      Map<String, Gauge> metrics = service.getNode().getNodeMetrics().getMetricRegistry()
+          .getGauges();
+
+      lagMap = new HashMap<>();
+      String keyFormat = "replicator-%s/%s.%s";
+      for (int i = 0; i < nodes.size(); i++) {
+        // leader doesn't have lag metric
+        if (nodes.get(i).equals(node.getServerId())) {
+          lagMap.put(nodes.get(i).getIp() + " (leader)", 0L);
+          continue;
+        }
+
+        String key = String.format(keyFormat, groupId, nodes.get(i), metric);
+        long value = -1;
+        if (metrics.containsKey(key)) {
+          value = (long) metrics.get(key).getValue();
+        } else {
+          LOGGER.warn("Metric map {} should contain key {}, but not.", metrics, key);
+        }
+        lagMap.put(nodes.get(i).getIp(), value);
+      }
+    } else {
+      lagMap = getReplicaMetricFromRemoteNode(groupId, metric);
+    }
+    return lagMap;
+  }
+
+  private static Map<String, Long> getReplicaMetricFromRemoteNode(String groupId, String metric) {
+    QueryMetricRequest request = new QueryMetricRequest(groupId, metric);
+    SingleQPTask task = new SingleQPTask(false, request);
+
+    LOGGER.debug("Execute get metric for {} statement for group {}.", metric, groupId);
+    PeerId holder = RaftUtils.getLocalLeaderPeerID(groupId);
+    LOGGER.debug("Get metric from node {}.", holder);
+    task.setTargetNode(holder);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      Map<String, Long> value = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        value = response == null ? null : ((QueryMetricResponse) response).getValue();
+      }
+      return value;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get replica metric from remote node because of {}.", e);
+      return null;
+    }
+  }
+
+  /**
+   * Get the number of query jobs running on each data partition for all nodes.
+   *
+   * @return outer key: ip, inner key: groupId, value: number of query jobs
+   */
+  public static Map<String, Map<String, Integer>> getQueryJobNumMapForCluster() {
+    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
+    Map<String, Map<String, Integer>> res = new HashMap<>();
+    for (int i = 0; i < peerIds.length; i++) {
+      PeerId peerId = peerIds[i];
+      res.put(peerId.getIp(), getQueryJobNumMapFromRemoteNode(peerId));
+    }
+
+    return res;
+  }
+
+  public static Map<String, Integer> getLocalQueryJobNumMap() {
+    return ClusterRpcQueryManager.getInstance().getAllReadUsage();
+  }
+
+  private static Map<String, Integer> getQueryJobNumMapFromRemoteNode(PeerId peerId) {
+    QueryJobNumRequest request = new QueryJobNumRequest("");
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get query job num map for node {}.", peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      Map<String, Integer> value = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        value = response == null ? null : ((QueryJobNumResponse) response).getValue();
+      }
+      return value;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get query job num map from remote node {} because of {}.", peerId, e);
+      return null;
+    }
+  }
+
+  /**
+   * Get the status of each node in the cluster.
+   *
+   * @return key: node ip, value: live or not
+   */
+  public static Map<String, Boolean> getStatusMapForCluster() {
+    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
+    SortedMap<String, Boolean> treeMap = new TreeMap<>(new Comparator<String>() {
+      @Override
+      public int compare(String o1, String o2) {
+        int[] nums1 = convertIPToNums(o1);
+        int[] nums2 = convertIPToNums(o2);
+        for (int i = 0; i < Math.min(nums1.length, nums2.length); i++) {
+          if (nums1[i] != nums2[i]) {
+            return Integer.compare(nums1[i], nums2[i]);
+          }
+        }
+        return 0;
+      }
+
+      private int[] convertIPToNums(String ip) {
+        String[] ss = ip.split("\\.");
+        int[] nums = new int[ss.length];
+        for (int i = 0; i < nums.length; i++) {
+          nums[i] = Integer.parseInt(ss[i]);
+        }
+        return nums;
+      }
+    });
+    for (int i = 0; i < peerIds.length; i++) {
+      PeerId peerId = peerIds[i];
+      treeMap.put(peerId.getIp(), getStatusOfNode(peerId));
+    }
+
+    Map<String, Boolean> res = new LinkedHashMap<>();
+    treeMap.forEach((ip, status) -> res.put(ip, status));
+    return res;
+  }
+
+  private static boolean getStatusOfNode(PeerId peerId) {
+    QueryStatusRequest request = new QueryStatusRequest("");
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get status for node {}.", peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      boolean status = false;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        status = response != null && ((QueryStatusResponse) response).getStatus();
+      }
+      return status;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get status from remote node {} because of {}.", peerId, e);
+      return false;
+    }
+  }
+
+  public static Set<String> getAllStorageGroupsLocally() {
+    MetadataRaftHolder metadataRaftHolder = (MetadataRaftHolder) server.getMetadataHolder();
+    return metadataRaftHolder.getFsm().getAllStorageGroups();
   }
 }
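
getStatusMapForCluster above orders node IPs numerically per octet rather than
lexicographically, so "192.168.0.9" sorts before "192.168.0.10". A minimal
standalone sketch of the same ordering, assuming Java 9+ for Arrays.compare
(class and variable names are illustrative, not part of this patch):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.TreeMap;

    public class IpOrderSketch {
      // split a dotted-quad IPv4 string into its numeric octets
      private static int[] toOctets(String ip) {
        return Arrays.stream(ip.split("\\.")).mapToInt(Integer::parseInt).toArray();
      }

      public static void main(String[] args) {
        Comparator<String> byOctets =
            Comparator.comparing(IpOrderSketch::toOctets, Arrays::compare);
        TreeMap<String, Boolean> statusMap = new TreeMap<>(byOctets);
        statusMap.put("192.168.0.10", true);
        statusMap.put("192.168.0.9", false);
        System.out.println(statusMap.keySet()); // [192.168.0.9, 192.168.0.10]
      }
    }
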
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
index 66544a8..b8b6854 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
@@ -23,8 +23,14 @@ import com.alipay.sofa.jraft.util.OnlyForTest;
 public class PhysicalNode {
 
   private String ip;
+
   private int port;
 
+  /**
+   * Group id of the data group whose first node is this PhysicalNode.
+   */
+  private String groupId;
+
   public PhysicalNode(String ip, int port) {
     this.ip = ip;
     this.port = port;
@@ -77,6 +83,14 @@ public class PhysicalNode {
     return port;
   }
 
+  public String getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(String groupId) {
+    this.groupId = groupId;
+  }
+
   @OnlyForTest
   public void setIp(String ip) {
     this.ip = ip;
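
The patch drops Router's node-to-groupId cache in favor of a groupId field set
on the first physical node of each data group (see the Router diff below). A
minimal sketch of the resulting lookup contract, with illustrative addresses:

    import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
    import org.apache.iotdb.cluster.utils.hash.Router;

    public class GroupIdLookupSketch {
      public static void main(String[] args) {
        // Router.init tags only the FIRST node of each data group ...
        PhysicalNode first = new PhysicalNode("192.168.0.1", 8888);
        first.setGroupId(Router.DATA_GROUP_STR + 0);
        PhysicalNode[] group = {first, new PhysicalNode("192.168.0.2", 8888)};
        // ... so Router.getGroupID(group) reduces to group[0].getGroupId()
        System.out.println(group[0].getGroupId()); // data-group-0
      }
    }
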
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
index 544c0fc..0460e05 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
@@ -51,11 +51,6 @@ public class Router {
   private Map<PhysicalNode, PhysicalNode[][]> dataPartitionCache = new HashMap<>();
 
   /**
-   * Key is the first node of the group, value is group id.
-   */
-  private Map<PhysicalNode, String> nodeMapGroupIdCache = new HashMap<>();
-
-  /**
    * Key is group id, value is the first node of the group.
    */
   private Map<String, PhysicalNode> groupIdMapNodeCache = new HashMap<>();
@@ -66,7 +61,9 @@ public class Router {
   public static final String DATA_GROUP_STR = "data-group-";
 
   private HashFunction hashFunction = new MD5Hash();
+
   private final SortedMap<Integer, PhysicalNode> physicalRing = new TreeMap<>();
+
   private final SortedMap<Integer, VirtualNode> virtualRing = new TreeMap<>();
 
   private static class RouterHolder {
@@ -86,8 +83,11 @@ public class Router {
    * Change this method to public for test, you should not invoke this method explicitly.
    */
   public void init() {
+    init(ClusterDescriptor.getInstance().getConfig());
+  }
+
+  public void init(ClusterConfig config) {
     reset();
-    ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
     String[] hosts = config.getNodes();
     int replicator = config.getReplication();
     int numOfVirtualNodes = config.getNumOfVirtualNodes();
@@ -99,17 +99,9 @@ public class Router {
       if (len < replicator) {
         throw new ErrorConfigureExecption(String.format("Replicator number %d is greater "
             + "than cluster number %d", replicator, len));
-      } else if (len == replicator) {
-        PhysicalNode[][] val = new PhysicalNode[1][len];
-        nodeMapGroupIdCache.put(first, DATA_GROUP_STR + "0");
-        groupIdMapNodeCache.put(DATA_GROUP_STR + "0", first);
-        for (int j = 0; j < len; j++) {
-          val[0][j] = nodes[(i + j) % len];
-        }
-        dataPartitionCache.put(first, val);
-      }  else {
+      } else {
         PhysicalNode[][] val = new PhysicalNode[replicator][replicator];
-        nodeMapGroupIdCache.put(first, DATA_GROUP_STR + i);
+        first.setGroupId(DATA_GROUP_STR + i);
         groupIdMapNodeCache.put(DATA_GROUP_STR + i, first);
         for (int j = 0; j < replicator; j++) {
           for (int k = 0; k < replicator; k++) {
@@ -121,7 +113,7 @@ public class Router {
     }
   }
 
-  private void createHashRing(String[] hosts, int numOfVirtualNodes){
+  private void createHashRing(String[] hosts, int numOfVirtualNodes) {
     for (String host : hosts) {
       String[] values = host.split(":");
       PhysicalNode node = new PhysicalNode(values[0].trim(), Integer.parseInt(values[1].trim()));
@@ -145,7 +137,7 @@ public class Router {
   }
 
   public String getGroupID(PhysicalNode[] nodes) {
-    return nodeMapGroupIdCache.get(nodes[0]);
+    return nodes[0].getGroupId();
   }
 
   public PhysicalNode[][] getGroupsNodes(String ip, int port) {
@@ -159,14 +151,14 @@ public class Router {
     physicalRing.put(hashFunction.hash(node.getKey()), node);
     for (int i = 0; i < virtualNum; i++) {
       VirtualNode vNode = new VirtualNode(i, node);
-      virtualRing.put(hashFunction.hash(vNode.getKey()), vNode);
+      virtualRing.put(hashFunction.hash(vNode.toString()), vNode);
     }
   }
 
   /**
    * For a storage group, compute the nearest physical node on the hash ring
    */
-  public PhysicalNode routeNode(String objectKey) {
+  PhysicalNode routeNode(String objectKey) {
     int hashVal = hashFunction.hash(objectKey);
     SortedMap<Integer, VirtualNode> tailMap = virtualRing.tailMap(hashVal);
     Integer nodeHashVal = !tailMap.isEmpty() ? tailMap.firstKey() : virtualRing.firstKey();
@@ -188,7 +180,6 @@ public class Router {
     virtualRing.clear();
     sgRouter.clear();
     dataPartitionCache.clear();
-    nodeMapGroupIdCache.clear();
     groupIdMapNodeCache.clear();
   }
 
@@ -202,15 +193,10 @@ public class Router {
   @OnlyForTest
   public void showVirtualRing() {
     for (Entry<Integer, VirtualNode> entry : virtualRing.entrySet()) {
-      LOGGER.info("{}-{}", entry.getKey(), entry.getValue().getKey());
+      LOGGER.info("{}-{}", entry.getKey(), entry.getValue());
     }
   }
 
-  public boolean containPhysicalNodeBySG(String storageGroup, PhysicalNode node) {
-    PhysicalNode[] nodes = routeGroup(storageGroup);
-    return Arrays.asList(nodes).contains(node);
-  }
-
   public boolean containPhysicalNodeByGroupId(String groupId, PhysicalNode node) {
     PhysicalNode[] nodes = getNodesByGroupId(groupId);
     return Arrays.asList(nodes).contains(node);
@@ -238,6 +224,14 @@ public class Router {
     return groupIdMapNodeCache.keySet();
   }
 
+  public SortedMap<Integer, PhysicalNode> getPhysicalRing() {
+    return physicalRing;
+  }
+
+  public SortedMap<Integer, VirtualNode> getVirtualRing() {
+    return virtualRing;
+  }
+
   /**
    * Get raft group id by storage group name
    */
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
index 88816cf..891f755 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
@@ -19,20 +19,25 @@
 package org.apache.iotdb.cluster.utils.hash;
 
 public class VirtualNode {
-  //the index of the virtual node in the physicalNode
-  private final int replicaIndex;
+
+  /**
+   * The index of this virtual node within its physical node.
+   */
+  private final int index;
+
   private final PhysicalNode physicalNode;
 
-  VirtualNode(int replicaIndex, PhysicalNode physicalNode) {
-    this.replicaIndex = replicaIndex;
+  VirtualNode(int index, PhysicalNode physicalNode) {
+    this.index = index;
     this.physicalNode = physicalNode;
   }
 
-  PhysicalNode getPhysicalNode() {
+  public PhysicalNode getPhysicalNode() {
     return this.physicalNode;
   }
 
-  String getKey() {
-    return String.format("%s-%d", physicalNode.getKey(), replicaIndex);
+  @Override
+  public String toString() {
+    return String.format("%s-%d", physicalNode.getKey(), index);
   }
 }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
similarity index 80%
rename from cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java
rename to cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
index 148d25d..01333a0 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
@@ -28,9 +28,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class QPTaskManagerTest {
+public class QPTaskThreadManagerTest {
 
-  private QPTaskManager qpTaskManager = QPTaskManager.getInstance();
+  private QPTaskThreadManager qpTaskThreadManager = QPTaskThreadManager.getInstance();
 
   private ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig();
 
@@ -64,22 +64,22 @@ public class QPTaskManagerTest {
   @Test
   public void testSubmitAndClose() throws InterruptedException {
 
-    assertEquals(clusterConfig.getConcurrentQPSubTaskThread(), qpTaskManager.getThreadPoolSize());
+    assertEquals(clusterConfig.getConcurrentQPSubTaskThread(), qpTaskThreadManager.getThreadPoolSize());
 
-    int threadPoolSize = qpTaskManager.getThreadPoolSize();
+    int threadPoolSize = qpTaskThreadManager.getThreadPoolSize();
     // test thread num
     for (int i = 1; i <= threadPoolSize + 2; i++) {
-      qpTaskManager.submit(testRunnable);
+      qpTaskThreadManager.submit(testRunnable);
       Thread.sleep(10);
-      assertEquals(Math.min(i, threadPoolSize), qpTaskManager.getActiveCnt());
+      assertEquals(Math.min(i, threadPoolSize), qpTaskThreadManager.getActiveCnt());
     }
 
     // test close
     try {
       new Thread(changeMark).start();
-      qpTaskManager.close(true, blockTimeOut);
+      qpTaskThreadManager.close(true, blockTimeOut);
     } catch (ProcessorException e) {
-      assertEquals("qp task manager thread pool doesn't exit after 10 ms", e.getMessage());
+      assertEquals("qp-task-thread-manager thread pool doesn't exit after 10 ms", e.getMessage());
     }
   }
 }
\ No newline at end of file
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
index a03ee99..4c3286e 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
@@ -54,8 +54,8 @@ public class ClusterDescriptorTest {
   private String testVNodesNew = "4";
   private String testClientNumNew = "400000";
   private String testQueueLenNew = "300000";
-  private String testMetadataConsistencyNew = "2";
-  private String testDataConsistencyNew = "4";
+  private String testMetadataConsistencyNew = String.valueOf(ClusterConsistencyLevel.STRONG.ordinal());
+  private String testDataConsistencyNew = String.valueOf(ClusterConsistencyLevel.STRONG.ordinal());
   private String testConcurrentQPTaskThreadNew = "6";
   private String testConcurrentRaftTaskThreadNew = "11";
 
@@ -96,7 +96,7 @@ public class ClusterDescriptorTest {
       put("qp_task_redo_count", testTaskRedoCountNew);
       put("qp_task_timeout_ms", testTaskTimeoutMSNew);
       put("num_of_virtual_nodes", testVNodesNew);
-      put("max_num_of_inner_rpc_client", testClientNumNew);
+      put("concurrent_inner_rpc_client_thread", testClientNumNew);
       put("max_queue_num_of_inner_rpc_client", testQueueLenNew);
       put("read_metadata_consistency_level", testMetadataConsistencyNew);
       put("read_data_consistency_level", testDataConsistencyNew);
@@ -143,8 +143,8 @@ public class ClusterDescriptorTest {
     assertEquals(testTaskRedoCountNew, config.getQpTaskRedoCount() + "");
     assertEquals(testTaskTimeoutMSNew, config.getQpTaskTimeout() + "");
     assertEquals(testVNodesNew, config.getNumOfVirtualNodes() + "");
-    assertEquals(testClientNumNew, config.getMaxNumOfInnerRpcClient() + "");
-    assertEquals(testQueueLenNew, config.getMaxQueueNumOfInnerRpcClient() + "");
+    assertEquals(testClientNumNew, config.getConcurrentInnerRpcClientThread() + "");
+    assertEquals(testQueueLenNew, config.getMaxQueueNumOfQPTask() + "");
     assertEquals(testMetadataConsistencyNew, config.getReadMetadataConsistencyLevel() + "");
     assertEquals(testDataConsistencyNew, config.getReadDataConsistencyLevel() + "");
     assertEquals(testConcurrentQPTaskThreadNew, config.getConcurrentQPSubTaskThread() + "");
@@ -198,8 +198,8 @@ public class ClusterDescriptorTest {
     testTaskRedoCountOld = config.getQpTaskRedoCount();
     testTaskTimeoutMSOld = config.getQpTaskTimeout();
     testVNodesOld = config.getNumOfVirtualNodes();
-    testClientNumOld = config.getMaxNumOfInnerRpcClient();
-    testQueueLenOld = config.getMaxQueueNumOfInnerRpcClient();
+    testClientNumOld = config.getConcurrentInnerRpcClientThread();
+    testQueueLenOld = config.getMaxQueueNumOfQPTask();
     testMetadataConsistencyOld = config.getReadMetadataConsistencyLevel();
     testDataConsistencyOld = config.getReadDataConsistencyLevel();
     testConcurrentQPTaskThreadOld = config.getConcurrentQPSubTaskThread();
@@ -221,8 +221,8 @@ public class ClusterDescriptorTest {
     config.setQpTaskRedoCount(testTaskRedoCountOld);
     config.setQpTaskTimeout(testTaskTimeoutMSOld);
     config.setNumOfVirtualNodes(testVNodesOld);
-    config.setMaxNumOfInnerRpcClient(testClientNumOld);
-    config.setMaxQueueNumOfInnerRpcClient(testQueueLenOld);
+    config.setConcurrentInnerRpcClientThread(testClientNumOld);
+    config.setMaxQueueNumOfQPTask(testQueueLenOld);
     config.setReadMetadataConsistencyLevel(testMetadataConsistencyOld);
     config.setReadDataConsistencyLevel(testDataConsistencyOld);
     config.setConcurrentQPSubTaskThread(testConcurrentQPTaskThreadOld);
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
new file mode 100644
index 0000000..71cf523
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import org.apache.iotdb.tsfile.write.record.TSRecord;
+import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
+
+public class Constant {
+
+  public static final String d0s0 = "root.vehicle.d0.s0";
+  public static final String d0s1 = "root.vehicle.d0.s1";
+  public static final String d0s2 = "root.vehicle.d0.s2";
+  public static final String d0s3 = "root.vehicle.d0.s3";
+  public static final String d0s4 = "root.vehicle.d0.s4";
+  public static final String d0s5 = "root.vehicle.d0.s5";
+  public static final String d1s0 = "root.vehicle.d1.s0";
+  public static final String d1s1 = "root.vehicle.d1.s1";
+  public static final String TIMESTAMP_STR = "Time";
+  public static boolean testFlag = true;
+  public static String[] stringValue = new String[]{"A", "B", "C", "D", "E"};
+  public static String[] booleanValue = new String[]{"true", "false"};
+
+  public static String[] create_sql = new String[]{"SET STORAGE GROUP TO root.vehicle",
+
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s5 WITH DATATYPE=DOUBLE, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+
+  };
+
+  public static String insertTemplate = "insert into %s(timestamp%s) values(%d%s)";
+
+  public static String first(String path) {
+    return String.format("first(%s)", path);
+  }
+
+  public static String last(String path) {
+    return String.format("last(%s)", path);
+  }
+
+  public static String sum(String path) {
+    return String.format("sum(%s)", path);
+  }
+
+  public static String mean(String path) {
+    return String.format("mean(%s)", path);
+  }
+
+  public static String count(String path) {
+    return String.format("count(%s)", path);
+  }
+
+  public static String max_time(String path) {
+    return String.format("max_time(%s)", path);
+  }
+
+  public static String min_time(String path) {
+    return String.format("min_time(%s)", path);
+  }
+
+  public static String max_value(String path) {
+    return String.format("max_value(%s)", path);
+  }
+
+  public static String min_value(String path) {
+    return String.format("min_value(%s)", path);
+  }
+
+  public static String recordToInsert(TSRecord record) {
+    StringBuilder measurements = new StringBuilder();
+    StringBuilder values = new StringBuilder();
+    for (DataPoint dataPoint : record.dataPointList) {
+      measurements.append(",").append(dataPoint.getMeasurementId());
+      values.append(",").append(dataPoint.getValue());
+    }
+    return String
+        .format(insertTemplate, record.deviceId, measurements.toString(), record.time, values);
+  }
+}
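
Constant.recordToInsert above builds an INSERT statement by accumulating
leading-comma fragments for the measurement and value lists, which the template
splices in after "timestamp" and the time value. A minimal expansion with
made-up values (no TSRecord required):

    public class InsertTemplateSketch {
      public static void main(String[] args) {
        String template = "insert into %s(timestamp%s) values(%d%s)";
        String measurements = ",s0,s1"; // leading commas match the template
        String values = ",100,200";
        System.out.println(
            String.format(template, "root.vehicle.d0", measurements, 10L, values));
        // -> insert into root.vehicle.d0(timestamp,s0,s1) values(10,100,200)
      }
    }
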
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java
new file mode 100644
index 0000000..0165bba
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java
@@ -0,0 +1,490 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.integration.Constant.count;
+import static org.apache.iotdb.cluster.integration.Constant.first;
+import static org.apache.iotdb.cluster.integration.Constant.last;
+import static org.apache.iotdb.cluster.integration.Constant.max_time;
+import static org.apache.iotdb.cluster.integration.Constant.max_value;
+import static org.apache.iotdb.cluster.integration.Constant.mean;
+import static org.apache.iotdb.cluster.integration.Constant.min_time;
+import static org.apache.iotdb.cluster.integration.Constant.min_value;
+import static org.apache.iotdb.cluster.integration.Constant.sum;
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IOTDBGroupByIT {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static String[] createSqls = new String[]{
+      "SET STORAGE GROUP TO root.ln.wf01.wt01",
+      "CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=DOUBLE, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.hardware WITH DATATYPE=INT32, ENCODING=PLAIN"};
+  private static String[] insertSqls = new String[]{
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(1, 1.1, false, 11)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(2, 2.2, true, 22)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(3, 3.3, false, 33 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(4, 4.4, false, 44)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(5, 5.5, false, 55)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(100, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(150, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(200, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(250, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(300, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(10, 10.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(20, 20.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(30, 30.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(40, 40.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(50, 50.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(500, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(510, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(520, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(530, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(540, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(580, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(590, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(600, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(610, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(620, 500.5, false, 550)",
+  };
+
+  private static final String TIMESTAMP_STR = "Time";
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    insertSql();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+  @Test
+  public void countSumMeanTest() {
+    String[] retArray1 = new String[]{
+        "2,1,4.4,4.4",
+        "5,3,35.8,11.933333333333332",
+        "25,1,30.3,30.3",
+        "50,1,50.5,50.5",
+        "65,0,0.0,null",
+        "85,1,100.1,100.1",
+        "105,0,0.0,null",
+        "125,0,0.0,null",
+        "145,1,200.2,200.2",
+        "310,0,0.0,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,2,7.7,3.85",
+        "5,3,35.8,11.933333333333332",
+        "25,1,30.3,30.3",
+        "50,1,50.5,50.5",
+        "65,0,0.0,null",
+        "85,1,100.1,100.1",
+        "105,0,0.0,null",
+        "125,0,0.0,null",
+        "145,1,200.2,200.2",
+        "310,0,0.0,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(temperature), sum(temperature), mean(temperature) from "
+              + "root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(sum("root.ln.wf01.wt01.temperature")) + "," + resultSet
+            .getString(mean("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select count(temperature), sum(temperature), mean(temperature) from "
+              + "root.ln.wf01.wt01 where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(sum("root.ln.wf01.wt01.temperature")) + "," + resultSet
+            .getString(mean("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void maxMinValueTimeTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4.4,4,4",
+        "5,20.2,5.5,20,5",
+        "25,30.3,30.3,30,30",
+        "50,50.5,50.5,50,50",
+        "65,null,null,null,null",
+        "85,100.1,100.1,100,100",
+        "105,null,null,null,null",
+        "125,null,null,null,null",
+        "145,200.2,200.2,150,150",
+        "310,null,null,null,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,4.4,3.3,4,3",
+        "5,20.2,5.5,20,5",
+        "25,30.3,30.3,30,30",
+        "50,50.5,50.5,50,50",
+        "65,null,null,null,null",
+        "85,100.1,100.1,100,100",
+        "105,null,null,null,null",
+        "125,null,null,null,null",
+        "145,200.2,200.2,150,150",
+        "310,null,null,null,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select max_value(temperature), min_value(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(max_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_value("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select max_value(temperature), min_value(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(max_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_value("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void firstLastTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4.4",
+        "5,20.2,5.5",
+        "25,30.3,30.3",
+        "50,50.5,50.5",
+        "65,null,null",
+        "85,100.1,100.1",
+        "105,null,null",
+        "125,null,null",
+        "145,200.2,200.2",
+        "310,null,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,4.4,3.3",
+        "5,20.2,5.5",
+        "25,30.3,30.3",
+        "50,50.5,50.5",
+        "65,null,null",
+        "85,100.1,100.1",
+        "105,null,null",
+        "125,null,null",
+        "145,200.2,200.2",
+        "310,null,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select last(temperature), first(temperature) from root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(last("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(first("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select first(temperature), last(temperature) from root.ln.wf01.wt01 "
+              + "where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(last("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(first("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void largeIntervalTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4,20,4",
+        "30,30.3,16,610,30",
+        "620,500.5,1,620,620"
+    };
+    String[] retArray2 = new String[]{
+        "2,3.3,5,20,3",
+        "30,30.3,16,610,30",
+        "620,500.5,1,620,620"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select min_value(temperature), count(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where time > 3 GROUP BY "
+              + "(590ms, 30, [2, 30], [30, 120], [100, 120], [123, 125], [155, 550], [540, 680])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(min_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select min_value(temperature), count (temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where temperature > 3 GROUP BY "
+              + "(590ms, 30, [2, 30], [30, 120], [100, 120], [123, 125], [155, 550],[540, 680])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(min_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(count("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void smallPartitionTest() {
+    String[] retArray1 = new String[]{
+        "50,100.1,50.5,150.6",
+        "615,500.5,500.5,500.5"
+
+    };
+    String[] retArray2 = new String[]{
+        "50,100.1,50.5,150.6",
+        "585,null,null,0.0",
+        "590,500.5,200.2,700.7"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
... 19050 lines suppressed ...