Posted to commits@iotdb.apache.org by lt...@apache.org on 2019/07/22 04:39:10 UTC

[incubator-iotdb] branch cluster updated (d969533 -> 62be186)

This is an automated email from the ASF dual-hosted git repository.

lta pushed a change to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


 discard d969533  add trim of nodes address when initialization
 discard 539e130  Merge branch 'cluster' of github.com:apache/incubator-iotdb into cluster
 discard d644533  modify iotdb-cluster property
 discard 5c0cc58  modify iotdb-cluster property
 discard 6b94837  fix sonar issues
 discard db7d992  add error statement
 discard b4c70a6  fix a serve bug of set consistency level
 discard b2c61a0  Merge remote-tracking branch 'origin/master' into cluster
    omit 8a642a4  Merge pull request #193 from apache/f_construct_path
    omit a7c0221  simplify Path construction
    omit 6f40c05  Merge pull request #186 from apache/fix_missing_wal_node
 discard 0089e73  Merge branch 'cluster' of github.com:apache/incubator-iotdb into cluster
 discard 0c8c17a  merge master'
    omit 5c15ac3  Organize properties (#189)
    omit e2fb430  fix that ignoreTimeStamp in response is incorrectly set and used (#191)
 discard 6e4b783  Merge remote-tracking branch 'origin/cluster' into cluster
 discard 873724f  sort ip for Status
 discard 27252b1  Merge branch 'cluster' of github.com:apache/incubator-iotdb into cluster
 discard ca9e9e0  modify set read consistency level
 discard 57e6b58  merge master
    omit 87ad8ab  add init method
 discard fb6847a  update StorageGroup to query all storage groups
    omit 1108632  organize properties
    omit 39e82b2  organize properties
    omit 343c7bd  [IOTDB-108]Fix mistakes in doc (#187)
    omit 5cc46a9  [IOTDB-96]Refactor author query (#179)
    omit 68844b2  fix missing wal log node after recovery
 discard ffc9d89  update
 discard 3f7af57  update engine properties
 discard 130bcff  update engine properties
 discard f57d49f  merge master
 discard 305906a  fix a bug of aggre query and groupby query
 discard 1af785f  fix bug
 discard 20b05ae  fix a bug of read data
 discard 4916e26  update logic of trying all nodes based on update of NodeAsClient
    omit 000891f  fix bug where IOTDB_HOME is misused (#184)
 discard cff0544  Merge remote-tracking branch 'origin/cluster' into cluster
 discard 3964bcd  update RaftUtil
    omit 2fbd949  [IOTDB-95]keep stack trace when logging or throwing an Exception (#180)
 discard 8cb0bb5  modify for raft node as client
    omit 84bdd5a  [IOTDB-100]Fix return error message while executing sum aggregation query (#181)
    omit 5b26774  modify error statement in doc (#182)
 discard 9b152fb  modify test log from info to error
 discard 7394372  reformat code
 discard e4f07e2  fix init of raft node as manager
    omit 51f91b6  fix lots of conflicts with cluster_framework
    omit 41a49ad  add cluster_framework
    omit 411ea77  remove groupIdMapNodeCache
    omit 2ac9056  format codes
    omit 7ca00da  fix  some bug
    omit 61e8452  remove useless code
    omit 2a30289  Merge branch 'cluster' of github.com:apache/incubator-iotdb into cluster
    omit 626f2a3  add group by it and add ClusterNullableBatach data to handle null timevalue pair
    omit 39c8865  fix non-query bug: fail to execute when leader down
    omit 0d4a985  Merge branch 'cluster' of github.com:apache/incubator-iotdb into cluster
    omit d10c2f1  remove useless code
    omit 78f7800  fix a serve bug, concurrent hashmap modification
    omit 2bb0a04  add group by features
    omit 99ffb6b  Merge remote-tracking branch 'origin/cluster' into cluster
    omit ce845f2  make nodetool get accurate leader of data group
    omit 45fece1  remove syso
    omit ba0d647  remove system.out
    omit 3577857  fix a bug
    omit 20c93d1  fix a serve bug
    omit 6a733d9  fix a serve bug of set readmetadata level
    omit d079f5e  Merge branch 'cluster' into cluster_fill_aggre_groupby
    omit 9487dbf  add it test of aggregation function
    omit 618cc39  improve robustness of query metadata
    omit 3ee37c3  fix some error bugs: add select series group entity, add query for all nodes features
    omit 3020603  Increase the function of query polling
    omit 4abde09  add aggregation query it
    omit 8fc0c7e  fix unstable unit test and polish ReadOnlyTsFile query logic (#177)
    omit aa3aa0d  fix a serve bug of filter serializable
    omit 2a02dbe  implement Status of NodeTool
    omit fe9937a  add aggre feature without timegenerator
    omit a4ec93c  update Query of NodeTool
    omit 167a30e  add fill feature
    omit 6ddf6fd  merge master
    omit e7886ca  Refactor iotdb code for cluster (#172)
    omit 77a9bd3  fix sonar issues
    omit 0043172  Update README.md
    omit 451e87d  change print to log (#176)
    omit f8754f9  add the print of information about chunkgroups for debug (#175)
    omit 0e1135d  fix sonar list
    omit 29f8eea  modify close clusterService
    omit ec72e4e  Merge pull request #173 from LeiRui/master
    omit bf62685  Update QueryConstant.java
    omit b1b35f6  fix bug about BOOLEAN
    omit f3d5167  fix some issues
    omit 7f41085  Cluster nodetool (#154)
    omit 3568b32  [IOTDB-64] TsFile-Spark-Connector update (#108)
    omit 1b9911e  merge master
    omit 4f554c7  Refactor query resource count (#168)
    omit 7675fc5  [IOTDB-84]fix in proper hashcode methods in processors (#169)
    omit 6d0f3d2  Cluster read (#152)
    omit 1f13d9f   Fix a clerical error in console display when run 'start-server.sh' (#171)
    omit 0fb2a96  Merge pull request #111 from apache/fix_faster_release_mem
    omit 7c88b7d  [IOTDB-58] Replace list by array and refactor TsFileResource (#163)
    omit 4f6aa05  Add disabled mem control & improve memory efficiency (#149)
    omit bb9b958  [IOTDB-76] Reformat MManager.getMetadataInString() in JSON format (#132)
    omit cfb16b2  close MemMonitor in  EnvironmentUtil.cleanEnv() (#164)
    omit 30b36b7  Merge pull request #167 from xiaoyekanren/travis_jdk8_win
    omit 6c7b851  [IOTDB-74]fix that the damaged log will be skipped if it is the only log (#166)
    omit 1b50cd0  supporting travis + win +jdk8
    omit 519e398  [IOTDB-83] Add process bar for import script and show how many rows have been exported (#165)
    omit 4cf0920  Merge pull request #162 from apache/print_asf_rat_lost_on_console
    omit f888966  print apache-rat violation result on console
    omit a8ffbd9  set parsing incorrect cmd as tracing level log in ParseDriver; set TsserviceImple failed executing as info leve log (#153)
    omit 67647fa  release memory asap in ReadOnlyMemChunk
     new 7299350  Cluster read (#152)
     new 62be186  fix some read and write issues

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (d969533)
            \
             N -- N -- N   refs/heads/cluster (62be186)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
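
For illustration, the omit/discard distinction can be checked by testing whether a
revision is still reachable from the remaining refs. A minimal sketch, assuming a
recent JGit (org.eclipse.jgit) on the classpath; the repository path, class name,
and revision are examples, not part of this notification:

    import java.io.File;
    import org.eclipse.jgit.api.Git;
    import org.eclipse.jgit.lib.ObjectId;
    import org.eclipse.jgit.lib.Ref;
    import org.eclipse.jgit.lib.Repository;
    import org.eclipse.jgit.revwalk.RevCommit;
    import org.eclipse.jgit.revwalk.RevWalk;

    public class ReachabilitySketch {
      public static void main(String[] args) throws Exception {
        try (Git git = Git.open(new File("incubator-iotdb"));
            RevWalk walk = new RevWalk(git.getRepository())) {
          Repository repo = git.getRepository();
          ObjectId id = repo.resolve("8a642a4"); // an "omit" revision from the list above
          RevCommit commit = walk.parseCommit(id);
          for (Ref ref : repo.getRefDatabase().getRefsByPrefix("refs/heads/")) {
            RevCommit tip = walk.parseCommit(ref.getObjectId());
            if (walk.isMergedInto(commit, tip)) { // reachable from this ref -> "omit"
              System.out.println(ref.getName() + " still refers to 8a642a4");
            }
          }
        }
      }
    }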


Summary of changes:


[incubator-iotdb] 02/02: fix some read and write issues

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 62be1866bd5a98455e58fa08664efa4ec430614e
Author: lta <li...@163.com>
AuthorDate: Mon Jul 22 12:22:16 2019 +0800

    fix some read and write issues
---
 .travis.yml                                        |  40 +-
 License                                            |   1 +
 README.md                                          |   9 +
 cluster/pom.xml                                    |   5 +
 .../iotdb/cluster/concurrent/ThreadName.java       |   5 +
 ...Manager.java => NodeAsClientThreadManager.java} |  20 +-
 ...QPTaskManager.java => QPTaskThreadManager.java} |  14 +-
 ...erManager.java => QueryTimerThreadManager.java} |  10 +-
 .../cluster/concurrent/pool/ThreadPoolManager.java |  15 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |  42 +-
 .../ClusterConsistencyLevel.java}                  |  30 +-
 .../iotdb/cluster/config/ClusterConstant.java      |   8 +-
 .../iotdb/cluster/config/ClusterDescriptor.java    |  43 +-
 .../org/apache/iotdb/cluster/entity/Server.java    |  48 +-
 .../cluster/entity/raft/DataStateMachine.java      |  20 +-
 .../cluster/entity/raft/MetadataStateManchine.java |   2 +-
 .../iotdb/cluster/entity/raft/RaftService.java     |  20 +-
 .../cluster/qp/executor/AbstractQPExecutor.java    | 117 +++-
 .../qp/executor/ClusterQueryProcessExecutor.java   |  18 +-
 .../cluster/qp/executor/NonQueryExecutor.java      |  84 +--
 .../cluster/qp/executor/QueryMetadataExecutor.java | 200 ++++--
 .../apache/iotdb/cluster/qp/task/BatchQPTask.java  | 116 ++--
 .../iotdb/cluster/qp/task/DataQueryTask.java       |  12 +-
 .../org/apache/iotdb/cluster/qp/task/QPTask.java   |  54 +-
 .../apache/iotdb/cluster/qp/task/SingleQPTask.java |  10 +-
 .../query/common/ClusterNullableBatchData.java     |  79 +++
 .../dataset/ClusterDataSetWithTimeGenerator.java   |   5 +-
 .../ClusterGroupByDataSetWithOnlyTimeFilter.java   | 159 +++++
 .../ClusterGroupByDataSetWithTimeGenerator.java    | 112 +--
 .../executor/ClusterAggregateEngineExecutor.java   | 249 +++++++
 .../executor/ClusterExecutorWithTimeGenerator.java |  38 +-
 .../ClusterExecutorWithoutTimeGenerator.java       |  25 +-
 .../query/executor/ClusterFillEngineExecutor.java  |  75 +-
 .../cluster/query/executor/ClusterQueryRouter.java | 111 ++-
 .../query/factory/ClusterSeriesReaderFactory.java  |  31 +-
 .../coordinatornode/ClusterRpcQueryManager.java    |  14 +-
 .../ClusterRpcSingleQueryManager.java              | 245 +++----
 ...oupEntity.java => FilterSeriesGroupEntity.java} |   4 +-
 .../coordinatornode/IClusterRpcQueryManager.java   |   5 +
 .../IClusterRpcSingleQueryManager.java             |  11 +-
 ...oupEntity.java => SelectSeriesGroupEntity.java} |  56 +-
 .../querynode/ClusterLocalQueryManager.java        |  14 +-
 .../querynode/ClusterLocalSingleQueryManager.java  | 273 ++++++--
 .../querynode/IClusterLocalQueryManager.java       |   9 +-
 .../querynode/IClusterLocalSingleQueryManager.java |   5 +-
 .../AbstractClusterPointReader.java                |   7 +-
 .../coordinatornode/ClusterFilterSeriesReader.java |  19 +-
 .../coordinatornode/ClusterSelectSeriesReader.java |  25 +-
 ...ava => ClusterFillSelectSeriesBatchReader.java} |  29 +-
 ...a => ClusterFilterSeriesBatchReaderEntity.java} |  15 +-
 ...lusterGroupBySelectSeriesBatchReaderEntity.java |  84 +++
 ...or.java => ClusterSelectSeriesBatchReader.java} |  14 +-
 ...ClusterSelectSeriesBatchReaderByTimestamp.java} |   7 +-
 ...a => ClusterSelectSeriesBatchReaderEntity.java} |  41 +-
 ...r.java => IClusterSelectSeriesBatchReader.java} |   4 +-
 ...r.java => IClusterSeriesBatchReaderEntity.java} |   6 +-
 .../timegenerator/ClusterNodeConstructor.java      |   4 +-
 .../cluster/query/utils/ClusterRpcReaderUtils.java | 105 ++-
 .../query/utils/ClusterTimeValuePairUtils.java     |  70 ++
 .../iotdb/cluster/query/utils/ExpressionUtils.java |  14 +-
 .../query/utils/QueryPlanPartitionUtils.java       | 224 ++++--
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  14 +-
 .../rpc/raft/impl/RaftNodeAsClientManager.java     | 197 ++----
 ...ocessor.java => QueryMetricAsyncProcessor.java} |  26 +-
 .../nonquery/DataGroupNonQueryAsyncProcessor.java  |  10 +-
 .../nonquery/MetaGroupNonQueryAsyncProcessor.java  |   2 +-
 .../querydata/InitSeriesReaderSyncProcessor.java   |   9 +-
 .../querydata/QuerySeriesDataSyncProcessor.java    |   1 -
 .../querymetadata/QueryMetadataAsyncProcessor.java |   6 +-
 .../QueryMetadataInStringAsyncProcessor.java       |   4 +-
 .../querymetadata/QueryPathsAsyncProcessor.java    |   4 +-
 .../QuerySeriesTypeAsyncProcessor.java             |   4 +-
 .../QueryTimeSeriesAsyncProcessor.java             |   4 +-
 .../QueryJobNumAsyncProcessor.java}                |  27 +-
 .../QueryLeaderAsyncProcessor.java}                |  27 +-
 .../querymetric/QueryMetricAsyncProcessor.java     |  45 ++
 .../QueryStatusAsyncProcessor.java}                |  26 +-
 .../rpc/raft/request/BasicNonQueryRequest.java     |   1 -
 .../rpc/raft/request/QueryMetricRequest.java       |  21 +-
 .../request/querydata/InitSeriesReaderRequest.java |  72 +-
 .../QuerySeriesDataByTimestampRequest.java         |  17 +-
 .../request/querydata/QuerySeriesDataRequest.java  |  16 +-
 .../request/querymetric/QueryJobNumRequest.java}   |  25 +-
 .../request/querymetric/QueryLeaderRequest.java}   |  25 +-
 .../request/querymetric/QueryMetricRequest.java}   |  28 +-
 .../request/querymetric/QueryStatusRequest.java}   |  27 +-
 .../rpc/raft/response/QueryMetricResponse.java     |  29 +-
 .../nonquery/DataGroupNonQueryResponse.java        |  12 +
 .../QueryJobNumResponse.java}                      |  28 +-
 .../QueryLeaderResponse.java}                      |  28 +-
 .../QueryMetricResponse.java}                      |  28 +-
 .../response/querymetric/QueryStatusResponse.java} |  33 +-
 .../iotdb/cluster/service/ClusterMonitor.java      | 124 ++++
 .../iotdb/cluster/service/ClusterMonitorMBean.java |  94 +++
 .../cluster/service/TSServiceClusterImpl.java      | 102 ++-
 .../iotdb/cluster/service/nodetool/Host.java       |  84 +++
 .../apache/iotdb/cluster/service/nodetool/Lag.java |  33 +-
 .../iotdb/cluster/service/nodetool/NodeTool.java   | 148 ++++
 .../iotdb/cluster/service/nodetool/Query.java      |  47 ++
 .../iotdb/cluster/service/nodetool/Ring.java       |  28 +-
 .../iotdb/cluster/service/nodetool/Status.java     |  36 +-
 .../cluster/service/nodetool/StorageGroup.java     |  54 ++
 .../iotdb/cluster/utils/QPExecutorUtils.java       |  23 +-
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  | 545 +++++++++++++--
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |  14 +
 .../apache/iotdb/cluster/utils/hash/Router.java    |  48 +-
 .../iotdb/cluster/utils/hash/VirtualNode.java      |  19 +-
 ...nagerTest.java => QPTaskThreadManagerTest.java} |  16 +-
 .../cluster/config/ClusterDescriptorTest.java      |  18 +-
 .../apache/iotdb/cluster/integration/Constant.java | 100 +++
 .../iotdb/cluster/integration/IOTDBGroupByIT.java  | 490 +++++++++++++
 .../cluster}/integration/IoTDBAggregationIT.java   | 157 ++++-
 .../integration/IoTDBAggregationLargeDataIT.java   | 124 ++--
 .../integration/IoTDBAggregationSmallDataIT.java   | 641 +++++++++++++++++
 .../cluster/integration/IoTDBFillQueryIT.java      | 357 ++++++++++
 .../integration/IoTDBMetadataFetchAbstract.java    |  63 +-
 .../integration/IoTDBMetadataFetchRemoteIT.java    |   5 +-
 .../IoTDBQueryIT.java}                             |   5 +-
 .../IoTDBQueryLargeDataIT.java}                    |   6 +-
 .../iotdb/cluster/qp/AbstractQPExecutorTest.java   |  91 ++-
 .../cluster/qp/executor/NonQueryExecutorTest.java  |  23 +-
 .../query/manager/ClusterLocalManagerTest.java     | 136 ++--
 .../query/manager/ClusterRpcManagerTest.java       |  46 +-
 .../cluster/query/utils/ExpressionUtilsTest.java   |  17 +-
 .../query/utils/QueryPlanPartitionUtilsTest.java   |  60 +-
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  79 ++-
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   4 +
 .../iotdb/cluster/utils/hash/MD5HashTest.java      |   8 +-
 .../iotdb/cluster/utils/hash/PhysicalNodeTest.java |   4 +-
 .../iotdb/cluster/utils/hash/RouterTest.java       |  27 +-
 cluster/src/test/resources/logback.xml             |   2 +-
 docs/Documentation/QuickStart.md                   |  84 ++-
 .../UserGuideV0.7.0/4-Deployment and Management.md |  71 +-
 .../UserGuideV0.7.0/5-SQL Documentation.md         |  17 +-
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            | 356 ++++++++++
 .../Documentation/UserGuideV0.7.0/7-Tools-spark.md | 286 ++++----
 .../iotdb/tsfile/hadoop/TSFRecordWriter.java       |  11 +-
 .../iotdb/tsfile/hadoop/example/TsFileHelper.java  |   6 +-
 .../cn/edu/thu/tsfile/hadoop/TsFileTestHelper.java |   6 +-
 iotdb-cli/cli/bin/export-csv.bat                   |  10 +-
 iotdb-cli/cli/bin/export-csv.sh                    |   8 +-
 iotdb-cli/cli/bin/import-csv.bat                   |  10 +-
 iotdb-cli/cli/bin/import-csv.sh                    |   8 +-
 iotdb-cli/cli/bin/start-client.bat                 |   8 +-
 iotdb-cli/cli/bin/start-client.sh                  |   6 +-
 iotdb-cli/pom.xml                                  |   7 +-
 .../apache/iotdb/cli/client/AbstractClient.java    |   4 +
 .../java/org/apache/iotdb/cli/tool/ExportCsv.java  |  24 +-
 .../java/org/apache/iotdb/cli/tool/ImportCsv.java  |  44 +-
 .../iotdb/bin/nodetool.bat                         |   9 +-
 clean.sh => iotdb/iotdb/bin/nodetool.sh            |  46 +-
 iotdb/iotdb/conf/iotdb-cluster.properties          |  29 +-
 iotdb/iotdb/conf/iotdb-engine.properties           | 125 ++--
 iotdb/iotdb/conf/iotdb-env.sh                      |   2 +-
 iotdb/iotdb/conf/iotdb-sync-client.properties      |   8 +-
 .../org/apache/iotdb/db/sql/parse/TSParser.g       |   8 +-
 .../iotdb/db/auth/authorizer/BasicAuthorizer.java  |   3 +-
 .../apache/iotdb/db/auth/entity/PathPrivilege.java |   1 -
 .../org/apache/iotdb/db/conf/IoTDBConstant.java    |   4 +
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |  10 +-
 .../java/org/apache/iotdb/db/engine/Processor.java |   2 +-
 .../engine/bufferwrite/BufferWriteProcessor.java   | 170 +++--
 .../bufferwrite/RestorableTsFileIOWriter.java      |  19 +-
 .../iotdb/db/engine/filenode/FileNodeManager.java  |  52 +-
 .../db/engine/filenode/FileNodeProcessor.java      | 173 +++--
 .../iotdb/db/engine/filenode/TsFileResource.java   | 216 +++---
 .../db/engine/memcontrol/BasicMemController.java   |   6 +-
 .../engine/memcontrol/DisabledMemController.java   |  41 +-
 .../iotdb/db/engine/memtable/AbstractMemTable.java |  14 +-
 .../apache/iotdb/db/engine/memtable/IMemTable.java |   7 +-
 .../db/engine/memtable/IWritableMemChunk.java      |   2 +
 .../db/engine/memtable/MemTableFlushUtil.java      |  10 +-
 .../iotdb/db/engine/memtable/WritableMemChunk.java |  27 +-
 .../io/LocalTextModificationAccessor.java          |   1 +
 .../db/engine/overflow/io/OverflowProcessor.java   | 123 ++--
 .../overflow/io/OverflowedTsFileIOWriter.java      |  25 +-
 .../db/engine/querycontext/ReadOnlyMemChunk.java   |   2 +
 .../exception/BufferWriteProcessorException.java   |   2 +-
 .../db/exception/FileNodeProcessorException.java   |   6 +-
 .../db/exception/OverflowProcessorException.java   |   2 +-
 .../db/exception/qp/IllegalASTFormatException.java |   7 +
 .../db/exception/qp/LogicalOperatorException.java  |   7 +
 .../db/exception/qp/LogicalOptimizeException.java  |   7 +
 .../db/exception/qp/QueryProcessorException.java   |   4 +
 .../java/org/apache/iotdb/db/metadata/MGraph.java  |  15 +-
 .../org/apache/iotdb/db/metadata/MManager.java     |  16 +-
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  11 +-
 .../org/apache/iotdb/db/metadata/Metadata.java     | 102 +--
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |   1 -
 .../org/apache/iotdb/db/qp/QueryProcessor.java     |   2 +-
 .../db/qp/executor/IQueryProcessExecutor.java      |  15 +-
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   | 281 +++++---
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |  24 +-
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   2 +-
 .../iotdb/db/qp/logical/crud/InsertOperator.java   |  12 +-
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |   4 +
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |  27 +-
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |   6 +
 .../db/qp/physical/transfer/CodecInstances.java    |  18 +-
 .../iotdb/db/qp/strategy/LogicalGenerator.java     |   8 +-
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |   2 +-
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |  12 +-
 .../db/query/aggregation/AggregateFunction.java    |   1 -
 .../db/query/aggregation/impl/MeanAggrFunc.java    |  13 +-
 .../db/query/aggregation/impl/SumAggrFunc.java     |  10 +
 .../db/query/control/QueryResourceManager.java     |   6 +-
 .../SumAggrFunc.java => dataset/AuthDataSet.java}  |  34 +-
 .../dataset/groupby/GroupByEngineDataSet.java      |   5 +-
 .../groupby/GroupByWithOnlyTimeFilterDataSet.java  |   7 +-
 .../groupby/GroupByWithValueFilterDataSet.java     |  20 +-
 ...a => AbstractExecutorWithoutTimeGenerator.java} |   6 +-
 .../db/query/executor/AbstractQueryRouter.java     | 120 ++++
 .../db/query/executor/AggregateEngineExecutor.java |  85 ++-
 .../EngineExecutorWithoutTimeGenerator.java        |   5 +-
 .../iotdb/db/query/executor/EngineQueryRouter.java |  47 +-
 .../db/query/executor/FillEngineExecutor.java      |  11 +-
 .../db/query/executor/IFillEngineExecutor.java     |  22 +-
 .../java/org/apache/iotdb/db/query/fill/IFill.java |  14 +-
 .../org/apache/iotdb/db/query/fill/LinearFill.java |   4 +-
 .../apache/iotdb/db/query/fill/PreviousFill.java   |   6 +-
 .../timegenerator/AbstractNodeConstructor.java     |   4 +
 .../apache/iotdb/db/service/CloseMergeService.java |   6 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   9 +-
 .../org/apache/iotdb/db/service/JDBCService.java   |   9 +-
 .../org/apache/iotdb/db/service/JMXService.java    |   5 +-
 .../java/org/apache/iotdb/db/service/Monitor.java  |   3 +-
 .../apache/iotdb/db/service/RegisterManager.java   |   2 +-
 .../org/apache/iotdb/db/service/ServiceType.java   |   1 +
 .../org/apache/iotdb/db/service/TSServiceImpl.java | 270 +++++---
 .../org/apache/iotdb/db/sql/parse/ParseDriver.java |   2 +-
 .../org/apache/iotdb/db/sync/conf/Constans.java    |  10 +
 .../iotdb/db/sync/conf/SyncSenderConfig.java       |  24 +-
 .../iotdb/db/sync/conf/SyncSenderDescriptor.java   |   7 +-
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    |  91 +--
 .../apache/iotdb/db/sync/sender/SyncSender.java    |  10 +
 .../iotdb/db/sync/sender/SyncSenderImpl.java       | 106 ++-
 .../java/org/apache/iotdb/db/tools/WalChecker.java |  11 +-
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |  24 +
 .../org/apache/iotdb/db/utils/RecordUtils.java     |   2 +-
 .../apache/iotdb/db/writelog/io/RAFLogReader.java  |   3 +-
 .../writelog/manager/MultiFileLogNodeManager.java  |  10 +-
 .../db/writelog/manager/WriteLogNodeManager.java   |   6 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   2 +-
 .../recover/ExclusiveLogRecoverPerformer.java      |  11 +-
 .../db/writelog/replay/ConcreteLogReplayer.java    |  14 +-
 .../bufferwrite/BufferWriteProcessorNewTest.java   |   1 -
 .../db/engine/filenode/TsFileResourceTest.java     |  12 +-
 .../engine/overflow/io/OverflowProcessorTest.java  |   2 +
 .../engine/overflow/io/OverflowResourceTest.java   |  24 +-
 .../iotdb/db/integration/IoTDBAggregationIT.java   |  86 ++-
 .../integration/IoTDBAggregationLargeDataIT.java   |   2 -
 .../iotdb/db/integration/IoTDBAuthorizationIT.java | 416 +++++------
 .../transfer/PhysicalPlanLogTransferTest.java      |   2 +-
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java |  10 +-
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |  34 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |  10 +-
 .../apache/iotdb/db/writelog/PerformanceTest.java  |  14 +-
 .../org/apache/iotdb/db/writelog/RecoverTest.java  |  12 +-
 .../iotdb/db/writelog/WriteLogNodeManagerTest.java |   8 +-
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |  25 +-
 .../iotdb/db/writelog/io/LogWriterReaderTest.java  |   8 +-
 jdbc/README.md                                     |   4 +-
 .../org/apache/iotdb/jdbc/IoTDBConnection.java     |   6 +-
 .../org/apache/iotdb/jdbc/IoTDBQueryResultSet.java |   9 +
 .../java/org/apache/iotdb/jdbc/IoTDBStatement.java |  15 +-
 pom.xml                                            |   5 +-
 service-rpc/src/main/thrift/rpc.thrift             |   3 +-
 spark/README.md                                    | 407 +++++++----
 spark/pom.xml                                      |   7 +-
 .../org/apache/iotdb/tsfile/io/CreateTSFile.java   | 150 ----
 .../java/org/apache/iotdb/tsfile/io/HDFSInput.java | 147 ++++
 .../apache/iotdb/tsfile/io/HDFSInputStream.java    | 111 ---
 .../io/{HDFSOutputStream.java => HDFSOutput.java}  |  52 +-
 .../apache/iotdb/tsfile/io/TsFileOutputFormat.java |  10 +-
 .../apache/iotdb/tsfile/io/TsFileRecordWriter.java |  19 +-
 .../java/org/apache/iotdb/tsfile/qp/Executor.java  |  51 --
 .../org/apache/iotdb/tsfile/qp/QueryProcessor.java | 153 -----
 .../iotdb/tsfile/qp/common/BasicOperator.java      |  75 --
 .../iotdb/tsfile/qp/common/FilterOperator.java     | 157 -----
 .../apache/iotdb/tsfile/qp/common/SQLConstant.java | 150 ----
 .../apache/iotdb/tsfile/qp/common/SingleQuery.java |  63 --
 .../apache/iotdb/tsfile/qp/common/TSQueryPlan.java |  63 --
 .../tsfile/qp/exception/DNFOptimizeException.java  |  34 -
 .../qp/exception/LogicalOptimizeException.java     |  33 -
 .../tsfile/qp/exception/MergeFilterException.java  |  30 -
 .../tsfile/qp/exception/RemoveNotException.java    |  34 -
 .../tsfile/qp/optimizer/DNFFilterOptimizer.java    | 157 -----
 .../tsfile/qp/optimizer/IFilterOptimizer.java      |  34 -
 .../qp/optimizer/MergeSingleFilterOptimizer.java   | 141 ----
 .../tsfile/qp/optimizer/PhysicalOptimizer.java     | 228 ------
 .../tsfile/qp/optimizer/RemoveNotOptimizer.java    | 108 ---
 .../scala/org/apache/iotdb/tsfile/Converter.scala  | 764 ++++++++++++---------
 .../org/apache/iotdb/tsfile/DefaultSource.scala    | 177 ++---
 .../apache/iotdb/tsfile/TsFileOutputWriter.scala   |  52 +-
 .../apache/iotdb/tsfile/TsFileWriterFactory.scala  |  42 +-
 .../scala/org/apache/iotdb/tsfile/package.scala    |  36 +-
 spark/src/test/resources/test.tsfile               | Bin 1406 -> 0 bytes
 .../cn/edu/tsinghua/tsfile/ConverterTest.scala     | 130 ----
 .../scala/cn/edu/tsinghua/tsfile/TSFileSuit.scala  | 194 ------
 .../scala/org/apache/iotdb/tool/TsFileExample.java | 106 +++
 .../scala/org/apache/iotdb/tool/TsFileWrite.java   | 215 ++++++
 .../org/apache/iotdb/tsfile/ConverterTest.scala    | 266 +++++++
 .../org/apache/iotdb/tsfile/HDFSInputTest.java     |  79 +++
 .../scala/org/apache/iotdb/tsfile/TSFileSuit.scala | 217 ++++++
 .../apache/iotdb/tsfile/TsFileSequenceRead.java    |   4 +-
 .../tsfile/common/constant/QueryConstant.java      |  11 +-
 .../tsfile/exception/write/PageException.java      |   8 +
 .../exception/write/WriteProcessException.java     |  16 +-
 .../apache/iotdb/tsfile/read/ReadOnlyTsFile.java   |  19 +-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |  15 +-
 .../apache/iotdb/tsfile/read/common/BatchData.java |   4 +-
 .../org/apache/iotdb/tsfile/read/common/Path.java  |  11 +-
 .../apache/iotdb/tsfile/read/common/TimeRange.java | 328 +++++++++
 .../tsfile/read/controller/MetadataQuerier.java    |  15 +
 .../read/controller/MetadataQuerierByFileImpl.java | 159 +++--
 .../tsfile/read/expression/ExpressionType.java     |  29 +-
 .../query/executor/ExecutorWithTimeGenerator.java  |  33 +-
 .../tsfile/read/query/executor/TsFileExecutor.java |  52 +-
 .../apache/iotdb/tsfile/write/TsFileWriter.java    |  12 +
 .../iotdb/tsfile/write/chunk/ChunkBuffer.java      |   7 +-
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |   7 +-
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |  11 +
 .../iotdb/tsfile/read/ReadInPartitionTest.java     | 240 +++++++
 .../org/apache/iotdb/tsfile/read/ReadTest.java     |   2 +-
 .../iotdb/tsfile/read/common/TimeRangeTest.java    | 265 +++++++
 .../controller/MetadataQuerierByFileImplTest.java  | 137 +++-
 .../iotdb/tsfile/write/TsFileReadWriteTest.java    |   4 +-
 327 files changed, 11903 insertions(+), 6443 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 077d8ab..6202bf5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,6 +31,11 @@ matrix:
     - os: osx
       osx_image: xcode10.1 # with JDK11.0.1+13 installed
       name: osx-oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: osx
       osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
       name: osx-oraclejdk8
@@ -44,6 +49,11 @@ matrix:
             - AdoptOpenJDK/openjdk
           update: true
           casks: adoptopenjdk-openjdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: osx
       osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
       name: osx-openjdk8
@@ -54,7 +64,6 @@ matrix:
            - AdoptOpenJDK/openjdk
           update: true
           casks: adoptopenjdk-openjdk8
-
     - os: windows
       language: c
       name: win-oraclejdk11
@@ -62,8 +71,7 @@ matrix:
         - choco install jdk11 -params 'installdir=c:\\java11'
         - export PATH=$PATH:"/c/java11/bin"
         - export JAVA_HOME="/c/java11"
-#        - choco install maven
-        - wget https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - wget -q https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
         - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
         - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
         - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
@@ -71,6 +79,25 @@ matrix:
       script:
         - java -version
         - mvn -version
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+          
+    - os: windows
+      language: c
+      name: win-oraclejdk8
+      before_install:
+        - choco install jdk8 -params 'installdir=c:\\jdk8'
+        - wget https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
+      before_script:
+        - export "JAVA_HOME=/c/jdk8"
+        - export "PATH=/c/jdk8/bin:$PATH"
+        - export "PATH=/c/jdk8/jre/bin:$PATH"
+        - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "PATH=/c/mvn361/apache-maven-3.6.1/bin:$PATH"
+      script:
+        - java -version
+        - mvn -version
         - mvn -B clean integration-test
 
     - os: linux
@@ -85,7 +112,7 @@ matrix:
         - export PATH=$JAVA_HOME/bin:$PATH
       script:
         - java -version
-        - mvn -B clean integration-test
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
     - os: linux
       name: linux-openjdk8
       dist: trusty
@@ -98,6 +125,11 @@ matrix:
       name: linux-oraclejdk11
       dist: trusty
       jdk: oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
 
 cache:
   directories:
diff --git a/License b/License
index 1ea4af3..58f5c09 100644
--- a/License
+++ b/License
@@ -255,6 +255,7 @@ MIT License
 ------------
 org.slf4j:slf4j-api
 org.mockito:mockito-all:1.10.19
+me.tongfei:progressbar:0.7.3
 
 
 EDL 1.0
diff --git a/README.md b/README.md
index a418329..d412168 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,15 @@ Let $IOTDB_HOME = /workspace/incubator-iotdb/iotdb/iotdb/
 
 Let $IOTDB_CLI_HOME = /workspace/incubator-iotdb/iotdb-cli/cli
 
+Note:
+* if `IOTDB_HOME` is not explicitly assigned, 
+then by default `IOTDB_HOME` is the direct parent directory of `bin/start-server.sh` on Unix/OS X 
+(or that of `bin\start-server.bat` on Windows).
+
+* if `IOTDB_CLI_HOME` is not explicitly assigned, 
+then by default `IOTDB_CLI_HOME` is the direct parent directory of `bin/start-client.sh` on 
+Unix/OS X (or that of `bin\start-client.bat` on Windows).
+
 If you are not the first time that building IoTDB, remember deleting the following files:
 
 ```
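
A small sketch of the IOTDB_HOME default described in the new README note; the path
and class name are illustrative (taken from the README's own example values):

    import java.io.File;

    public class HomeDefaultSketch {
      public static void main(String[] args) {
        // start-server.sh lives in $IOTDB_HOME/bin, so the default IOTDB_HOME is
        // the directory two levels above the script.
        File script = new File("/workspace/incubator-iotdb/iotdb/iotdb/bin/start-server.sh");
        File iotdbHome = script.getParentFile().getParentFile();
        System.out.println(iotdbHome); // -> /workspace/incubator-iotdb/iotdb/iotdb
      }
    }
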
diff --git a/cluster/pom.xml b/cluster/pom.xml
index 25d13ea..4630f70 100644
--- a/cluster/pom.xml
+++ b/cluster/pom.xml
@@ -76,6 +76,11 @@
                 </exclusion>
             </exclusions>
         </dependency>
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>airline</artifactId>
+            <version>0.8</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
index 9212258..2e4cef6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
@@ -21,6 +21,11 @@ package org.apache.iotdb.cluster.concurrent;
 public enum ThreadName {
 
   /**
+   * Node as client thread
+   */
+  NODE_AS_CLIENT("Node-As-Client-Thread"),
+
+  /**
    * QP Task thread
    */
   QP_TASK("QP-Task-Thread"),
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
similarity index 72%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
index cc26913..3b93623 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
@@ -19,23 +19,21 @@
 package org.apache.iotdb.cluster.concurrent.pool;
 
 import org.apache.iotdb.cluster.concurrent.ThreadName;
-import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 
 /**
- * Manage all qp tasks in thread.
+ * Manage all node as client in thread.
  */
-public class QPTaskManager extends ThreadPoolManager {
+public class NodeAsClientThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "qp task manager";
+  private static final String MANAGER_NAME = "node as client thread manager";
 
-  private QPTaskManager() {
+  private NodeAsClientThreadManager() {
     init();
   }
 
-  public static QPTaskManager getInstance() {
-    return QPTaskManager.InstanceHolder.instance;
+  public static NodeAsClientThreadManager getInstance() {
+    return NodeAsClientThreadManager.InstanceHolder.instance;
   }
 
   /**
@@ -48,12 +46,12 @@ public class QPTaskManager extends ThreadPoolManager {
 
   @Override
   public String getThreadName() {
-    return ThreadName.QP_TASK.getName();
+    return ThreadName.NODE_AS_CLIENT.getName();
   }
 
   @Override
   public int getThreadPoolSize() {
-    return ClusterDescriptor.getInstance().getConfig().getConcurrentQPSubTaskThread();
+    return ClusterDescriptor.getInstance().getConfig().getConcurrentInnerRpcClientThread();
   }
 
   private static class InstanceHolder {
@@ -61,6 +59,6 @@ public class QPTaskManager extends ThreadPoolManager {
     private InstanceHolder() {
     }
 
-    private static QPTaskManager instance = new QPTaskManager();
+    private static NodeAsClientThreadManager instance = new NodeAsClientThreadManager();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
similarity index 77%
rename from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
index cc26913..1e33b77 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
@@ -19,23 +19,21 @@
 package org.apache.iotdb.cluster.concurrent.pool;
 
 import org.apache.iotdb.cluster.concurrent.ThreadName;
-import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 
 /**
  * Manage all qp tasks in thread.
  */
-public class QPTaskManager extends ThreadPoolManager {
+public class QPTaskThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "qp task manager";
+  private static final String MANAGER_NAME = "qp-task-thread-manager";
 
-  private QPTaskManager() {
+  private QPTaskThreadManager() {
     init();
   }
 
-  public static QPTaskManager getInstance() {
-    return QPTaskManager.InstanceHolder.instance;
+  public static QPTaskThreadManager getInstance() {
+    return QPTaskThreadManager.InstanceHolder.instance;
   }
 
   /**
@@ -61,6 +59,6 @@ public class QPTaskManager extends ThreadPoolManager {
     private InstanceHolder() {
     }
 
-    private static QPTaskManager instance = new QPTaskManager();
+    private static QPTaskThreadManager instance = new QPTaskThreadManager();
   }
 }
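
The renamed managers above all share the same lazy singleton shape. A generic sketch
of the initialization-on-demand holder idiom they use (the class name here is
hypothetical):

    public final class SomeThreadManager {
      private SomeThreadManager() {
        // init() would build the pool here, as in the managers above
      }

      public static SomeThreadManager getInstance() {
        // the holder class is loaded, and the instance created, on first use
        return InstanceHolder.instance;
      }

      private static class InstanceHolder {
        private InstanceHolder() {
        }
        private static final SomeThreadManager instance = new SomeThreadManager();
      }
    }
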
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
similarity index 87%
rename from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
index 779488c..1362825 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
@@ -28,9 +28,9 @@ import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
  * Manage all query timer in query node, if timer is timeout, close all query resource for remote
  * coordinator node.
  */
-public class QueryTimerManager extends ThreadPoolManager {
+public class QueryTimerThreadManager extends ThreadPoolManager {
 
-  private static final String MANAGER_NAME = "remote-query-timer-manager";
+  private static final String MANAGER_NAME = "remote-query-timer-thread-manager";
 
   private static final int CORE_POOL_SIZE = 1;
 
@@ -39,8 +39,8 @@ public class QueryTimerManager extends ThreadPoolManager {
     pool = IoTDBThreadPoolFactory.newScheduledThreadPool(getThreadPoolSize(), getThreadName());
   }
 
-  public static QueryTimerManager getInstance() {
-    return QueryTimerManager.QueryTimerManagerHolder.INSTANCE;
+  public static QueryTimerThreadManager getInstance() {
+    return QueryTimerThreadManager.QueryTimerManagerHolder.INSTANCE;
   }
 
   @Override
@@ -65,7 +65,7 @@ public class QueryTimerManager extends ThreadPoolManager {
 
   private static class QueryTimerManagerHolder {
 
-    private static final QueryTimerManager INSTANCE = new QueryTimerManager();
+    private static final QueryTimerThreadManager INSTANCE = new QueryTimerThreadManager();
 
     private QueryTimerManagerHolder() {
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
index 828cc1a..60e8a75 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
@@ -24,9 +24,13 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public abstract class ThreadPoolManager {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
+
   ExecutorService pool;
 
   public void checkInit() {
@@ -38,7 +42,7 @@ public abstract class ThreadPoolManager {
   /**
    * Init pool manager
    */
-  public void init(){
+  public void init() {
     pool = IoTDBThreadPoolFactory.newFixedThreadPool(getThreadPoolSize(), getThreadName());
   }
 
@@ -53,14 +57,13 @@ public abstract class ThreadPoolManager {
   public void close(boolean block, long timeout) throws ProcessorException {
     if (pool != null) {
       try {
-        pool.shutdown();
+        pool.shutdownNow();
         if (block) {
           try {
             if (!pool.awaitTermination(timeout, TimeUnit.MILLISECONDS)) {
-              throw new ProcessorException(
-                  String
-                      .format("%s thread pool doesn't exit after %d ms", getManagerName(),
-                          timeout));
+              LOGGER
+                  .debug(String.format("%s thread pool doesn't exit after %d ms", getManagerName(),
+                      timeout));
             }
           } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
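
A minimal, self-contained sketch of the new close() behavior, using plain
java.util.concurrent in place of IoTDBThreadPoolFactory: shutdownNow() interrupts
workers at once, and a missed deadline is now only logged instead of surfaced as a
ProcessorException:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class CloseSketch {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.shutdownNow(); // interrupt running tasks, drop queued ones
        try {
          if (!pool.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
            System.err.println("pool did not exit after 1000 ms"); // logged at debug above
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // restore interrupt status, as above
        }
      }
    }
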
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index 0e6472d..95905dd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -110,36 +110,38 @@ public class ClusterConfig {
   private int numOfVirtualNodes = 2;
 
   /**
-   * Maximum number of @NodeAsClient usage
+   * Maximum number of inner rpc client thread. When this value <= 0, use CPU core number * 5
    */
-  private int maxNumOfInnerRpcClient = 500;
+  private int concurrentInnerRpcClientThread = Runtime.getRuntime().availableProcessors() * 5;
 
   /**
-   * Maximum number of queue length to use @NodeAsClient, the request which exceed to this number
-   * will be rejected.
+   * Maximum number of queue length of qp task which is waiting to be executed. If the num of
+   * waiting qp tasks exceed to this number, new qp task will be rejected.
    */
-  private int maxQueueNumOfInnerRpcClient = 500;
+  private int maxQueueNumOfQPTask = 500;
 
   /**
-   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   * ReadMetadataConsistencyLevel: strong or weak. Default consistency level is strong.
+   * This parameter is case-insensitive.
    */
-  private int readMetadataConsistencyLevel = 1;
+  private int readMetadataConsistencyLevel = ClusterConsistencyLevel.STRONG.ordinal();
 
   /**
-   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   * ReadDataConsistencyLevel: strong or weak. Default consistency level is strong.
+   * This parameter is case-insensitive.
    */
-  private int readDataConsistencyLevel = 1;
+  private int readDataConsistencyLevel = ClusterConsistencyLevel.STRONG.ordinal();
 
   /**
    * Maximum number of threads which execute tasks generated by client requests concurrently. Each
    * client request corresponds to a QP Task. A QP task may be divided into several sub-tasks. So
    * this value is the sum of all sub-tasks. When this value <= 0, use CPU core number * 10
    */
-  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 10;
+  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 5;
 
   /**
-   * Batch data size read from remote query node once while reading, default value is 10000.
-   * The smaller the parameter, the more communication times and the more time-consuming it is.
+   * Batch data size read from remote query node once while reading, default value is 10000. The
+   * smaller the parameter, the more communication times and the more time-consuming it is.
    */
   private int batchReadSize = 10000;
 
@@ -297,20 +299,20 @@ public class ClusterConfig {
     this.numOfVirtualNodes = numOfVirtualNodes;
   }
 
-  public int getMaxNumOfInnerRpcClient() {
-    return maxNumOfInnerRpcClient;
+  public int getConcurrentInnerRpcClientThread() {
+    return concurrentInnerRpcClientThread;
   }
 
-  public void setMaxNumOfInnerRpcClient(int maxNumOfInnerRpcClient) {
-    this.maxNumOfInnerRpcClient = maxNumOfInnerRpcClient;
+  public void setConcurrentInnerRpcClientThread(int concurrentInnerRpcClientThread) {
+    this.concurrentInnerRpcClientThread = concurrentInnerRpcClientThread;
   }
 
-  public int getMaxQueueNumOfInnerRpcClient() {
-    return maxQueueNumOfInnerRpcClient;
+  public int getMaxQueueNumOfQPTask() {
+    return maxQueueNumOfQPTask;
   }
 
-  public void setMaxQueueNumOfInnerRpcClient(int maxQueueNumOfInnerRpcClient) {
-    this.maxQueueNumOfInnerRpcClient = maxQueueNumOfInnerRpcClient;
+  public void setMaxQueueNumOfQPTask(int maxQueueNumOfQPTask) {
+    this.maxQueueNumOfQPTask = maxQueueNumOfQPTask;
   }
 
   public int getReadMetadataConsistencyLevel() {
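
A minimal sketch of the "non-positive means auto-size" convention the comments above
describe; the multiplier follows the field initializer shown above, and the class is
illustrative only (the descriptor's own fallback below uses its own multiplier):

    public class AutoSizeSketch {
      public static void main(String[] args) {
        int configured = -1; // e.g. concurrent_inner_rpc_client_thread from the properties file
        int clientThreads = configured <= 0
            ? Runtime.getRuntime().availableProcessors() * 5 // field default above
            : configured;
        System.out.println(clientThreads);
      }
    }
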
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
similarity index 59%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
index 9212258..80f0c4a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConsistencyLevel.java
@@ -16,27 +16,33 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
-
-public enum ThreadName {
+package org.apache.iotdb.cluster.config;
 
+public enum ClusterConsistencyLevel {
   /**
-   * QP Task thread
+   * Strong consistency level
    */
-  QP_TASK("QP-Task-Thread"),
+  STRONG("strong"),
 
   /**
-   * Remote query timer
+   * Weak consistency level
    */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  WEAK("weak");
+
+  private String levelName;
 
-  private String name;
+  public static final int UNSUPPORT_LEVEL = -1;
 
-  ThreadName(String name) {
-    this.name = name;
+  ClusterConsistencyLevel(String levelName) {
+    this.levelName = levelName;
   }
 
-  public String getName() {
-    return name;
+  public static int getLevel(String levelName) {
+    for(ClusterConsistencyLevel consistencyLevel: values()){
+      if(consistencyLevel.levelName.equals(levelName)){
+        return consistencyLevel.ordinal();
+      }
+    }
+    return UNSUPPORT_LEVEL;
   }
 }
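
A usage sketch for the new enum, assuming the class above is on the classpath; note
that getLevel() compares with equals(), so the lower-case names declared above are
the ones that match:

    import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;

    public class ConsistencyLevelSketch {
      public static void main(String[] args) {
        int strong = ClusterConsistencyLevel.getLevel("strong"); // == STRONG.ordinal() == 0
        int weak = ClusterConsistencyLevel.getLevel("weak");     // == WEAK.ordinal() == 1
        int other = ClusterConsistencyLevel.getLevel("eventual");
        // other == ClusterConsistencyLevel.UNSUPPORT_LEVEL (-1); callers such as
        // ClusterDescriptor below fall back to STRONG in that case.
        System.out.println(strong + " " + weak + " " + other);
      }
    }
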
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
index 5448847..fba692f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
@@ -26,10 +26,8 @@ public class ClusterConstant {
   /**
    * Set read metadata consistency level pattern
    */
-  public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+metadata\\s+level\\s+to\\s+\\d+";
-  public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+data\\s+level\\s+to\\s+\\d+";
-  public static final int MAX_CONSISTENCY_LEVEL = 2;
-  public static final int STRONG_CONSISTENCY_LEVEL = 1;
+  public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "(set\\s+)(read\\s+metadata\\s+level\\s+)(to\\s+.*)";
+  public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "(set\\s+)(read\\s+data\\s+level\\s+)(to\\s+.*)";
   public static final int WEAK_CONSISTENCY_LEVEL = 2;
 
   /**
@@ -37,7 +35,7 @@ public class ClusterConstant {
    * queue until end. Each client request corresponds to a QP Task. A QP task may be divided into
    * several sub-tasks.The unit is milliseconds.
    */
-  public static final int CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT = 1000;
+  public static final int CLOSE_THREAD_POOL_BLOCK_TIMEOUT = 1000;
 
   /**
    * Query timeout in query node. If time interval between last communications with coordinator node
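
A sketch of applying the relaxed pattern above to a statement such as
"set read data level to strong"; only the pattern itself comes from the diff, and
the extraction of the level name afterwards is an assumption about the caller:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ConsistencyStatementSketch {
      public static void main(String[] args) {
        // The relaxed pattern accepts level names such as "strong", not only digits.
        Pattern p = Pattern.compile("(set\\s+)(read\\s+data\\s+level\\s+)(to\\s+.*)");
        Matcher m = p.matcher("set read data level to strong");
        if (m.matches()) {
          // group(3) is "to strong"; stripping the keyword leaves the level name
          String levelName = m.group(3).replaceFirst("to\\s+", "").trim();
          System.out.println(levelName); // -> strong
        }
      }
    }
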
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
index b90d781..56fba70 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
@@ -56,8 +56,11 @@ public class ClusterDescriptor {
    * test. In most case, you should invoke this method.
    */
   public void loadProps() {
+    // modify iotdb config
     ioTDBConf.setRpcImplClassName(TSServiceClusterImpl.class.getName());
     ioTDBConf.setEnableWal(false);
+
+    // cluster config
     conf.setDefaultPath();
     InputStream inputStream;
     String url = System.getProperty(IoTDBConstant.IOTDB_CONF, null);
@@ -89,8 +92,12 @@ public class ClusterDescriptor {
     Properties properties = new Properties();
     try {
       properties.load(inputStream);
-      conf.setNodes(properties.getProperty("nodes", ClusterConfig.DEFAULT_NODE)
-          .split(","));
+      String[] nodes = properties.getProperty("nodes", ClusterConfig.DEFAULT_NODE)
+          .split(",");
+      for(int i = 0 ; i < nodes.length ; i++){
+        nodes[i] = nodes[i].trim();
+      }
+      conf.setNodes(nodes);
 
       conf.setReplication(Integer
           .parseInt(properties.getProperty("replication",
@@ -137,21 +144,27 @@ public class ClusterDescriptor {
           .parseInt(properties.getProperty("num_of_virtual_nodes",
               Integer.toString(conf.getNumOfVirtualNodes()))));
 
-      conf.setMaxNumOfInnerRpcClient(Integer
-          .parseInt(properties.getProperty("max_num_of_inner_rpc_client",
-              Integer.toString(conf.getMaxNumOfInnerRpcClient()))));
+      conf.setConcurrentInnerRpcClientThread(Integer
+          .parseInt(properties.getProperty("concurrent_inner_rpc_client_thread",
+              Integer.toString(conf.getConcurrentInnerRpcClientThread()))));
 
-      conf.setMaxQueueNumOfInnerRpcClient(Integer
+      conf.setMaxQueueNumOfQPTask(Integer
           .parseInt(properties.getProperty("max_queue_num_of_inner_rpc_client",
-              Integer.toString(conf.getMaxQueueNumOfInnerRpcClient()))));
+              Integer.toString(conf.getMaxQueueNumOfQPTask()))));
 
-      conf.setReadMetadataConsistencyLevel(Integer
-          .parseInt(properties.getProperty("read_metadata_consistency_level",
-              Integer.toString(conf.getReadMetadataConsistencyLevel()))));
+      String readMetadataLevelName = properties.getProperty("read_metadata_consistency_level", "");
+      int readMetadataLevel = ClusterConsistencyLevel.getLevel(readMetadataLevelName);
+      if(readMetadataLevel == ClusterConsistencyLevel.UNSUPPORT_LEVEL){
+        readMetadataLevel = ClusterConsistencyLevel.STRONG.ordinal();
+      }
+      conf.setReadMetadataConsistencyLevel(readMetadataLevel);
 
-      conf.setReadDataConsistencyLevel(Integer
-          .parseInt(properties.getProperty("read_data_consistency_level",
-              Integer.toString(conf.getReadDataConsistencyLevel()))));
+      String readDataLevelName = properties.getProperty("read_data_consistency_level", "");
+      int readDataLevel = ClusterConsistencyLevel.getLevel(readDataLevelName);
+      if(readDataLevel == ClusterConsistencyLevel.UNSUPPORT_LEVEL){
+        readDataLevel = ClusterConsistencyLevel.STRONG.ordinal();
+      }
+      conf.setReadDataConsistencyLevel(readDataLevel);
 
       conf.setConcurrentQPSubTaskThread(Integer
           .parseInt(properties.getProperty("concurrent_qp_sub_task_thread",
@@ -168,6 +181,10 @@ public class ClusterDescriptor {
         conf.setConcurrentQPSubTaskThread(Runtime.getRuntime().availableProcessors() * 10);
       }
 
+      if (conf.getConcurrentInnerRpcClientThread() <= 0) {
+        conf.setConcurrentInnerRpcClientThread(Runtime.getRuntime().availableProcessors() * 10);
+      }
+
       if (conf.getMaxCachedBatchDataListSize() <= 0) {
         conf.setMaxCachedBatchDataListSize(2);
       }
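
The trim loop added above guards against whitespace in the comma-separated node
list. A small sketch of its effect (the addresses here are illustrative):

    public class NodesTrimSketch {
      public static void main(String[] args) {
        String raw = "127.0.0.1:8888, 127.0.0.2:8888 ,127.0.0.3:8888";
        String[] nodes = raw.split(",");
        for (int i = 0; i < nodes.length; i++) {
          nodes[i] = nodes[i].trim();
        }
        // nodes -> [127.0.0.1:8888, 127.0.0.2:8888, 127.0.0.3:8888]
        System.out.println(String.join("|", nodes));
      }
    }
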
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
index 0efb70d..41c4cb1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@ -23,7 +23,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory;
 import java.util.HashMap;
 import java.util.Map;
-import org.apache.iotdb.cluster.concurrent.pool.QPTaskManager;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -31,7 +31,11 @@ import org.apache.iotdb.cluster.entity.data.DataPartitionHolder;
 import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetricAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.DataGroupNonQueryAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.MetaGroupNonQueryAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querydata.CloseSeriesReaderSyncProcessor;
@@ -43,11 +47,18 @@ import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataIn
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryPathsAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QuerySeriesTypeAsyncProcessor;
 import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryTimeSeriesAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryJobNumAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryLeaderAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryStatusAsyncProcessor;
+import org.apache.iotdb.cluster.service.ClusterMonitor;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.exception.StartupException;
 import org.apache.iotdb.db.service.IoTDB;
+import org.apache.iotdb.db.service.RegisterManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,12 +94,17 @@ public class Server {
    */
   private IoTDB iotdb;
 
-  public static void main(String[] args) {
+  private RegisterManager registerManager = new RegisterManager();
+
+  public static void main(String[] args)
+      throws ProcessorException, RaftConnectionException, FileNodeManagerException {
     Server server = Server.getInstance();
     server.start();
   }
 
-  public void start() {
+  public void start()
+      throws ProcessorException, RaftConnectionException, FileNodeManagerException {
+
     /** Stand-alone version of IoTDB, be careful to replace the internal JDBC Server with a cluster version **/
     iotdb = new IoTDB();
     iotdb.active();
@@ -97,12 +113,15 @@ public class Server {
     /** Init raft groups **/
     PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(CLUSTER_CONF.getNodes());
     serverId = new PeerId(CLUSTER_CONF.getIp(), CLUSTER_CONF.getPort());
+
+    // Rpc between raft groups
     RpcServer rpcServer = new RpcServer(serverId.getPort());
     RaftRpcServerFactory.addRaftRequestProcessors(rpcServer);
 
     registerNonQueryProcessor(rpcServer);
     registerQueryMetadataProcessor(rpcServer);
     registerQueryDataProcessor(rpcServer);
+    registerQueryMetricProcessor(rpcServer);
 
     metadataHolder = new MetadataRaftHolder(peerIds, serverId, rpcServer, true);
     metadataHolder.init();
@@ -126,6 +145,12 @@ public class Server {
       Router.getInstance().showPhysicalNodes(groupId);
     }
 
+    try {
+      LOGGER.info("Register Cluster Monitor to JMX service.");
+      registerManager.register(ClusterMonitor.INSTANCE);
+    } catch (StartupException e) {
+      stop();
+    }
   }
 
   private void registerNonQueryProcessor(RpcServer rpcServer) {
@@ -148,14 +173,25 @@ public class Server {
     rpcServer.registerUserProcessor(new CloseSeriesReaderSyncProcessor());
   }
 
-  public void stop() throws ProcessorException, InterruptedException {
-    QPTaskManager.getInstance().close(true, ClusterConstant.CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT);
-    iotdb.deactivate();
+  private void registerQueryMetricProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new QueryMetricAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryJobNumAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryStatusAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryLeaderAsyncProcessor());
+  }
+
+  public void stop() throws ProcessorException, RaftConnectionException, FileNodeManagerException {
+    QPTaskThreadManager.getInstance().close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
+    ClusterRpcQueryManager.getInstance().close();
+    ClusterLocalQueryManager.getInstance().close();
     CLIENT_MANAGER.shutdown();
+    iotdb.deactivate();
     metadataHolder.stop();
     for (DataPartitionHolder dataPartitionHolder : dataPartitionHolderMap.values()) {
       dataPartitionHolder.stop();
     }
+
+    registerManager.deregisterAll();
   }
 
   public PeerId getServerId() {
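
A note on the ClusterMonitor/JMX registration added above: ClusterMonitor.INSTANCE is registered
through a RegisterManager and the whole server is stopped if that registration fails. As a rough
illustration of the underlying standard-MBean pattern (all names below are hypothetical, not the
actual ClusterMonitor API), a monitor registered with the platform MBeanServer looks like:

    import java.lang.management.ManagementFactory;
    import javax.management.ObjectName;

    // Hypothetical management interface; the real ClusterMonitor exposes cluster metrics instead.
    interface DemoMonitorMBean {
      int getQueryJobNum();
    }

    enum DemoMonitor implements DemoMonitorMBean {
      INSTANCE;

      @Override
      public int getQueryJobNum() {
        return 0; // placeholder value for illustration
      }

      // Register this singleton with the platform MBeanServer, much as a RegisterManager might.
      void start() throws Exception {
        ManagementFactory.getPlatformMBeanServer()
            .registerMBean(this, new ObjectName("org.apache.iotdb.cluster:type=DemoMonitor"));
      }
    }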
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
index b8c6f43..eb9db25 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
@@ -32,7 +32,7 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
 import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -92,7 +92,8 @@ public class DataStateMachine extends StateMachineAdapter {
    */
   private void applySingleTask(Closure closure, ByteBuffer data) {
     /** If closure is not null, the node is leader **/
-    BasicResponse response = (closure == null) ? null : ((ResponseClosure) closure).getResponse();
+    DataGroupNonQueryResponse response = (closure == null) ? null
+        : (DataGroupNonQueryResponse) ((ResponseClosure) closure).getResponse();
     DataGroupNonQueryRequest request;
     try {
       request = SerializerManager.getSerializer(SerializerManager.Hessian2)
@@ -116,12 +117,13 @@ public class DataStateMachine extends StateMachineAdapter {
         PhysicalPlan plan = PhysicalPlanLogTransfer.logToOperator(planByte);
 
         LOGGER.debug("OperatorType :{}", plan.getOperatorType());
-        /** If the request is to set path and sg of the path doesn't exist, it needs to run null-read in meta group to avoid out of data sync **/
+        /** If the request is to create a timeseries and the storage group of the path doesn't exist, a null-read must be performed in the meta group first to avoid getting out of data sync **/
         if (plan.getOperatorType() == OperatorType.CREATE_TIMESERIES && !checkPathExistence(
             ((MetadataPlan) plan).getPath().getFullPath())) {
           RaftUtils.handleNullReadToMetaGroup(status);
           if(!status.isOk()){
             addResult(response, false);
+            addErrorMsg(response, status.getErrorMsg());
             continue;
           }
         }
@@ -131,6 +133,7 @@ public class DataStateMachine extends StateMachineAdapter {
         LOGGER.error("Execute physical plan error", e);
         status = new Status(-1, e.getMessage());
         addResult(response, false);
+        addErrorMsg(response, status.getErrorMsg());
       }
     }
     if (closure != null) {
@@ -141,13 +144,22 @@ public class DataStateMachine extends StateMachineAdapter {
   /**
    * Add result to response
    */
-  private void addResult(BasicResponse response, boolean result){
+  private void addResult(DataGroupNonQueryResponse response, boolean result){
     if(response != null){
       response.addResult(result);
     }
   }
 
   /**
+   * Add error message to response
+   */
+  private void addErrorMsg(DataGroupNonQueryResponse response, String errorMsg){
+    if(response != null){
+      response.addErrorMsg(errorMsg);
+    }
+  }
+
+  /**
    * Check the existence of a specific path
    */
   private boolean checkPathExistence(String path) throws PathErrorException {
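
For clarity, the addResult/addErrorMsg pair guards against a null response because in jraft only
the leader is handed a closure; followers apply the same log entry with closure == null. A
condensed sketch of that pattern, using only the calls visible in this hunk:

    // Leader-only response accumulation: on followers `closure` is null,
    // so `response` is null and both helpers silently no-op.
    DataGroupNonQueryResponse response = (closure == null) ? null
        : (DataGroupNonQueryResponse) ((ResponseClosure) closure).getResponse();
    if (!status.isOk()) {
      addResult(response, false);                  // no-op when response == null
      addErrorMsg(response, status.getErrorMsg()); // likewise guarded
    }
    if (closure != null) {
      closure.run(status);                         // only the leader replies to the client
    }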
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
index 3cc9001..78dd3e8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
@@ -148,7 +148,7 @@ public class MetadataStateManchine extends StateMachineAdapter {
     mManager.setStorageLevelToMTree(sg);
   }
 
-  public Set<String> getAllStorageGroups() throws PathErrorException {
+  public Set<String> getAllStorageGroups() {
     return mManager.getAllStorageGroup();
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
index 1d08f09..d910f0f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
@@ -25,9 +25,11 @@ import com.alipay.sofa.jraft.StateMachine;
 import com.alipay.sofa.jraft.conf.Configuration;
 import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.option.NodeOptions;
+import com.codahale.metrics.ConsoleReporter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.service.IService;
@@ -45,11 +47,11 @@ public class RaftService implements IService {
 
   public RaftService(String groupId, PeerId[] peerIds, PeerId serverId, RpcServer rpcServer, StateMachine fsm, boolean startRpcServer) {
     this.peerIdList = new ArrayList<>(peerIds.length);
-    peerIdList.addAll(Arrays.asList(peerIds));
+    this.peerIdList.addAll(Arrays.asList(peerIds));
     this.fsm = fsm;
     this.groupId = groupId;
     this.startRpcServer = startRpcServer;
-    raftGroupService = new RaftGroupService(groupId, serverId, null, rpcServer);
+    this.raftGroupService = new RaftGroupService(groupId, serverId, null, rpcServer);
   }
 
   @Override
@@ -61,6 +63,7 @@ public class RaftService implements IService {
     nodeOptions.setRaftMetaUri(FilePathUtils.regularizePath(config.getRaftMetadataPath()) + groupId);
     nodeOptions.setSnapshotUri(FilePathUtils.regularizePath(config.getRaftSnapshotPath()) + groupId);
     nodeOptions.setElectionTimeoutMs(config.getElectionTimeoutMs());
+    nodeOptions.setEnableMetrics(true);
     final Configuration initConf = new Configuration();
     initConf.setPeers(peerIdList);
     nodeOptions.setInitialConf(initConf);
@@ -70,6 +73,12 @@ public class RaftService implements IService {
   @Override
   public void start() {
     this.node = raftGroupService.start(startRpcServer);
+
+//    ConsoleReporter reporter = ConsoleReporter.forRegistry(node.getNodeMetrics().getMetricRegistry())
+//        .convertRatesTo(TimeUnit.SECONDS)
+//        .convertDurationsTo(TimeUnit.MILLISECONDS)
+//        .build();
+//    reporter.start(30, TimeUnit.SECONDS);
   }
 
   @Override
@@ -93,4 +102,11 @@ public class RaftService implements IService {
     this.node = node;
   }
 
+  public StateMachine getFsm() {
+    return fsm;
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
 }
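
The commented-out block above documents how the metrics enabled by
nodeOptions.setEnableMetrics(true) can be surfaced. Uncommented and annotated, the Dropwizard
ConsoleReporter usage from that comment reads (the 30-second interval is just the sample value):

    // Dump the jraft node's Dropwizard metrics to stdout every 30 seconds.
    ConsoleReporter reporter = ConsoleReporter
        .forRegistry(node.getNodeMetrics().getMetricRegistry())
        .convertRatesTo(TimeUnit.SECONDS)          // report rates as events/second
        .convertDurationsTo(TimeUnit.MILLISECONDS) // report durations in milliseconds
        .build();
    reporter.start(30, TimeUnit.SECONDS);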
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
index 492b7ad..5059e06 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
@@ -19,16 +19,16 @@
 package org.apache.iotdb.cluster.qp.executor;
 
 import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.HashSet;
+import java.util.Set;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
-import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.QPTask;
 import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.cluster.utils.hash.Router;
@@ -87,59 +87,102 @@ public abstract class AbstractQPExecutor {
    * Synchronously handle a QPTask and get its response, failing over within the raft group on connection errors
    *
    * @param task request QPTask
-   * @param leader leader of the target raft group
    * @param taskRetryNum Number of QPTask retries due to timeout or redirection.
    * @return basic response
    */
-  protected BasicResponse asyncHandleNonQuerySingleTaskGetRes(SingleQPTask task, PeerId leader,
-      int taskRetryNum)
+  private BasicResponse syncHandleSingleTaskGetRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId, Set<PeerId> downNodeSet)
       throws InterruptedException, RaftConnectionException {
-    asyncSendNonQuerySingleTask(task, leader, taskRetryNum);
-    return syncGetNonQueryRes(task, leader, taskRetryNum);
+    PeerId firstNode = task.getTargetNode();
+    RaftUtils.updatePeerIDOrder(firstNode, groupId);
+    BasicResponse response;
+    try {
+      asyncSendSingleTask(task, taskRetryNum);
+      response = syncGetSingleTaskRes(task, taskRetryNum, taskInfo, groupId, downNodeSet);
+      return response;
+    } catch (RaftConnectionException ex) {
+      downNodeSet.add(firstNode);
+      while (true) {
+        PeerId nextNode = null;
+        try {
+          nextNode = RaftUtils.getPeerIDInOrder(groupId);
+          if (firstNode.equals(nextNode)) {
+            break;
+          }
+          LOGGER.debug(
+              "Previous task failed, now sending {} task for group {} to node {}.", taskInfo,
+              groupId, nextNode);
+          task.resetTask();
+          task.setTargetNode(nextNode);
+          asyncSendSingleTask(task, taskRetryNum);
+          response = syncGetSingleTaskRes(task, taskRetryNum, taskInfo, groupId, downNodeSet);
+          LOGGER.debug("{} task for group {} to node {} succeed.", taskInfo, groupId, nextNode);
+          return response;
+        } catch (RaftConnectionException e1) {
+          LOGGER.debug("{} task for group {} to node {} fail.", taskInfo, groupId, nextNode);
+          downNodeSet.add(nextNode);
+        }
+      }
+      throw new RaftConnectionException(String
+          .format("Cannot %s on any node of group <%s>, please check cluster status.",
+              taskInfo, groupId));
+    }
+  }
+
+  protected BasicResponse syncHandleSingleTaskGetRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId)
+      throws RaftConnectionException, InterruptedException {
+    return syncHandleSingleTaskGetRes(task, taskRetryNum, taskInfo, groupId, new HashSet<>());
   }
 
   /**
    * Asynchronously send an rpc task via the client
    * @param task rpc task
-   * @param leader leader node of the group
    * @param taskRetryNum Retry count of the task
    */
-  protected void asyncSendNonQuerySingleTask(SingleQPTask task, PeerId leader, int taskRetryNum)
+  protected void asyncSendSingleTask(SingleQPTask task, int taskRetryNum)
       throws RaftConnectionException {
     if (taskRetryNum >= TASK_MAX_RETRY) {
       throw new RaftConnectionException(String.format("QPTask retries reach the upper bound %s",
           TASK_MAX_RETRY));
     }
-    NodeAsClient client = RaftUtils.getRaftNodeAsClient();
-    /** Call async method **/
-    client.asyncHandleRequest(task.getRequest(), leader, task);
+    RaftNodeAsClientManager.getInstance().produceQPTask(task);
   }
 
   /**
    * Synchronously get the task response. If the task is redirected or its status is exception, it
    * needs to be resent. Note: an Exception status means that the exception occurred while the task
    * was being sent, not while it was being executed.
-   *  @param task rpc task
-   * @param leader leader node of the group
+   * @param task rpc task
    * @param taskRetryNum Retry count of the task
    */
-  private BasicResponse syncGetNonQueryRes(SingleQPTask task, PeerId leader, int taskRetryNum)
+  private BasicResponse syncGetSingleTaskRes(SingleQPTask task, int taskRetryNum, String taskInfo, String groupId, Set<PeerId> downNodeSet)
       throws InterruptedException, RaftConnectionException {
     task.await();
+    PeerId leader;
     if (task.getTaskState() != TaskState.FINISH) {
-      if (task.getTaskState() == TaskState.REDIRECT) {
-        /** redirect to the right leader **/
+      if (task.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
+        throw new RaftConnectionException(
+            String.format("Can not connect to remote node : %s", task.getTargetNode()));
+      } else if (task.getTaskState() == TaskState.REDIRECT) {
+        // redirect to the right leader
         leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
-        LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
-        RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+
+        if (downNodeSet.contains(leader)) {
+          LOGGER.debug("Redirect leader {} is down, group {} might be down.", leader, groupId);
+          throw new RaftConnectionException(
+              String.format("Can not connect to leader of remote node : %s", task.getTargetNode()));
+        } else {
+          LOGGER
+              .debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
+          RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+        }
       } else {
-        String groupId = task.getRequest().getGroupID();
         RaftUtils.removeCachedRaftGroupLeader(groupId);
         LOGGER.debug("Remove cached raft group leader of {}", groupId);
-        leader = RaftUtils.getLeaderPeerID(groupId);
+        leader = RaftUtils.getLocalLeaderPeerID(groupId);
       }
+      task.setTargetNode(leader);
       task.resetTask();
-      return asyncHandleNonQuerySingleTaskGetRes(task, leader, taskRetryNum + 1);
+      return syncHandleSingleTaskGetRes(task, taskRetryNum + 1, taskInfo, groupId, downNodeSet);
     }
     return task.getResponse();
   }
@@ -150,20 +193,12 @@ public abstract class AbstractQPExecutor {
     }
   }
 
-  public void setReadMetadataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      readMetadataConsistencyLevel.set(level);
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
+  public void setReadMetadataConsistencyLevel(int level) {
+    readMetadataConsistencyLevel.set(level);
   }
 
-  public void setReadDataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      readDataConsistencyLevel.set(level);
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
+  public void setReadDataConsistencyLevel(int level) {
+    readDataConsistencyLevel.set(level);
   }
 
   public int getReadMetadataConsistencyLevel() {
@@ -175,4 +210,16 @@ public abstract class AbstractQPExecutor {
     checkInitConsistencyLevel();
     return readDataConsistencyLevel.get();
   }
+
+  /**
+   * Synchronously handle a task represented by a SingleQPTask.
+   *
+   * @param task request SingleQPTask
+   * @return request result
+   */
+  public boolean syncHandleSingleTask(SingleQPTask task, String taskInfo, String groupId)
+      throws RaftConnectionException, InterruptedException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, taskInfo, groupId);
+    return response != null && response.isSuccess();
+  }
 }
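
The failover loop in syncHandleSingleTaskGetRes is the heart of this change: try the task's first
target node, and on a RaftConnectionException walk the group's peers in stored order until one
answers or the walk wraps back to the starting node. A stripped-down sketch of the control flow
(generic names; the checked RaftConnectionException is simplified to RuntimeException here):

    import java.util.function.Function;
    import com.alipay.sofa.jraft.entity.PeerId;
    import org.apache.iotdb.cluster.utils.RaftUtils;

    class FailoverSketch {
      // One attempt per peer; `send` throws (unchecked here) on connection failure.
      static <R> R sendWithFailover(PeerId firstNode, String groupId, Function<PeerId, R> send) {
        try {
          return send.apply(firstNode);
        } catch (RuntimeException firstFailure) {
          PeerId next;
          while (!(next = RaftUtils.getPeerIDInOrder(groupId)).equals(firstNode)) {
            try {
              return send.apply(next); // any later peer that answers ends the walk
            } catch (RuntimeException ignored) {
              // this peer is down too; keep walking the ring
            }
          }
          throw new IllegalStateException("All nodes of group " + groupId + " are unreachable");
        }
      }
    }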
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
index c5032fc..39324d8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
@@ -41,19 +41,28 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.QueryExpression;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements IQueryProcessExecutor {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterQueryProcessExecutor.class);
   private ThreadLocal<Integer> fetchSize = new ThreadLocal<>();
   private ClusterQueryRouter clusterQueryRouter = new ClusterQueryRouter();
 
-  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private QueryMetadataExecutor queryMetadataExecutor;
+
+  public ClusterQueryProcessExecutor(
+      QueryMetadataExecutor queryMetadataExecutor) {
+    this.queryMetadataExecutor = queryMetadataExecutor;
+  }
 
   @Override
-  public QueryDataSet processQuery(QueryPlan queryPlan, QueryContext context)
+  public QueryDataSet processQuery(PhysicalPlan plan, QueryContext context)
       throws IOException, FileNodeManagerException, PathErrorException,
       QueryFilterOptimizationException, ProcessorException {
 
+    QueryPlan queryPlan = (QueryPlan) plan;
     QueryExpression queryExpression = QueryExpression.create().setSelectSeries(queryPlan.getPaths())
         .setExpression(queryPlan.getExpression());
     clusterQueryRouter.setReadDataConsistencyLevel(getReadDataConsistencyLevel());
@@ -117,6 +126,7 @@ public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements I
   public List<String> getAllPaths(String originPath)
       throws PathErrorException {
     try {
+      LOGGER.debug("Read metadata consistency level: {}", getReadMetadataConsistencyLevel());
       return queryMetadataExecutor.processPathsQuery(originPath);
     } catch (InterruptedException | ProcessorException e) {
       throw new PathErrorException(e.getMessage());
@@ -165,8 +175,8 @@ public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements I
   }
 
   @Override
-  public int multiInsert(String deviceId, long insertTime, List<String> measurementList,
-      List<String> insertValues) throws ProcessorException {
+  public int multiInsert(String deviceId, long insertTime, String[] measurementList,
+      String[] insertValues) throws ProcessorException {
     throw new UnsupportedOperationException();
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
index 1420370..1e6abea 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
@@ -23,6 +23,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import java.io.IOException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -68,10 +69,10 @@ public class NonQueryExecutor extends AbstractQPExecutor {
   private static final String OPERATION_NOT_SUPPORTED = "Operation %s does not support";
 
   /**
-   * When executing Metadata Plan, it's necessary to do null-read in single non query request or do
-   * the first null-read in batch non query request
+   * When executing a Metadata Plan, it's necessary to do an empty read for a single non-query
+   * request, or to do the first empty read of a batch non-query request
    */
-  private boolean nullReaderEnable = false;
+  private boolean emptyTaskEnable = false;
 
   public NonQueryExecutor() {
     super();
@@ -82,12 +83,12 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   public boolean processNonQuery(PhysicalPlan plan) throws ProcessorException {
     try {
-      nullReaderEnable = true;
+      emptyTaskEnable = true;
       String groupId = getGroupIdFromPhysicalPlan(plan);
       return handleNonQueryRequest(groupId, plan);
     } catch (RaftConnectionException e) {
       LOGGER.error(e.getMessage());
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     } catch (InterruptedException | PathErrorException | IOException e) {
       throw new ProcessorException(e);
     }
@@ -104,27 +105,25 @@ public class NonQueryExecutor extends AbstractQPExecutor {
 
     Status nullReadTaskStatus = Status.OK();
     RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
-    if(!nullReadTaskStatus.isOk()){
+    if (!nullReadTaskStatus.isOk()) {
       throw new ProcessorException("Null read while processing batch failed");
     }
-    nullReaderEnable = false;
+    emptyTaskEnable = false;
 
-    /** 1. Classify physical plans by group id **/
+    /* 1. Classify physical plans by group id */
     Map<String, List<PhysicalPlan>> physicalPlansMap = new HashMap<>();
     Map<String, List<Integer>> planIndexMap = new HashMap<>();
     classifyPhysicalPlanByGroupId(physicalPlans, batchResult, physicalPlansMap, planIndexMap);
 
-    /** 2. Construct Multiple Data Group Requests **/
+    /* 2. Construct Multiple Data Group Requests */
     Map<String, SingleQPTask> subTaskMap = new HashMap<>();
     constructMultipleRequests(physicalPlansMap, planIndexMap, subTaskMap, batchResult);
 
-    /** 3. Execute Multiple Sub Tasks **/
+    /* 3. Execute Multiple Sub Tasks */
     BatchQPTask task = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
     currentTask.set(task);
-    task.execute(this);
+    task.executeBy(this);
     task.await();
-    batchResult.setAllSuccessful(task.isAllSuccessful());
-    batchResult.setBatchErrorMessage(task.getBatchErrorMessage());
   }
 
   /**
@@ -132,7 +131,8 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   private void classifyPhysicalPlanByGroupId(PhysicalPlan[] physicalPlans, BatchResult batchResult,
       Map<String, List<PhysicalPlan>> physicalPlansMap, Map<String, List<Integer>> planIndexMap) {
-    int[] result = batchResult.getResult();
+
+    int[] result = batchResult.getResultArray();
     for (int i = 0; i < result.length; i++) {
       /** Check if the request has failed. If it has failed, ignore it. **/
       if (result[i] != Statement.EXECUTE_FAILED) {
@@ -140,24 +140,22 @@ public class NonQueryExecutor extends AbstractQPExecutor {
         try {
           String groupId = getGroupIdFromPhysicalPlan(plan);
           if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+
+            // This handles set storage group statements and role/user management statements.
             LOGGER.debug("Execute metadata group task");
             boolean executeResult = handleNonQueryRequest(groupId, plan);
-            nullReaderEnable = true;
-            result[i] =  executeResult ? Statement.SUCCESS_NO_INFO
+            emptyTaskEnable = true;
+            result[i] = executeResult ? Statement.SUCCESS_NO_INFO
                 : Statement.EXECUTE_FAILED;
             batchResult.setAllSuccessful(executeResult);
-          }else {
-            if (!physicalPlansMap.containsKey(groupId)) {
-              physicalPlansMap.put(groupId, new ArrayList<>());
-              planIndexMap.put(groupId, new ArrayList<>());
-            }
-            physicalPlansMap.get(groupId).add(plan);
-            planIndexMap.get(groupId).add(i);
+          } else {
+            physicalPlansMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(plan);
+            planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(i);
           }
         } catch (PathErrorException | ProcessorException | IOException | RaftConnectionException | InterruptedException e) {
           result[i] = Statement.EXECUTE_FAILED;
           batchResult.setAllSuccessful(false);
-          batchResult.setBatchErrorMessage(e.getMessage());
+          batchResult.addBatchErrorMessage(i, e.getMessage());
           LOGGER.error(e.getMessage());
         }
       }
@@ -170,7 +168,7 @@ public class NonQueryExecutor extends AbstractQPExecutor {
   private void constructMultipleRequests(Map<String, List<PhysicalPlan>> physicalPlansMap,
       Map<String, List<Integer>> planIndexMap, Map<String, SingleQPTask> subTaskMap,
       BatchResult batchResult) {
-    int[] result = batchResult.getResult();
+    int[] result = batchResult.getResultArray();
     for (Entry<String, List<PhysicalPlan>> entry : physicalPlansMap.entrySet()) {
       String groupId = entry.getKey();
       SingleQPTask singleQPTask;
@@ -182,7 +180,9 @@ public class NonQueryExecutor extends AbstractQPExecutor {
         subTaskMap.put(groupId, singleQPTask);
       } catch (IOException e) {
         batchResult.setAllSuccessful(false);
-        batchResult.setBatchErrorMessage(e.getMessage());
+        for (int index : planIndexMap.get(groupId)) {
+          batchResult.addBatchErrorMessage(index, e.getMessage());
+        }
         for (int index : planIndexMap.get(groupId)) {
           result[index] = Statement.EXECUTE_FAILED;
         }
@@ -237,13 +237,13 @@ public class NonQueryExecutor extends AbstractQPExecutor {
       case CREATE_TIMESERIES:
       case SET_STORAGE_GROUP:
       case METADATA:
-        if(nullReaderEnable){
+        if (emptyTaskEnable) {
           Status nullReadTaskStatus = Status.OK();
           RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
-          if(!nullReadTaskStatus.isOk()){
+          if (!nullReadTaskStatus.isOk()) {
             throw new ProcessorException("Null read to metadata group failed");
           }
-          nullReaderEnable = false;
+          emptyTaskEnable = false;
         }
         groupId = getGroupIdFromMetadataPlan((MetadataPlan) plan);
         break;
@@ -311,23 +311,23 @@ public class NonQueryExecutor extends AbstractQPExecutor {
    */
   private boolean handleNonQueryRequest(String groupId, PhysicalPlan plan)
       throws IOException, RaftConnectionException, InterruptedException {
-    List<PhysicalPlan> plans = new ArrayList<>();
-    plans.add(plan);
+    List<PhysicalPlan> plans = Collections.singletonList(plan);
     BasicRequest request;
     if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       request = new MetaGroupNonQueryRequest(groupId, plans);
     } else {
       request = new DataGroupNonQueryRequest(groupId, plans);
     }
-    SingleQPTask qpTask = new SingleQPTask(false, request);
+    SingleQPTask qpTask = new SingleQPTask(true, request);
     currentTask.set(qpTask);
 
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
       return handleNonQueryRequestLocally(groupId, qpTask);
     } else {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
-      return asyncHandleNonQueryTask(qpTask, leader);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
+      qpTask.setTargetNode(leader);
+      return syncHandleSingleTask(qpTask, "execute non-query", groupId);
     }
   }
 
@@ -351,20 +351,4 @@ public class NonQueryExecutor extends AbstractQPExecutor {
     /** Apply qpTask to Raft Node **/
     return RaftUtils.executeRaftTaskForLocalProcessor(service, qpTask, response);
   }
-
-
-
-  /**
-   * Async handle task by QPTask and leader id.
-   *
-   * @param task request QPTask
-   * @param leader leader of the target raft group
-   * @return request result
-   */
-  public boolean asyncHandleNonQueryTask(SingleQPTask task, PeerId leader)
-      throws RaftConnectionException, InterruptedException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response != null && response.isSuccess();
-  }
-
 }
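
The computeIfAbsent rewrite above replaces the manual containsKey/put dance. Condensed, the
classification step amounts to (checked exceptions from getGroupIdFromPhysicalPlan elided):

    // Bucket each plan, and its original batch index, by the raft group that owns it.
    Map<String, List<PhysicalPlan>> plansByGroup = new HashMap<>();
    Map<String, List<Integer>> indexByGroup = new HashMap<>();
    for (int i = 0; i < physicalPlans.length; i++) {
      String groupId = getGroupIdFromPhysicalPlan(physicalPlans[i]);
      plansByGroup.computeIfAbsent(groupId, k -> new ArrayList<>()).add(physicalPlans[i]);
      indexByGroup.computeIfAbsent(groupId, k -> new ArrayList<>()).add(i);
    }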
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
index 82325e1..ce4b920 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@ -22,16 +22,19 @@ import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import com.alipay.sofa.jraft.entity.PeerId;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.BatchQPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
@@ -45,6 +48,7 @@ import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsRespon
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryStorageGroupResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -104,19 +108,19 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         StringBuilder path = new StringBuilder();
         String[] storageGroupNodes = storageGroup.split(DOUB_SEPARATOR);
         String[] queryPathNodes = queryPath.split(DOUB_SEPARATOR);
-        for(int  i = 0 ; i < queryPathNodes.length ; i++){
-          if(i >= storageGroupNodes.length){
+        for (int i = 0; i < queryPathNodes.length; i++) {
+          if (i >= storageGroupNodes.length) {
             path.append(queryPathNodes[i]).append(SINGLE_SEPARATOR);
           } else {
             path.append(storageGroupNodes[i]).append(SINGLE_SEPARATOR);
           }
         }
-        paths.add(path.deleteCharAt(path.length()-1).toString());
+        paths.add(path.deleteCharAt(path.length() - 1).toString());
       }
     }
     return paths;
   }
-  
+
   /**
    * Handle query timeseries in one data group
    *
@@ -132,59 +136,84 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     PeerId holder;
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-      LOGGER.debug("Execute show timeseries {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      LOGGER.debug(
+          "Execute show timeseries {} statement locally for group {} by sending request to local node.",
+          pathList, groupId);
       holder = this.server.getServerId();
     } else {
-      holder = RaftUtils.getRandomPeerID(groupId);
+      holder = RaftUtils.getPeerIDInOrder(groupId);
     }
+    task.setTargetNode(holder);
     try {
-      res.addAll(queryTimeSeries(task, holder));
+      LOGGER.debug("Send show timeseries {} task for group {} to node {}.", pathList, groupId,
+          holder);
+      res.addAll(queryTimeSeries(task, pathList, groupId));
     } catch (RaftConnectionException e) {
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     }
   }
 
+  private List<List<String>> queryTimeSeries(SingleQPTask task, List<String> pathList, String groupId)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "query timeseries " + pathList, groupId);
+    return response == null ? new ArrayList<>()
+        : ((QueryTimeSeriesResponse) response).getTimeSeries();
+  }
+
   public String processMetadataInStringQuery()
       throws InterruptedException, ProcessorException {
     Set<String> groupIdSet = router.getAllGroupId();
 
     List<String> metadataList = new ArrayList<>(groupIdSet.size());
-    List<SingleQPTask> taskList = new ArrayList<>();
+
+    BatchResult batchResult = new BatchResult(true, new StringBuilder(), new int[groupIdSet.size()]);
+    Map<String, List<Integer>> planIndexMap = new HashMap<>();
+    Map<String, SingleQPTask> subTaskMap = new HashMap<>();
+
+    int index = 0;
     for (String groupId : groupIdSet) {
       QueryMetadataInStringRequest request = new QueryMetadataInStringRequest(groupId,
           getReadMetadataConsistencyLevel());
       SingleQPTask task = new SingleQPTask(false, request);
-      taskList.add(task);
 
       LOGGER.debug("Execute show metadata in string statement for group {}.", groupId);
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute show metadata in string statement locally for group {} by sending request to local node.", groupId);
+        LOGGER.debug(
+            "Execute show metadata in string statement locally for group {} by sending request to local node.",
+            groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
-      }
-      try {
-        asyncSendNonQuerySingleTask(task, holder, 0);
-      } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
+      subTaskMap.put(groupId, task);
+      planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(index++);
     }
-    for (int i = 0; i < taskList.size(); i++) {
-      SingleQPTask task = taskList.get(i);
-      task.await();
-      BasicResponse response = task.getResponse();
+
+    BatchQPTask batchTask = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
+    currentTask.set(batchTask);
+    batchTask.executeQueryMetadataBy(this, "show metadata in string");
+    batchTask.await();
+
+    for (SingleQPTask subTask : subTaskMap.values()) {
+      BasicResponse response = subTask.getResponse();
       if (response == null || !response.isSuccess()) {
-        throw new ProcessorException();
+        String errorMessage = "response is null";
+        if (response != null && response.getErrorMsg() != null) {
+          errorMessage = response.getErrorMsg();
+        }
+        throw new ProcessorException(
+            "Execute show metadata in string statement fail because " + errorMessage);
       }
-      metadataList.add(((QueryMetadataInStringResponse)response).getMetadata());
+      metadataList.add(((QueryMetadataInStringResponse) response).getMetadata());
     }
     return combineMetadataInStringList(metadataList);
   }
 
   public Metadata processMetadataQuery()
-      throws InterruptedException, ProcessorException, PathErrorException {
+      throws InterruptedException, ProcessorException {
     Set<String> groupIdSet = router.getAllGroupId();
 
     Metadata[] metadatas = new Metadata[groupIdSet.size()];
@@ -199,15 +228,46 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute query metadata statement locally for group {} by sending request to local node.", groupId);
+        LOGGER.debug(
+            "Execute query metadata statement locally for group {} by sending request to local node.",
+            groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
       try {
-        asyncSendNonQuerySingleTask(task, holder, 0);
+        LOGGER.debug("Send query metadata task for group {} to node {}.", groupId, holder);
+        asyncSendSingleTask(task, 0);
       } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        boolean success = false;
+        while (!success) {
+          PeerId nextNode = null;
+          try {
+            nextNode = RaftUtils.getPeerIDInOrder(groupId);
+            if (holder.equals(nextNode)) {
+              break;
+            }
+            LOGGER
+                .debug("Previous task fail, then send query metadata task for group {} to node {}.",
+                    groupId, nextNode);
+            task.resetTask();
+            task.setTargetNode(nextNode);
+            task.setTaskState(TaskState.INITIAL);
+            asyncSendSingleTask(task, 0);
+            LOGGER.debug("Query metadata task for group {} to node {} succeed.", groupId, nextNode);
+            success = true;
+          } catch (RaftConnectionException e1) {
+            LOGGER.debug("Query metadata task for group {} to node {} fail.", groupId, nextNode);
+          }
+        }
+        LOGGER.debug("The final result for query metadata task is {}", success);
+        if (!success) {
+          throw new ProcessorException(String
+              .format(
+                  "Cannot query metadata on any node of group <%s>, please check cluster status.",
+                  groupId));
+        }
       }
     }
     for (int i = 0; i < taskList.size(); i++) {
@@ -219,9 +279,10 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         if (response != null && response.getErrorMsg() != null) {
           errorMessage = response.getErrorMsg();
         }
-        throw new ProcessorException("Execute query metadata statement false because " + errorMessage);
+        throw new ProcessorException(
+            "Execute query metadata statement fail because " + errorMessage);
       }
-      metadatas[i] = ((QueryMetadataResponse)response).getMetadata();
+      metadatas[i] = ((QueryMetadataResponse) response).getMetadata();
     }
     return Metadata.combineMetadatas(metadatas);
   }
@@ -242,20 +303,32 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
       PeerId holder;
       /** Check if the plan can be executed locally. **/
       if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        LOGGER.debug("Execute get series type for {} statement locally for group {} by sending request to local node.", path, groupId);
+        LOGGER.debug(
+            "Execute get series type for {} statement locally for group {} by sending request to local node.",
+            path, groupId);
         holder = this.server.getServerId();
       } else {
-        holder = RaftUtils.getRandomPeerID(groupId);
+        holder = RaftUtils.getPeerIDInOrder(groupId);
       }
+      task.setTargetNode(holder);
       try {
-        dataType = querySeriesType(task, holder);
+        LOGGER.debug("Send get series type for {} task for group {} to node {}.", path, groupId,
+            holder);
+        dataType = querySeriesType(task, path, groupId);
       } catch (RaftConnectionException e) {
-        throw new ProcessorException("Raft connection occurs error.", e);
+        throw new ProcessorException(e.getMessage());
       }
     }
     return dataType;
   }
 
+  private TSDataType querySeriesType(SingleQPTask task, String path, String groupId)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "get series type for " + path, groupId);
+    return response == null ? null
+        : ((QuerySeriesTypeResponse) response).getDataType();
+  }
+
   /**
    * Handle show timeseries <path> statement
    */
@@ -291,30 +364,28 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     PeerId holder;
     /** Check if the plan can be executed locally. **/
     if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-      LOGGER.debug("Execute get paths for {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      LOGGER.debug(
+          "Execute get paths for {} statement locally for group {} by sending request to local node.",
+          pathList, groupId);
       holder = this.server.getServerId();
     } else {
-      holder = RaftUtils.getRandomPeerID(groupId);
+      holder = RaftUtils.getPeerIDInOrder(groupId);
     }
+    task.setTargetNode(holder);
     try {
-      res.addAll(queryPaths(task, holder));
+      LOGGER
+          .debug("Send get paths for {} task for group {} to node {}.", pathList, groupId, holder);
+      res.addAll(queryPaths(task, pathList, groupId));
     } catch (RaftConnectionException e) {
-      throw new ProcessorException("Raft connection occurs error.", e);
+      throw new ProcessorException(e.getMessage());
     }
   }
 
-  private List<List<String>> queryTimeSeries(SingleQPTask task, PeerId leader)
+  private List<String> queryPaths(SingleQPTask task, List<String> pathList, String groupId)
       throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
+    BasicResponse response = syncHandleSingleTaskGetRes(task, 0, "get paths for " + pathList, groupId);
     return response == null ? new ArrayList<>()
-        : ((QueryTimeSeriesResponse) response).getTimeSeries();
-  }
-
-  private TSDataType querySeriesType(SingleQPTask task, PeerId leader)
-      throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response == null ? null
-        : ((QuerySeriesTypeResponse) response).getDataType();
+        : ((QueryPathsResponse) response).getPaths();
   }
 
   /**
@@ -328,15 +399,11 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
         ClusterConfig.METADATA_GROUP_ID, getReadMetadataConsistencyLevel());
     SingleQPTask task = new SingleQPTask(false, request);
     MetadataRaftHolder metadataHolder = (MetadataRaftHolder) server.getMetadataHolder();
-    if (getReadMetadataConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (getReadMetadataConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryStorageGroupResponse response;
-      try {
-        response = QueryStorageGroupResponse
-            .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
-      } catch (final PathErrorException e) {
-        response = QueryStorageGroupResponse.createErrorResponse(e.getMessage());
-      }
-      task.run(response);
+      response = QueryStorageGroupResponse
+          .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
+      task.receive(response);
     } else {
       ((RaftService) metadataHolder.getService()).getNode()
           .readIndex(reqContext, new ReadIndexClosure() {
@@ -345,16 +412,12 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
             public void run(Status status, long index, byte[] reqCtx) {
               QueryStorageGroupResponse response;
               if (status.isOk()) {
-                try {
-                  response = QueryStorageGroupResponse
-                      .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
-                } catch (final PathErrorException e) {
-                  response = QueryStorageGroupResponse.createErrorResponse(e.getMessage());
-                }
+                response = QueryStorageGroupResponse
+                    .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
               } else {
                 response = QueryStorageGroupResponse.createErrorResponse(status.getErrorMsg());
               }
-              task.run(response);
+              task.receive(response);
             }
           });
     }
@@ -362,13 +425,6 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
     return ((QueryStorageGroupResponse) task.getResponse()).getStorageGroups();
   }
 
-  private List<String> queryPaths(SingleQPTask task, PeerId leader)
-      throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
-    return response == null ? new ArrayList<>()
-        : ((QueryPathsResponse) response).getPaths();
-  }
-
   /**
    * Combine multiple metadata in String format into single String
    *
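
The consistency-level branch in getAllStorageGroupNames is the standard jraft split between a weak
read (answer straight from the local state machine) and a strong read (a linearizable readIndex
round). Condensed from the hunk above:

    // Weak read: serve from the local state machine, no raft round trip.
    // Strong read: go through jraft's readIndex so the answer is linearizable.
    if (getReadMetadataConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
      task.receive(QueryStorageGroupResponse
          .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups()));
    } else {
      ((RaftService) metadataHolder.getService()).getNode()
          .readIndex(reqContext, new ReadIndexClosure() {
            @Override
            public void run(Status status, long index, byte[] reqCtx) {
              task.receive(status.isOk()
                  ? QueryStorageGroupResponse.createSuccessResponse(
                      metadataHolder.getFsm().getAllStorageGroups())
                  : QueryStorageGroupResponse.createErrorResponse(status.getErrorMsg()));
            }
          });
    }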
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
index 43edd67..8ede57d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
@@ -26,9 +26,11 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.Future;
 import java.util.concurrent.locks.ReentrantLock;
-import org.apache.iotdb.cluster.concurrent.pool.QPTaskManager;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.executor.AbstractQPExecutor;
 import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
@@ -38,7 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Execute batch statement task. It's thread-safe.
+ * Execute batch statement tasks. It's thread-safe.
  */
 public class BatchQPTask extends MultiQPTask {
 
@@ -46,40 +48,33 @@ public class BatchQPTask extends MultiQPTask {
   private static final Logger LOGGER = LoggerFactory.getLogger(BatchQPTask.class);
 
   /**
-   * Record the index of physical plans in a data group. The index means the position in batchResult
+   * Record the index of physical plans in a data group. The index means the position in the result array.
    * String: group id
    */
   private Map<String, List<Integer>> planIndexMap;
 
   /**
-   * Batch result
-   */
-  private int[] batchResult;
-
-  /**
-   * Mark if the batch is all successful.
+   * Batch result array; each entry marks the result type, as recorded in BatchResult
    */
-  private boolean isAllSuccessful;
+  private int[] resultArray;
 
   /**
-   * Batch error message.
+   * Batch result
    */
-  private String batchErrorMessage;
+  private BatchResult batchResult;
 
   /**
    * Lock to update result
    */
   private ReentrantLock lock = new ReentrantLock();
 
-  private NonQueryExecutor executor;
-
+  private AbstractQPExecutor executor;
 
-  public BatchQPTask(int taskNum, BatchResult batchResult, Map<String, SingleQPTask> taskMap,
+  public BatchQPTask(int taskNum, BatchResult result, Map<String, SingleQPTask> taskMap,
       Map<String, List<Integer>> planIndexMap) {
     super(false, taskNum, TaskType.BATCH);
-    this.batchResult = batchResult.getResult();
-    this.isAllSuccessful = batchResult.isAllSuccessful();
-    this.batchErrorMessage = batchResult.getBatchErrorMessage();
+    this.resultArray = result.getResultArray();
+    this.batchResult = result;
     this.taskMap = taskMap;
     this.planIndexMap = planIndexMap;
     this.taskThreadMap = new HashMap<>();
@@ -91,7 +86,7 @@ public class BatchQPTask extends MultiQPTask {
    * @param basicResponse response from receiver
    */
   @Override
-  public void run(BasicResponse basicResponse) {
+  public void receive(BasicResponse basicResponse) {
     lock.lock();
     try {
       String groupId = basicResponse.getGroupId();
@@ -99,23 +94,55 @@ public class BatchQPTask extends MultiQPTask {
       List<Integer> indexList = planIndexMap.get(groupId);
       for (int i = 0; i < indexList.size(); i++) {
         if (i >= results.size()) {
-          batchResult[indexList.get(i)] = Statement.EXECUTE_FAILED;
+          resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+          batchResult.addBatchErrorMessage(indexList.get(i), basicResponse.getErrorMsg());
         } else {
-          batchResult[indexList.get(i)] =
-              results.get(i) ? Statement.SUCCESS_NO_INFO : Statement.EXECUTE_FAILED;
+          if (results.get(i)) {
+            resultArray[indexList.get(i)] = Statement.SUCCESS_NO_INFO;
+          } else {
+            resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+            batchResult.addBatchErrorMessage(indexList.get(i), basicResponse.getErrorMsg());
+          }
         }
       }
       if (!basicResponse.isSuccess()) {
-        isAllSuccessful = false;
-        batchErrorMessage = basicResponse.getErrorMsg();
+        batchResult.setAllSuccessful(false);
       }
+    } catch (Exception ex) {
+      LOGGER.error("Execute batch statement occurs error.", ex);
     } finally {
       lock.unlock();
     }
     taskCountDownLatch.countDown();
   }
 
-  public void execute(NonQueryExecutor executor) {
+  public void executeQueryMetadataBy(QueryMetadataExecutor executor, String taskInfo) {
+    this.executor = executor;
+
+    for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
+      String groupId = entry.getKey();
+      SingleQPTask subTask = entry.getValue();
+      Future<?> taskThread;
+      taskThread = QPTaskThreadManager.getInstance()
+          .submit(() -> executeRpcSubQueryMetadataTask(subTask, taskInfo, groupId));
+      taskThreadMap.put(groupId, taskThread);
+    }
+  }
+
+  /**
+   * Execute RPC sub task
+   */
+  private void executeRpcSubQueryMetadataTask(SingleQPTask subTask, String taskInfo, String groupId) {
+    try {
+      executor.syncHandleSingleTask(subTask, taskInfo, groupId);
+      this.receive(subTask.getResponse());
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Async handle sub {} task failed.", taskInfo);
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+    }
+  }
+
+  public void executeBy(NonQueryExecutor executor) {
     this.executor = executor;
 
     for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
@@ -123,12 +150,13 @@ public class BatchQPTask extends MultiQPTask {
       SingleQPTask subTask = entry.getValue();
       Future<?> taskThread;
       if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
-        taskThread = QPTaskManager.getInstance()
+        taskThread = QPTaskThreadManager.getInstance()
             .submit(() -> executeLocalSubTask(subTask, groupId));
       } else {
-        PeerId leader = RaftUtils.getLeaderPeerID(groupId);
-        taskThread = QPTaskManager.getInstance()
-            .submit(() -> executeRpcSubTask(subTask, leader, groupId));
+        PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
+        subTask.setTargetNode(leader);
+        taskThread = QPTaskThreadManager.getInstance()
+            .submit(() -> executeRpcSubTask(subTask, groupId));
       }
       taskThreadMap.put(groupId, taskThread);
     }
@@ -139,40 +167,24 @@ public class BatchQPTask extends MultiQPTask {
    */
   private void executeLocalSubTask(QPTask subTask, String groupId) {
     try {
-      executor.handleNonQueryRequestLocally(groupId, subTask);
-      this.run(subTask.getResponse());
+      ((NonQueryExecutor) executor).handleNonQueryRequestLocally(groupId, subTask);
+      this.receive(subTask.getResponse());
     } catch (InterruptedException e) {
       LOGGER.error("Handle sub task locally failed.");
-      this.run(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
     }
   }
 
   /**
    * Execute RPC sub task
    */
-  private void executeRpcSubTask(SingleQPTask subTask, PeerId leader, String groupId) {
+  private void executeRpcSubTask(SingleQPTask subTask, String groupId) {
     try {
-      executor.asyncHandleNonQueryTask(subTask, leader);
-      this.run(subTask.getResponse());
+      executor.syncHandleSingleTask(subTask, "execute sub non-query", groupId);
+      this.receive(subTask.getResponse());
     } catch (RaftConnectionException | InterruptedException e) {
       LOGGER.error("Async handle sub task failed.");
-      this.run(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
     }
   }
-
-  public boolean isAllSuccessful() {
-    return isAllSuccessful;
-  }
-
-  public void setAllSuccessful(boolean allSuccessful) {
-    isAllSuccessful = allSuccessful;
-  }
-
-  public String getBatchErrorMessage() {
-    return batchErrorMessage;
-  }
-
-  public void setBatchErrorMessage(String batchErrorMessage) {
-    this.batchErrorMessage = batchErrorMessage;
-  }
 }
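
BatchQPTask is a fan-out/fan-in: one SingleQPTask per raft group is submitted to
QPTaskThreadManager, each completion funnels through receive(), which counts down the latch that
await() blocks on. A generic, self-contained skeleton of that pattern (the pool size is an
arbitrary sample value, not the real QPTaskThreadManager configuration):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class FanOutDemo {
      static void runAll(List<Runnable> subTasks) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4); // assumed pool size
        CountDownLatch latch = new CountDownLatch(subTasks.size());
        for (Runnable subTask : subTasks) {
          pool.submit(() -> {
            try {
              subTask.run();     // send the per-group request and record its result
            } finally {
              latch.countDown(); // mirrors receive() -> taskCountDownLatch.countDown()
            }
          });
        }
        latch.await();           // mirrors BatchQPTask.await()
        pool.shutdown();
      }
    }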
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
similarity index 75%
rename from spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
index fc9f177..f861f55 100644
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/QueryOperatorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
@@ -16,14 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.exception;
+package org.apache.iotdb.cluster.qp.task;
 
-public class QueryOperatorException extends LogicalOptimizeException {
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  private static final long serialVersionUID = 8581594261924961899L;
+public class DataQueryTask extends SingleQPTask {
 
-  public QueryOperatorException(String msg) {
-    super(msg);
+  public DataQueryTask(boolean isSyncTask,
+      BasicRequest request) {
+    super(isSyncTask, request);
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
index 96a517a..f3182c9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
@@ -18,6 +18,7 @@
  */
 package org.apache.iotdb.cluster.qp.task;
 
+import com.alipay.sofa.jraft.entity.PeerId;
 import java.util.concurrent.CountDownLatch;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
@@ -36,6 +37,11 @@ public abstract class QPTask {
   protected BasicRequest request;
 
   /**
+   * The target peer of this task
+   */
+  protected PeerId targetNode;
+
+  /**
    * Whether it's a synchronization task or not.
    */
   boolean isSyncTask;
@@ -78,7 +84,7 @@ public abstract class QPTask {
    *
    * @param basicResponse response from receiver
    */
-  public abstract void run(BasicResponse basicResponse);
+  public abstract void receive(BasicResponse basicResponse);
 
   public boolean isSyncTask() {
     return isSyncTask;
@@ -94,6 +100,7 @@ public abstract class QPTask {
 
   public void resetTask() {
     this.taskCountDownLatch = new CountDownLatch(taskNum);
+    this.taskState = TaskState.INITIAL;
   }
 
   public TaskState getTaskState() {
@@ -122,11 +129,44 @@ public abstract class QPTask {
   }
 
   public enum TaskState {
-    INITIAL, REDIRECT, FINISH, EXCEPTION
+
+    /**
+     * Initial state
+     */
+    INITIAL,
+
+    /**
+     * Redirect to the leader

+     */
+    REDIRECT,
+
+    /**
+     * Task finished
+     */
+    FINISH,
+
+    /**
+     * An exception occurred on the remote node
+     */
+    EXCEPTION,
+
+    /**
+     * Cannot connect to the remote node
+     */
+    RAFT_CONNECTION_EXCEPTION
   }
 
   public enum TaskType {
-    SINGLE, BATCH
+
+    /**
+     * Single task
+     */
+    SINGLE,
+
+    /**
+     * Batch task
+     */
+    BATCH
   }
 
   /**
@@ -137,4 +177,12 @@ public abstract class QPTask {
   }
 
   public abstract void shutdown();
+
+  public PeerId getTargetNode() {
+    return targetNode;
+  }
+
+  public void setTargetNode(PeerId targetNode) {
+    this.targetNode = targetNode;
+  }
 }
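
The enriched TaskState enum above now distinguishes a remote-side error (EXCEPTION) from an unreachable peer (RAFT_CONNECTION_EXCEPTION). A hedged sketch of how a caller might branch on those states; the enum is a local copy of QPTask.TaskState, and the returned strings merely describe the intended reactions.

public class TaskStateSketch {
  enum TaskState { INITIAL, REDIRECT, FINISH, EXCEPTION, RAFT_CONNECTION_EXCEPTION }

  static String handle(TaskState state) {
    switch (state) {
      case FINISH:
        return "task completed, read the response";
      case REDIRECT:
        return "update targetNode to the new leader and resend";
      case RAFT_CONNECTION_EXCEPTION:
        return "remote node unreachable, try another peer or fail";
      case EXCEPTION:
        return "remote node raised an error, surface it to the client";
      default:
        return "task not started yet";
    }
  }

  public static void main(String[] args) {
    System.out.println(handle(TaskState.REDIRECT));
  }
}
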
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
index 805834e..16ddf60 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
@@ -20,16 +20,12 @@ package org.apache.iotdb.cluster.qp.task;
 
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Process single task.
+ * Process a task for a single raft group; used for all operations except data queries.
  */
 public class SingleQPTask extends QPTask {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(SingleQPTask.class);
-
   private static final int TASK_NUM = 1;
 
   public SingleQPTask(boolean isSyncTask, BasicRequest request) {
@@ -41,11 +37,11 @@ public class SingleQPTask extends QPTask {
    * Process response. If it's necessary to redirect leader, redo the task.
    */
   @Override
-  public void run(BasicResponse response) {
+  public void receive(BasicResponse response) {
     if(taskState != TaskState.EXCEPTION) {
       this.response = response;
       if(response == null){
-        LOGGER.error("Response is null");
+        this.taskState = TaskState.RAFT_CONNECTION_EXCEPTION;
       } else if (response.isRedirected()) {
         this.taskState = TaskState.REDIRECT;
       } else {
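
With receive() replacing run(), a null response now marks a connection failure instead of only being logged, and a redirected response asks the caller to resend to the new leader. A small, self-contained retry-loop sketch of those semantics; Task and its methods are hypothetical stand-ins for SingleQPTask.

public class RedirectLoopSketch {
  enum State { FINISH, REDIRECT, CONNECTION_FAILED }

  interface Task {
    State sendTo(String node);
    String newLeaderHint();
  }

  static boolean runWithRedirect(Task task, String firstTarget, int maxHops) {
    String target = firstTarget;
    for (int hop = 0; hop < maxHops; hop++) {
      State state = task.sendTo(target);
      if (state == State.FINISH) {
        return true;
      }
      if (state == State.CONNECTION_FAILED) {
        return false; // peer unreachable: give up or pick another peer
      }
      target = task.newLeaderHint(); // REDIRECT: follow the leader hint and retry
    }
    return false;
  }
}
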
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java
new file mode 100644
index 0000000..699db3e
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/ClusterNullableBatchData.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.common;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * <code>ClusterNullableBatchData</code> is a custom data structure used in the cluster query
+ * process for fill and group-by queries; its list of TimeValuePair entries may contain
+ * <code>null</code> values.
+ */
+public class ClusterNullableBatchData extends BatchData {
+
+  private List<TimeValuePair> timeValuePairList;
+  private int index;
+
+  public ClusterNullableBatchData() {
+    this.timeValuePairList = new ArrayList<>();
+    this.index = 0;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return index < timeValuePairList.size();
+  }
+
+  @Override
+  public void next() {
+    index++;
+  }
+
+  @Override
+  public long currentTime() {
+    rangeCheckForTime(index);
+    return timeValuePairList.get(index).getTimestamp();
+  }
+
+  @Override
+  public Object currentValue() {
+    if (index < length()) {
+      return timeValuePairList.get(index).getValue() == null ? null
+          : timeValuePairList.get(index).getValue().getValue();
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public int length() {
+    return timeValuePairList.size();
+  }
+
+  public TimeValuePair getCurrentTimeValuePair() {
+    return index < length() ? timeValuePairList.get(index) : null;
+  }
+
+  public void addTimeValuePair(TimeValuePair timeValuePair) {
+    timeValuePairList.add(timeValuePair);
+  }
+}
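
ClusterNullableBatchData exists because fill and group-by results can contain timestamps with no value. A tiny usage sketch of that idea; Pair is a local stand-in for TimeValuePair, and consumers must check for null before unwrapping.

import java.util.ArrayList;
import java.util.List;

public class NullableBatchSketch {
  static final class Pair {
    final long t;
    final Object v;
    Pair(long t, Object v) { this.t = t; this.v = v; }
  }

  public static void main(String[] args) {
    List<Pair> batch = new ArrayList<>();
    batch.add(new Pair(100L, 1.5));
    batch.add(new Pair(200L, null)); // empty interval: timestamp with no value
    batch.add(new Pair(300L, 2.5));
    for (Pair p : batch) {
      System.out.println(p.t + " -> " + (p.v == null ? "null" : p.v));
    }
  }
}
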
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
index f3e4eaf..5a06ca8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
@@ -141,9 +141,6 @@ public class ClusterDataSetWithTimeGenerator extends QueryDataSet {
         }
       }
     }
-    if (cachedBatchTimestamp != null && cachedBatchTimestamp.hasNext()) {
-      return true;
-    }
-    return false;
+    return cachedBatchTimestamp != null && cachedBatchTimestamp.hasNext();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java
new file mode 100644
index 0000000..599439a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithOnlyTimeFilter.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.dataset;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.query.aggregation.AggreResultData;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.read.common.Field;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+/**
+ * Handle group-by queries with only a time filter.
+ */
+public class ClusterGroupByDataSetWithOnlyTimeFilter extends GroupByWithOnlyTimeFilterDataSet {
+
+  private ClusterRpcSingleQueryManager queryManager;
+  private List<IPointReader> readersOfSelectedSeries;
+
+  /**
+   * constructor.
+   */
+  public ClusterGroupByDataSetWithOnlyTimeFilter(long jobId,
+      List<Path> paths, long unit, long origin,
+      List<Pair<Long, Long>> mergedIntervals, ClusterRpcSingleQueryManager queryManager) {
+    super(jobId, paths, unit, origin, mergedIntervals);
+    this.queryManager = queryManager;
+    this.readersOfSelectedSeries = new ArrayList<>();
+  }
+
+
+  /**
+   * init reader and aggregate function.
+   */
+  @Override
+  public void initGroupBy(QueryContext context, List<String> aggres, IExpression expression)
+      throws FileNodeManagerException, PathErrorException, ProcessorException, IOException {
+    initAggreFuction(aggres);
+
+    /** add query token for select series which can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+    if (expression != null) {
+      timeFilter = ((GlobalTimeExpression) expression).getFilter();
+    }
+
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
+    // track the select series reader index per group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      Path path = selectedSeries.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
+        readersOfSelectedSeries.add(reader);
+        selectSeriesReaderIndex.put(groupId, index + 1);
+      } else {
+        readersOfSelectedSeries.add(null);
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(selectedSeries.get(i), context);
+
+        // sequence reader for sealed tsfile, unsealed tsfile, memory
+        SequenceDataReader sequenceReader = new SequenceDataReader(
+            queryDataSource.getSeqDataSource(),
+            timeFilter, context, false);
+
+        // unseq reader for all chunk groups in unSeqFile, memory
+        PriorityMergeReader unSeqMergeReader = SeriesReaderFactory.getInstance()
+            .createUnSeqMergeReader(queryDataSource.getOverflowSeriesDataSource(), timeFilter);
+
+        sequenceReaderList.add(sequenceReader);
+        unSequenceReaderList.add(unSeqMergeReader);
+      }
+    }
+  }
+
+  @Override
+  public RowRecord next() throws IOException {
+    if (!hasCachedTimeInterval) {
+      throw new IOException("need to call hasNext() before calling next() "
+          + "in GroupByWithOnlyTimeFilterDataSet.");
+    }
+    hasCachedTimeInterval = false;
+    RowRecord record = new RowRecord(startTime);
+    for (int i = 0; i < functions.size(); i++) {
+      IPointReader reader = readersOfSelectedSeries.get(i);
+      if (reader != null) {
+        TimeValuePair timeValuePair = reader.next();
+        if (timeValuePair == null) {
+          record.addField(new Field(null));
+        } else {
+          record.addField(getField(timeValuePair.getValue().getValue(), dataTypes.get(i)));
+        }
+      } else {
+        AggreResultData res;
+        try {
+          res = nextSeries(i);
+        } catch (ProcessorException e) {
+          throw new IOException(e);
+        }
+        if (res == null) {
+          record.addField(new Field(null));
+        } else {
+          record.addField(getField(res));
+        }
+      }
+    }
+    return record;
+  }
+}
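
initGroupBy above walks the selected paths in order and, for paths served by a remote group, picks the next reader from that group's ordered reader list via a per-group cursor. A self-contained sketch of that bookkeeping; getGroupId and the string "readers" are simplified stand-ins for the cluster types.

import java.util.*;

public class ReaderIndexSketch {
  static String pickReader(String path, Map<String, List<String>> readersByGroup,
      Map<String, Integer> cursor) {
    String groupId = getGroupId(path);
    List<String> readers = readersByGroup.get(groupId);
    if (readers == null) {
      return "local-reader(" + path + ")"; // group handled locally
    }
    int i = cursor.getOrDefault(groupId, 0);
    cursor.put(groupId, i + 1); // advance the cursor for the next path in this group
    return readers.get(i);
  }

  static String getGroupId(String path) { return path.split("\\.")[1]; }

  public static void main(String[] args) {
    Map<String, List<String>> byGroup = new HashMap<>();
    byGroup.put("g1", Arrays.asList("remote-r0", "remote-r1"));
    Map<String, Integer> cursor = new HashMap<>();
    System.out.println(pickReader("root.g1.s1", byGroup, cursor)); // remote-r0
    System.out.println(pickReader("root.g2.s1", byGroup, cursor)); // local
    System.out.println(pickReader("root.g1.s2", byGroup, cursor)); // remote-r1
  }
}
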
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
similarity index 53%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
index f7ffa29..89ed1b9 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterGroupByDataSetWithTimeGenerator.java
@@ -16,69 +16,80 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iotdb.db.query.dataset.groupby;
+package org.apache.iotdb.cluster.query.dataset;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import java.util.Set;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.aggregation.AggregateFunction;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
-import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
-import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
-import org.apache.iotdb.db.query.timegenerator.EngineTimeGenerator;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithValueFilterDataSet;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.common.RowRecord;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 import org.apache.iotdb.tsfile.utils.Pair;
 
-public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
+public class ClusterGroupByDataSetWithTimeGenerator extends GroupByWithValueFilterDataSet {
 
-  private List<EngineReaderByTimeStamp> allDataReaderList;
-  private TimeGenerator timestampGenerator;
-  /**
-   * cached timestamp for next group by partition.
-   */
-  private long timestamp;
-  /**
-   * if this object has cached timestamp for next group by partition.
-   */
-  private boolean hasCachedTimestamp;
+  private ClusterRpcSingleQueryManager queryManager;
 
-  /**
-   * group by batch calculation size.
-   */
-  private int timeStampFetchSize;
+  private List<TSDataType> selectSeriesDataTypes;
 
   /**
    * constructor.
    */
-  public GroupByWithValueFilterDataSet(long jobId, List<Path> paths, long unit, long origin,
-      List<Pair<Long, Long>> mergedIntervals) {
+  public ClusterGroupByDataSetWithTimeGenerator(long jobId,
+      List<Path> paths, long unit, long origin,
+      List<Pair<Long, Long>> mergedIntervals, ClusterRpcSingleQueryManager queryManager) {
     super(jobId, paths, unit, origin, mergedIntervals);
-    this.allDataReaderList = new ArrayList<>();
-    this.timeStampFetchSize = 10 * IoTDBDescriptor.getInstance().getConfig().getFetchSize();
+    this.queryManager = queryManager;
+    selectSeriesDataTypes = new ArrayList<>();
   }
 
   /**
    * init reader and aggregate function.
    */
+  @Override
   public void initGroupBy(QueryContext context, List<String> aggres, IExpression expression)
       throws FileNodeManagerException, PathErrorException, ProcessorException, IOException {
     initAggreFuction(aggres);
 
-    QueryResourceManager.getInstance().beginQueryOfGivenExpression(context.getJobId(), expression);
-    QueryResourceManager
-        .getInstance().beginQueryOfGivenQueryPaths(context.getJobId(), selectedSeries);
-    this.timestampGenerator = new EngineTimeGenerator(expression, context);
-    this.allDataReaderList = SeriesReaderFactory
-        .getByTimestampReadersOfSelectedPaths(selectedSeries, context);
+    /** add query token for filter series which can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), expression, deviceIdSet);
+
+    /** add query token for select series which can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    this.timestampGenerator = new ClusterTimeGenerator(expression, context, queryManager);
+    this.allDataReaderList = ClusterSeriesReaderFactory
+        .createReadersByTimestampOfSelectedPaths(selectedSeries, context, queryManager,
+            selectSeriesDataTypes);
   }
 
   @Override
@@ -92,7 +103,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       function.init();
     }
 
-    long[] timestampArray = new long[timeStampFetchSize];
+    long[] timestampArray = new long[timestampFetchSize];
     int timeArrayLength = 0;
     if (hasCachedTimestamp) {
       if (timestamp < endTime) {
@@ -107,6 +118,8 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       // construct timestamp array
       timeArrayLength = constructTimeArrayForOneCal(timestampArray, timeArrayLength);
 
+      fetchSelectDataFromRemoteNode(timeArrayLength, timestampArray);
+
       // cal result using timestamp array
       for (int i = 0; i < selectedSeries.size(); i++) {
         functions.get(i).calcAggregationUsingTimestamps(
@@ -121,6 +134,9 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       }
     }
 
+    // fetch select series data from remote node
+    fetchSelectDataFromRemoteNode(timeArrayLength, timestampArray);
+
     if (timeArrayLength > 0) {
       // cal result using timestamp array
       for (int i = 0; i < selectedSeries.size(); i++) {
@@ -132,6 +148,28 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
   }
 
   /**
+   * Fetch batch data of the select series for a batch of timestamps.
+   * @param timeArrayLength number of valid timestamps in the array
+   * @param timestampArray timestamp array
+   */
+  private void fetchSelectDataFromRemoteNode(int timeArrayLength, long[] timestampArray)
+      throws IOException {
+    if (timeArrayLength != 0) {
+      List<Long> batchTimestamp = new ArrayList<>();
+      for (int i = 0; i < timeArrayLength; i++) {
+        batchTimestamp.add(timestampArray[i]);
+      }
+
+      try {
+        queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+      } catch (RaftConnectionException e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  /**
    * construct an array of timestamps for one batch of a group by partition calculating.
    *
    * @param timestampArray timestamp array
@@ -140,7 +178,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
    */
   private int constructTimeArrayForOneCal(long[] timestampArray, int timeArrayLength)
       throws IOException {
-    for (int cnt = 1; cnt < timeStampFetchSize && timestampGenerator.hasNext(); cnt++) {
+    for (int cnt = 1; cnt < timestampFetchSize && timestampGenerator.hasNext(); cnt++) {
       timestamp = timestampGenerator.next();
       if (timestamp < endTime) {
         timestampArray[timeArrayLength++] = timestamp;
@@ -151,10 +189,4 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
     }
     return timeArrayLength;
   }
-
-  private RowRecord constructRowRecord() {
-    RowRecord record = new RowRecord(startTime);
-    functions.forEach(function -> record.addField(getField(function.getResult())));
-    return record;
-  }
 }
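
fetchSelectDataFromRemoteNode above ships only the filled prefix of the timestamp array and issues one fetch per batch, so a single round trip covers every remote select series. A hedged sketch of that step; fetchRemote is a hypothetical stand-in for fetchBatchDataByTimestampForAllSelectPaths.

import java.util.ArrayList;
import java.util.List;

public class BatchFetchSketch {
  static void fetchIfNeeded(long[] timestampArray, int timeArrayLength) {
    if (timeArrayLength == 0) {
      return; // nothing generated for this group-by partition
    }
    List<Long> batch = new ArrayList<>(timeArrayLength);
    for (int i = 0; i < timeArrayLength; i++) {
      batch.add(timestampArray[i]); // only the filled prefix is shipped
    }
    fetchRemote(batch); // one round trip for the whole batch
  }

  static void fetchRemote(List<Long> batch) {
    System.out.println("fetching " + batch.size() + " timestamps");
  }
}
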
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
new file mode 100644
index 0000000..808eab8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.query.aggregation.AggreResultData;
+import org.apache.iotdb.db.query.aggregation.AggregateFunction;
+import org.apache.iotdb.db.query.aggregation.impl.LastAggrFunc;
+import org.apache.iotdb.db.query.aggregation.impl.MaxTimeAggrFunc;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.AggreResultDataPointReader;
+import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
+import org.apache.iotdb.db.query.factory.AggreFuncFactory;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
+
+/**
+ * Handle aggregation queries and construct the result dataset in cluster mode.
+ */
+public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
+
+  private ClusterRpcSingleQueryManager queryManager;
+
+  public ClusterAggregateEngineExecutor(List<Path> selectedSeries, List<String> aggres,
+      IExpression expression, ClusterRpcSingleQueryManager queryManager) {
+    super(selectedSeries, aggres, expression);
+    this.queryManager = queryManager;
+  }
+
+  @Override
+  public QueryDataSet executeWithoutTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, IOException, PathErrorException, ProcessorException {
+    Filter timeFilter = expression != null ? ((GlobalTimeExpression) expression).getFilter() : null;
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
+
+    List<Path> paths = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    // track the select series reader index per group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      Path path = selectedSeries.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
+        readers.add(reader);
+        dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
+      } else {
+        paths.add(path);
+        // construct AggregateFunction
+        TSDataType tsDataType = MManager.getInstance()
+            .getSeriesType(path.getFullPath());
+        AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), tsDataType);
+        function.init();
+
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(selectedSeries.get(i), context);
+
+        // sequence reader for sealed tsfile, unsealed tsfile, memory
+        SequenceDataReader sequenceReader;
+        if (function instanceof MaxTimeAggrFunc || function instanceof LastAggrFunc) {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, true);
+        } else {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, false);
+        }
+
+        // unseq reader for all chunk groups in unSeqFile, memory
+        PriorityMergeReader unSeqMergeReader = SeriesReaderFactory.getInstance()
+            .createUnSeqMergeReader(queryDataSource.getOverflowSeriesDataSource(), timeFilter);
+
+        AggreResultData aggreResultData = aggregateWithoutTimeGenerator(function,
+            sequenceReader, unSeqMergeReader, timeFilter);
+
+        dataTypes.add(aggreResultData.getDataType());
+        readers.add(new AggreResultDataPointReader(aggreResultData));
+      }
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
+
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, readers);
+  }
+
+  /**
+   * Execute aggregate functions with a value filter.
+   *
+   * @param context query context.
+   */
+  @Override
+  public QueryDataSet executeWithTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+
+    /** add query token for select series which can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    /** add query token for filter series which can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), expression, deviceIdSet);
+
+    ClusterTimeGenerator timestampGenerator;
+    List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    // origin data type of select paths
+    List<TSDataType> originDataTypes = new ArrayList<>();
+    try {
+      timestampGenerator = new ClusterTimeGenerator(expression, context,
+          queryManager);
+      readersOfSelectedSeries = ClusterSeriesReaderFactory
+          .createReadersByTimestampOfSelectedPaths(selectedSeries, context,
+              queryManager, originDataTypes);
+    } catch (IOException ex) {
+      throw new FileNodeManagerException(ex);
+    }
+
+    List<AggregateFunction> aggregateFunctions = new ArrayList<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      TSDataType type = originDataTypes.get(i);
+      AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), type);
+      function.init();
+      aggregateFunctions.add(function);
+    }
+    List<AggreResultData> aggreResultDataList = aggregateWithTimeGenerator(aggregateFunctions,
+        timestampGenerator,
+        readersOfSelectedSeries);
+
+    List<IPointReader> resultDataPointReaders = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    for (AggreResultData resultData : aggreResultDataList) {
+      dataTypes.add(resultData.getDataType());
+      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
+    }
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  }
+
+  /**
+   * Calculate the aggregation result with a value filter.
+   */
+  @Override
+  protected List<AggreResultData> aggregateWithTimeGenerator(
+      List<AggregateFunction> aggregateFunctions,
+      TimeGenerator timestampGenerator,
+      List<EngineReaderByTimeStamp> readersOfSelectedSeries)
+      throws IOException {
+
+    while (timestampGenerator.hasNext()) {
+
+      // generate a batch of timestamps for this aggregation round
+      long[] timeArray = new long[aggregateFetchSize];
+      List<Long> batchTimestamp = new ArrayList<>();
+      int timeArrayLength = 0;
+      for (int cnt = 0; cnt < aggregateFetchSize; cnt++) {
+        if (!timestampGenerator.hasNext()) {
+          break;
+        }
+        long time = timestampGenerator.next();
+        timeArray[timeArrayLength++] = time;
+        batchTimestamp.add(time);
+      }
+
+      // fetch all remote select series data by timestamp list.
+      if (!batchTimestamp.isEmpty()) {
+        try {
+          queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+        } catch (RaftConnectionException e) {
+          throw new IOException(e);
+        }
+      }
+
+      // calculate a partial aggregation result
+      for (int i = 0; i < readersOfSelectedSeries.size(); i++) {
+        aggregateFunctions.get(i).calcAggregationUsingTimestamps(timeArray, timeArrayLength,
+            readersOfSelectedSeries.get(i));
+      }
+    }
+
+    List<AggreResultData> aggreResultDataArrayList = new ArrayList<>();
+    for (AggregateFunction function : aggregateFunctions) {
+      aggreResultDataArrayList.add(function.getResult());
+    }
+    return aggreResultDataArrayList;
+  }
+}
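
ClusterAggregateEngineExecutor above chooses its execution path from the optimized expression: no expression or a pure time filter avoids the timestamp generator entirely, while a value filter forces the generator path. A minimal decision sketch; the ExprType enum is a local stand-in for the real ExpressionType values.

public class AggregatePathSketch {
  enum ExprType { NONE, GLOBAL_TIME, VALUE_FILTER }

  static String choosePath(ExprType type) {
    switch (type) {
      case NONE:
      case GLOBAL_TIME:
        // aggregate each series independently; no row-by-row timestamps needed
        return "executeWithoutTimeGenerator";
      default:
        // value filter: generate qualifying timestamps, then read by timestamp
        return "executeWithTimeGenerator";
    }
  }

  public static void main(String[] args) {
    System.out.println(choosePath(ExprType.GLOBAL_TIME));
    System.out.println(choosePath(ExprType.VALUE_FILTER));
  }
}
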
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
index fed8c0d..0cdd457 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
@@ -22,17 +22,14 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import org.apache.iotdb.cluster.query.dataset.ClusterDataSetWithTimeGenerator;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
-import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
-import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
@@ -71,15 +68,19 @@ public class ClusterExecutorWithTimeGenerator {
 
     /** add query token for query series which can handle locally **/
     List<Path> localQuerySeries = new ArrayList<>(queryExpression.getSelectedSeries());
-    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> remoteQuerySeries
+            .addAll(selectSeriesGroupEntity.getSelectPaths()));
     localQuerySeries.removeAll(remoteQuerySeries);
     QueryResourceManager.getInstance()
         .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
 
     /** add query token for filter series which can handle locally **/
     Set<String> deviceIdSet = new HashSet<>();
-    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
-      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
       remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
     }
     QueryResourceManager.getInstance()
@@ -88,33 +89,18 @@ public class ClusterExecutorWithTimeGenerator {
 
     ClusterTimeGenerator timestampGenerator;
     List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    /** Get data types of the select paths **/
+    List<TSDataType> dataTypes = new ArrayList<>();
     try {
       timestampGenerator = new ClusterTimeGenerator(queryExpression.getExpression(), context,
           queryManager);
       readersOfSelectedSeries = ClusterSeriesReaderFactory
           .createReadersByTimestampOfSelectedPaths(queryExpression.getSelectedSeries(), context,
-              queryManager);
-    } catch (IOException ex) {
+              queryManager, dataTypes);
+    } catch (IOException | PathErrorException ex) {
       throw new FileNodeManagerException(ex);
     }
 
-    /** Get data type of select paths **/
-    List<TSDataType> dataTypes = new ArrayList<>();
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
-        .getSelectSeriesReaders();
-    for (Path path : queryExpression.getSelectedSeries()) {
-      try {
-        if (selectSeriesReaders.containsKey(path)) {
-          dataTypes.add(selectSeriesReaders.get(path).getDataType());
-        } else {
-          dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
-        }
-      } catch (PathErrorException e) {
-        throw new FileNodeManagerException(e);
-      }
-
-    }
-
     EngineReaderByTimeStamp[] readersOfSelectedSeriesArray = new EngineReaderByTimeStamp[readersOfSelectedSeries
         .size()];
     int index = 0;
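
The refactor above moves data-type resolution into ClusterSeriesReaderFactory, which now appends each path's type to a caller-supplied list while it builds the readers, so callers no longer re-resolve types afterwards. A short sketch of that out-parameter pattern with simplified types; the type strings are illustrative only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class OutParamSketch {
  static List<String> createReaders(List<String> paths, List<String> dataTypesOut) {
    List<String> readers = new ArrayList<>();
    for (String path : paths) {
      readers.add("reader(" + path + ")");
      // the type is resolved once, while the reader is built
      dataTypesOut.add(path.endsWith("temp") ? "DOUBLE" : "INT64");
    }
    return readers;
  }

  public static void main(String[] args) {
    List<String> types = new ArrayList<>();
    List<String> readers = createReaders(Arrays.asList("root.g1.temp", "root.g1.count"), types);
    System.out.println(readers + " " + types);
  }
}
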
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
index 65bd87b..95e5f1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
@@ -20,15 +20,19 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
-import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
@@ -37,7 +41,7 @@ import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGenerator {
+public class ClusterExecutorWithoutTimeGenerator extends AbstractExecutorWithoutTimeGenerator {
 
   /**
    * Query expression
@@ -62,7 +66,7 @@ public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGene
    * Execute query without filter or with only global time filter.
    */
   public QueryDataSet execute(QueryContext context)
-      throws FileNodeManagerException {
+      throws FileNodeManagerException, PathErrorException {
 
     Filter timeFilter = null;
     if (queryExpression.getExpression() != null) {
@@ -72,15 +76,22 @@ public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGene
     List<IPointReader> readersOfSelectedSeries = new ArrayList<>();
     List<TSDataType> dataTypes = new ArrayList<>();
 
-    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> paths = new ArrayList<>();
+    // track the select series reader index per group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : queryExpression.getSelectedSeries()) {
 
-      if (selectPathReaders.containsKey(path)) {
-        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
         readersOfSelectedSeries.add(reader);
         dataTypes.add(reader.getDataType());
-
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         IPointReader reader = createSeriesReader(context, path, dataTypes, timeFilter);
         readersOfSelectedSeries.add(reader);
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
similarity index 50%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
index 83c5fa9..608a479 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
@@ -16,13 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iotdb.db.query.executor;
+package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -30,6 +34,7 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.IFillEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -37,55 +42,63 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class FillEngineExecutor {
+public class ClusterFillEngineExecutor implements IFillEngineExecutor {
 
-  private long jobId;
   private List<Path> selectedSeries;
   private long queryTime;
   private Map<TSDataType, IFill> typeIFillMap;
+  private ClusterRpcSingleQueryManager queryManager;
+
 
-  public FillEngineExecutor(long jobId, List<Path> selectedSeries, long queryTime,
-      Map<TSDataType, IFill> typeIFillMap) {
-    this.jobId = jobId;
+  public ClusterFillEngineExecutor(List<Path> selectedSeries, long queryTime,
+      Map<TSDataType, IFill> typeIFillMap, ClusterRpcSingleQueryManager queryManager) {
     this.selectedSeries = selectedSeries;
     this.queryTime = queryTime;
     this.typeIFillMap = typeIFillMap;
+    this.queryManager = queryManager;
   }
 
-  /**
-   * execute fill.
-   *
-   * @param context query context
-   */
+  @Override
   public QueryDataSet execute(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException {
-    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedSeries);
-
+    List<Path> paths = new ArrayList<>();
     List<IFill> fillList = new ArrayList<>();
     List<TSDataType> dataTypeList = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap = queryManager.getSelectSeriesGroupEntityMap();
+    // track the select series reader index per group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : selectedSeries) {
-      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
-          .getQueryDataSource(path, context);
-      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      dataTypeList.add(dataType);
-      IFill fill = null;
-      if (!typeIFillMap.containsKey(dataType)) {
-        fill = new PreviousFill(dataType, queryTime, 0);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
+        readers.add(reader);
+        dataTypeList.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
-        fill = typeIFillMap.get(dataType).copy(path);
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(path, context);
+        TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+        dataTypeList.add(dataType);
+        IFill fill;
+        if (!typeIFillMap.containsKey(dataType)) {
+          fill = new PreviousFill(dataType, queryTime, 0);
+        } else {
+          fill = typeIFillMap.get(dataType).copy(path);
+        }
+        fill.setDataType(dataType);
+        fill.setQueryTime(queryTime);
+        fill.constructReaders(queryDataSource, context);
+        fillList.add(fill);
+        readers.add(fill.getFillResult());
       }
-      fill.setDataType(dataType);
-      fill.setQueryTime(queryTime);
-      fill.constructReaders(queryDataSource, context);
-      fillList.add(fill);
     }
 
-    List<IPointReader> readers = new ArrayList<>();
-    for (IFill fill : fillList) {
-      readers.add(fill.getFillResult());
-    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
 
     return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypeList, readers);
   }
-
 }
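
ClusterFillEngineExecutor above keeps the single-node default: when no fill is configured for a series' data type, a previous-value fill is used. A hedged sketch of that selection; Fill here is a local stand-in for IFill, and the lambdas only illustrate the fallback behavior.

import java.util.HashMap;
import java.util.Map;

public class FillSelectionSketch {
  interface Fill { String apply(long queryTime); }

  static Fill chooseFill(String dataType, Map<String, Fill> typeFillMap) {
    // fall back to a previous-value fill when no fill was configured for this type
    return typeFillMap.getOrDefault(dataType, t -> "previous value before " + t);
  }

  public static void main(String[] args) {
    Map<String, Fill> configured = new HashMap<>();
    configured.put("DOUBLE", t -> "linear fill at " + t);
    System.out.println(chooseFill("DOUBLE", configured).apply(100L));
    System.out.println(chooseFill("INT64", configured).apply(100L));
  }
}
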
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
index 4211528..3db7c6a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
@@ -23,13 +23,16 @@ import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.query.QueryType;
+import org.apache.iotdb.cluster.query.dataset.ClusterGroupByDataSetWithOnlyTimeFilter;
+import org.apache.iotdb.cluster.query.dataset.ClusterGroupByDataSetWithTimeGenerator;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
+import org.apache.iotdb.db.query.executor.AbstractQueryRouter;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -37,7 +40,10 @@ import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
+import org.apache.iotdb.tsfile.read.filter.TimeFilter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Pair;
 
@@ -45,7 +51,7 @@ import org.apache.iotdb.tsfile.utils.Pair;
  * Query entrance class of cluster query process. All query clause will be transformed to physical
  * plan, physical plan will be executed by ClusterQueryRouter.
  */
-public class ClusterQueryRouter implements IEngineQueryRouter {
+public class ClusterQueryRouter extends AbstractQueryRouter {
 
   /**
    * Consistency level of reading data
@@ -86,15 +92,43 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
         return engineExecutor.execute(context);
       }
     } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
-      throw new FileNodeManagerException(e);
+      throw new FileNodeManagerException(e.getMessage());
     }
   }
 
   @Override
   public QueryDataSet aggregate(List<Path> selectedSeries, List<String> aggres,
       IExpression expression, QueryContext context)
-      throws QueryFilterOptimizationException, FileNodeManagerException, IOException, PathErrorException, ProcessorException {
-    throw new UnsupportedOperationException();
+      throws FileNodeManagerException, PathErrorException, ProcessorException {
+
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+
+    try {
+      if (expression != null) {
+        IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+            .optimize(expression, selectedSeries);
+        // update the query expression of the original query plan; this is required.
+        queryManager.getOriginQueryPlan().setExpression(optimizedExpression);
+
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, optimizedExpression, queryManager);
+        if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+          queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithoutTimeGenerator(context);
+        } else {
+          queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithTimeGenerator(context);
+        }
+      } else {
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, null, queryManager);
+        queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+        return engineExecutor.executeWithoutTimeGenerator(context);
+      }
+    } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   @Override
@@ -102,13 +136,76 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
       IExpression expression, long unit, long origin, List<Pair<Long, Long>> intervals,
       QueryContext context)
       throws ProcessorException, QueryFilterOptimizationException, FileNodeManagerException, PathErrorException, IOException {
-    throw new UnsupportedOperationException();
+
+    long jobId = context.getJobId();
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(jobId);
+
+    // check the validity of the intervals
+    checkIntervals(intervals);
+
+    // merge intervals
+    List<Pair<Long, Long>> mergedIntervalList = mergeInterval(intervals);
+
+    // construct the group-by interval filter
+    BinaryExpression intervalFilter = null;
+    for (Pair<Long, Long> pair : mergedIntervalList) {
+      BinaryExpression pairFilter = BinaryExpression
+          .and(new GlobalTimeExpression(TimeFilter.gtEq(pair.left)),
+              new GlobalTimeExpression(TimeFilter.ltEq(pair.right)));
+      if (intervalFilter != null) {
+        intervalFilter = BinaryExpression.or(intervalFilter, pairFilter);
+      } else {
+        intervalFilter = pairFilter;
+      }
+    }
+
+    // merge the interval filter with the filter conditions from the WHERE clause
+    if (expression == null) {
+      expression = intervalFilter;
+    } else {
+      expression = BinaryExpression.and(expression, intervalFilter);
+    }
+
+    IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+        .optimize(expression, selectedSeries);
+    // update the query expression of the original query plan; this is required.
+    queryManager.getOriginQueryPlan().setExpression(optimizedExpression);
+
+    try {
+      if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+        queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+        ClusterGroupByDataSetWithOnlyTimeFilter groupByEngine = new ClusterGroupByDataSetWithOnlyTimeFilter(
+            jobId, selectedSeries, unit, origin, mergedIntervalList, queryManager);
+        groupByEngine.initGroupBy(context, aggres, optimizedExpression);
+        return groupByEngine;
+      } else {
+        queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+        ClusterGroupByDataSetWithTimeGenerator groupByEngine = new ClusterGroupByDataSetWithTimeGenerator(
+            jobId, selectedSeries, unit, origin, mergedIntervalList, queryManager);
+        groupByEngine.initGroupBy(context, aggres, optimizedExpression);
+        return groupByEngine;
+      }
+    } catch (RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   @Override
   public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillType,
       QueryContext context) throws FileNodeManagerException, PathErrorException, IOException {
-    throw new UnsupportedOperationException();
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+    try {
+      queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+
+      ClusterFillEngineExecutor fillEngineExecutor = new ClusterFillEngineExecutor(fillPaths,
+          queryTime,
+          fillType, queryManager);
+      return fillEngineExecutor.execute(context);
+    } catch (IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   public int getReadDataConsistencyLevel() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
index ddfa5eb..a9ee032 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
@@ -20,18 +20,24 @@ package org.apache.iotdb.cluster.query.factory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
 import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestamp;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
 /**
@@ -39,28 +45,39 @@ import org.apache.iotdb.tsfile.read.common.Path;
  */
 public class ClusterSeriesReaderFactory {
 
+  private ClusterSeriesReaderFactory() {
+  }
+
   /**
-   * Construct ReaderByTimestamp , include sequential data and unsequential data.
+   * Construct ReaderByTimestamp readers covering both sequential and unsequential data, and collect each series' data type.
    *
    * @param paths selected series path
    * @param context query context
    * @return the list of EngineReaderByTimeStamp
    */
   public static List<EngineReaderByTimeStamp> createReadersByTimestampOfSelectedPaths(
-      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager)
-      throws IOException, FileNodeManagerException {
+      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager, List<TSDataType> dataTypes)
+      throws IOException, FileNodeManagerException, PathErrorException {
 
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+    // Track the next select series reader index for each group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
 
     for (Path path : paths) {
-
-      if (selectSeriesReaders.containsKey(path)) {
-        readersOfSelectedSeries.add(selectSeriesReaders.get(path));
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
+        readersOfSelectedSeries.add(reader);
+        dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         /** can handle series query locally **/
         EngineReaderByTimeStamp readerByTimeStamp = createReaderByTimeStamp(path, context);
         readersOfSelectedSeries.add(readerByTimeStamp);
+        dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
       }
     }
     return readersOfSelectedSeries;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
index faece2b..f57c538 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
@@ -20,7 +20,9 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 
 import com.alipay.sofa.jraft.util.OnlyForTest;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -80,13 +82,23 @@ public class ClusterRpcQueryManager implements IClusterRpcQueryManager {
   public Map<String, Integer> getAllReadUsage() {
     Map<String, Integer> readerUsageMap = new HashMap<>();
     SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
-      for(String groupId:singleQueryManager.getDataGroupUsage()) {
+      for (String groupId : singleQueryManager.getDataGroupUsage()) {
         readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
       }
     });
     return readerUsageMap;
   }
 
+  @Override
+  public void close() throws RaftConnectionException {
+    Iterator<Map.Entry<String, ClusterRpcSingleQueryManager>> iterator = SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<String, ClusterRpcSingleQueryManager> entry = iterator.next();
+      entry.getValue().releaseQueryResource();
+      iterator.remove();
+    }
+  }
+
   @OnlyForTest
   public static ConcurrentHashMap<Long, String> getJobIdMapTaskId() {
     return JOB_ID_MAP_TASK_ID;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index d9a5859..c9dc701 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -37,24 +38,31 @@ import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeries
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.cluster.query.utils.ClusterRpcReaderUtils;
 import org.apache.iotdb.cluster.query.utils.QueryPlanPartitionUtils;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
-import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage all remote series reader resource in a query resource in coordinator node.
  */
 public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManager {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcSingleQueryManager.class);
   /**
    * Statistic all usage of local data group.
    */
@@ -82,27 +90,15 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
 
   // select path resource
   /**
-   * Query plans of select paths which are divided from queryPlan group by group id, it contains all
-   * group id ,including local data group if it involves.
+   * Select series group entities grouped by data group; key is the group id (contains only remote group ids)
    */
-  private Map<String, QueryPlan> selectPathPlans = new HashMap<>();
-
-  /**
-   * Key is group id (only contains remote group id), value is all select series in group id.
-   */
-  private Map<String, List<Path>> selectSeriesByGroupId = new HashMap<>();
-
-  /**
-   * Series reader of select paths (only contains remote series), key is series path , value is
-   * reader
-   */
-  private Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = new HashMap<>();
+  private Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = new HashMap<>();
 
   // filter path resource
   /**
-   * Filter group entity group by data group, key is group id(only contain remote group id)
+   * Filter series group entities grouped by data group; key is the group id (contains only remote group ids)
    */
-  private Map<String, FilterGroupEntity> filterGroupEntityMap = new HashMap<>();
+  private Map<String, FilterSeriesGroupEntity> filterSeriesGroupEntityMap = new HashMap<>();
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
@@ -134,48 +130,61 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    * group
    */
   private void initSeriesReader(int readDataConsistencyLevel)
-      throws IOException, RaftConnectionException {
+      throws RaftConnectionException, IOException {
    // Init readers for the data groups of the select series; if any filter series share a data group, init them together.
-    for (Entry<String, QueryPlan> entry : selectPathPlans.entrySet()) {
+    Iterator<Map.Entry<String, SelectSeriesGroupEntity>> selectIterator = selectSeriesGroupEntityMap
+        .entrySet().iterator();
+    while (selectIterator.hasNext()) {
+      Entry<String, SelectSeriesGroupEntity> entry = selectIterator.next();
       String groupId = entry.getKey();
-      QueryPlan queryPlan = entry.getValue();
+      SelectSeriesGroupEntity selectEntity = entry.getValue();
+      QueryPlan queryPlan = selectEntity.getQueryPlan();
       if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
-        queryNodes.put(groupId, randomPeer);
+        LOGGER.debug("Init series reader for group id {} from remote node.", groupId);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
-        List<Filter> filterList = null;
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-          allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-          filterList = filterGroupEntity.getFilters();
+        List<Filter> filterList = new ArrayList<>();
+        if (filterSeriesGroupEntityMap.containsKey(groupId)) {
+          FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+          allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+          filterList = filterSeriesGroupEntity.getFilters();
         }
+        /** create request **/
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
       } else {
+        LOGGER.debug("Init series reader for group id {} locally.", groupId);
         dataGroupUsage.add(groupId);
-        selectSeriesByGroupId.remove(groupId);
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.remove(groupId);
-        }
+        selectIterator.remove();
+        filterSeriesGroupEntityMap.remove(groupId);
       }
     }
 
    // Init series readers for data groups that contain only filter series and are absent from the select series groups.
-    for (Entry<String, FilterGroupEntity> entry : filterGroupEntityMap.entrySet()) {
+    Iterator<Map.Entry<String, FilterSeriesGroupEntity>> filterIterator = filterSeriesGroupEntityMap
+        .entrySet().iterator();
+    while (filterIterator.hasNext()) {
+      Entry<String, FilterSeriesGroupEntity> entry = filterIterator.next();
       String groupId = entry.getKey();
-      if (!selectPathPlans.containsKey(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
+      if (!selectSeriesGroupEntityMap.containsKey(groupId) && !QPExecutorUtils
+          .canHandleQueryByGroupId(groupId)) {
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
-        FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-        allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-        List<Filter> filterList = filterGroupEntity.getFilters();
+        FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+        allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+        List<Filter> filterList = filterSeriesGroupEntity.getFilters();
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
+      } else if (!selectSeriesGroupEntityMap.containsKey(groupId)) {
+        dataGroupUsage.add(groupId);
+        filterIterator.remove();
       }
     }
   }
@@ -185,6 +194,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleInitReaderResponse(String groupId, Map<PathType, QueryPlan> allQueryPlan,
       InitSeriesReaderResponse response) {
+    LOGGER.debug("Handle init reader response of group id {}", groupId);
     /** create cluster series reader **/
     if (allQueryPlan.containsKey(PathType.SELECT_PATH)) {
       QueryPlan plan = allQueryPlan.get(PathType.SELECT_PATH);
@@ -195,7 +205,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterSelectSeriesReader seriesReader = new ClusterSelectSeriesReader(groupId, seriesPath,
             dataType, this);
-        selectSeriesReaders.put(seriesPath, seriesReader);
+        selectSeriesGroupEntityMap.get(groupId).addSelectSeriesReader(seriesReader);
       }
     }
     if (allQueryPlan.containsKey(PathType.FILTER_PATH)) {
@@ -207,41 +217,44 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterFilterSeriesReader seriesReader = new ClusterFilterSeriesReader(groupId, seriesPath,
             dataType, this);
-        if (!filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
-        }
-        filterGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
+        filterSeriesGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
       }
     }
   }
 
   @Override
   public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
-    List<String> fetchDataSeries = new ArrayList<>();
-    Map<String, List<Path>> seriesByGroupId;
-    Map<Path, ClusterSelectSeriesReader> seriesReaders;
-    seriesByGroupId = selectSeriesByGroupId;
-    seriesReaders = selectSeriesReaders;
-    if (seriesByGroupId.containsKey(groupId)) {
-      List<Path> allFilterSeries = seriesByGroupId.get(groupId);
-      for (Path series : allFilterSeries) {
-        if (seriesReaders.get(series).enableFetchData()) {
-          fetchDataSeries.add(series.getFullPath());
-        }
+    List<Integer> fetchDataSeriesIndexs = new ArrayList<>();
+    List<Path> fetchDataSeries = new ArrayList<>();
+    List<Path> selectSeries = selectSeriesGroupEntityMap.get(groupId).getSelectPaths();
+    List<ClusterSelectSeriesReader> seriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeries.size(); i++) {
+      if (seriesReaders.get(i).enableFetchData()) {
+        fetchDataSeriesIndexs.add(i);
+        fetchDataSeries.add(selectSeries.get(i));
       }
     }
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.SELECT_PATH,
-            fetchDataSeries,
+    LOGGER.debug("Fetch data for paths {} of group id {} from node {}", fetchDataSeries, groupId,
+        queryNodes.get(groupId));
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeriesIndexs,
             queryRounds++);
-    handleFetchDataResponseForSelectPaths(fetchDataSeries, response);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
+
+    handleFetchDataResponseForSelectPaths(groupId, fetchDataSeriesIndexs, response);
   }
 
   @Override
-  public void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException {
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.FILTER_PATH, null,
-            queryRounds++);
+  public void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException {
+    LOGGER.debug("Fetch Data for filter paths {} of group id {} from node {}",
+        filterSeriesGroupEntityMap.get(groupId).getFilterPaths(), groupId, queryNodes.get(groupId));
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.FILTER_PATH, null, queryRounds++);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
+
     handleFetchDataResponseForFilterPaths(groupId, response);
   }
 
@@ -249,41 +262,41 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   @Override
   public void fetchBatchDataByTimestampForAllSelectPaths(List<Long> batchTimestamp)
       throws RaftConnectionException {
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      List<String> fetchDataFilterSeries = new ArrayList<>();
-      entry.getValue().forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
-      QuerySeriesDataByTimestampResponse response = ClusterRpcReaderUtils
-          .fetchBatchDataByTimestamp(groupId, queryNodes.get(groupId), taskId, queryRounds++,
-              batchTimestamp, fetchDataFilterSeries);
-      handleFetchDataByTimestampResponseForSelectPaths(fetchDataFilterSeries, response);
+      BasicRequest request = QuerySeriesDataByTimestampRequest
+          .createRequest(groupId, queryRounds++, taskId, batchTimestamp);
+      QuerySeriesDataByTimestampResponse response = (QuerySeriesDataByTimestampResponse) ClusterRpcReaderUtils
+          .handleQueryRequest(request, queryNodes.get(groupId), 0);
+      handleFetchDataByTimestampResponseForSelectPaths(groupId, response);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataByTimestampResponseForSelectPaths(List<String> fetchDataSeries,
+  private void handleFetchDataByTimestampResponseForSelectPaths(String groupId,
       BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeriesReaders.size(); i++) {
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
-          .addBatchData(batchData, true);
+      selectSeriesReaders.get(i).addBatchData(batchData, true);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataResponseForSelectPaths(List<String> fetchDataSeries,
-      BasicQueryDataResponse response) {
+  private void handleFetchDataResponseForSelectPaths(String groupId,
+      List<Integer> selectSeriesIndexs, BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeriesIndexs.size(); i++) {
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
+      selectSeriesReaders.get(selectSeriesIndexs.get(i))
           .addBatchData(batchData, batchData.length() < CLUSTER_CONF.getBatchReadSize());
     }
   }
@@ -293,10 +306,11 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleFetchDataResponseForFilterPaths(String groupId,
       QuerySeriesDataResponse response) {
-    FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-    List<Path> fetchDataSeries = filterGroupEntity.getFilterPaths();
+    FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+    List<Path> fetchDataSeries = filterSeriesGroupEntity.getFilterPaths();
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    List<ClusterFilterSeriesReader> filterReaders = filterGroupEntity.getFilterSeriesReaders();
+    List<ClusterFilterSeriesReader> filterReaders = filterSeriesGroupEntity
+        .getFilterSeriesReaders();
     boolean remoteDataFinish = true;
     for (int i = 0; i < batchDataList.size(); i++) {
       if (batchDataList.get(i).length() != 0) {
@@ -313,11 +327,6 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   }
 
   @Override
-  public QueryPlan getSelectPathQueryPlan(String fullPath) {
-    return selectPathPlans.get(fullPath);
-  }
-
-  @Override
   public void setDataGroupReaderNode(String groupId, PeerId readerNode) {
     queryNodes.put(groupId, readerNode);
   }
@@ -332,7 +341,8 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     for (Entry<String, PeerId> entry : queryNodes.entrySet()) {
       String groupId = entry.getKey();
       PeerId queryNode = entry.getValue();
-      ClusterRpcReaderUtils.releaseRemoteQueryResource(groupId, queryNode, taskId);
+      BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
+      ClusterRpcReaderUtils.handleQueryRequest(request, queryNode, 0);
     }
   }
 
@@ -356,60 +366,19 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     return queryRounds;
   }
 
-  public void setQueryRounds(long queryRounds) {
-    this.queryRounds = queryRounds;
-  }
-
   public QueryPlan getOriginQueryPlan() {
     return originQueryPlan;
   }
 
-  public void setOriginQueryPlan(QueryPlan queryPlan) {
-    this.originQueryPlan = queryPlan;
-  }
-
-  public Map<String, PeerId> getQueryNodes() {
-    return queryNodes;
-  }
-
-  public void setQueryNodes(
-      Map<String, PeerId> queryNodes) {
-    this.queryNodes = queryNodes;
-  }
-
-  public Map<String, QueryPlan> getSelectPathPlans() {
-    return selectPathPlans;
-  }
-
-  public void setSelectPathPlans(
-      Map<String, QueryPlan> selectPathPlans) {
-    this.selectPathPlans = selectPathPlans;
-  }
-
-  public Map<String, List<Path>> getSelectSeriesByGroupId() {
-    return selectSeriesByGroupId;
-  }
-
-  public void setSelectSeriesByGroupId(
-      Map<String, List<Path>> selectSeriesByGroupId) {
-    this.selectSeriesByGroupId = selectSeriesByGroupId;
-  }
-
-  public Map<Path, ClusterSelectSeriesReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
-  }
-
-  public void setSelectSeriesReaders(
-      Map<Path, ClusterSelectSeriesReader> selectSeriesReaders) {
-    this.selectSeriesReaders = selectSeriesReaders;
+  public void setQueryNode(String groupID, PeerId peerId) {
+    this.queryNodes.put(groupID, peerId);
   }
 
-  public Map<String, FilterGroupEntity> getFilterGroupEntityMap() {
-    return filterGroupEntityMap;
+  public Map<String, SelectSeriesGroupEntity> getSelectSeriesGroupEntityMap() {
+    return selectSeriesGroupEntityMap;
   }
 
-  public void setFilterGroupEntityMap(
-      Map<String, FilterGroupEntity> filterGroupEntityMap) {
-    this.filterGroupEntityMap = filterGroupEntityMap;
+  public Map<String, FilterSeriesGroupEntity> getFilterSeriesGroupEntityMap() {
+    return filterSeriesGroupEntityMap;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
similarity index 97%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
index 326af11..19407a0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
@@ -28,7 +28,7 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 /**
  * Filter entities of a data group, including the QueryPlan, filters, all filter paths and filter readers
  */
-public class FilterGroupEntity {
+public class FilterSeriesGroupEntity {
 
   /**
    * Group id
@@ -62,7 +62,7 @@ public class FilterGroupEntity {
    */
   private List<ClusterFilterSeriesReader> filterSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public FilterSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
     this.filterPaths = new ArrayList<>();
     this.filters = new ArrayList<>();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
index b8e4f5d..0917631 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
@@ -66,4 +66,9 @@ public interface IClusterRpcQueryManager {
    * Get all read usage count group by data group id, key is group id, value is usage count
    */
   Map<String, Integer> getAllReadUsage();
+
+  /**
+   * Close the manager and release all held query resources
+   */
+  void close() throws RaftConnectionException;
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
index c4aec9c..19d8f25 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
@@ -53,11 +53,11 @@ public interface IClusterRpcSingleQueryManager {
   void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException;
 
   /**
-   * Fetch data for filter path.
+   * Fetch data for all filter paths.
    *
    * @param groupId data group id
    */
-  void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException;
+  void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException;
 
   /**
    * Fetch batch data for all select paths by batch timestamp. If target data can be fetched, skip
@@ -69,13 +69,6 @@ public interface IClusterRpcSingleQueryManager {
       throws RaftConnectionException;
 
   /**
-   * Get query plan of select path
-   *
-   * @param fullPath Timeseries full path in select paths
-   */
-  QueryPlan getSelectPathQueryPlan(String fullPath);
-
-  /**
    * Set reader node of a data group
    *
    * @param groupId data group id
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
similarity index 54%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
index 326af11..1de26bd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
@@ -20,16 +20,14 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 /**
- * Filter entities of a data group, concluding QueryPlan, filters, all filter paths and filter readers
+ * Select series entities of a data group, including the QueryPlan, all select paths and series readers
  */
-public class FilterGroupEntity {
-
+public class SelectSeriesGroupEntity {
   /**
    * Group id
    */
@@ -41,32 +39,24 @@ public class FilterGroupEntity {
   private QueryPlan queryPlan;
 
   /**
-   * Filters of filter path.
-   */
-  private List<Filter> filters;
-
-  /**
    *
-   * all filter series
+   * all select series
    * <p>
-   * Note: It may contain multiple series in a complicated tree
-   * for example: select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or time = 12,
-   * filter tree: <code>[[[[root.vehicle.d0.s0:time == 12] || [root.vehicle.d0.s1:time == 12]] || [root.vehicle.d1.s2:time == 12]] || [root.vehicle.d1.s3:time == 12]]</code>
+   * Note: It may contain multiple series in a query
+   * for example: select sum(s0), max(s0) from root.vehicle.d0 where s0 > 10
    * </p>
    */
-  private List<Path> filterPaths;
-
+  private List<Path> selectPaths;
 
   /**
   * Series readers of select paths (only contains remote series)
    */
-  private List<ClusterFilterSeriesReader> filterSeriesReaders;
+  private List<ClusterSelectSeriesReader> selectSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public SelectSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
-    this.filterPaths = new ArrayList<>();
-    this.filters = new ArrayList<>();
-    this.filterSeriesReaders = new ArrayList<>();
+    this.selectPaths = new ArrayList<>();
+    this.selectSeriesReaders = new ArrayList<>();
   }
 
   public String getGroupId() {
@@ -85,27 +75,19 @@ public class FilterGroupEntity {
     this.queryPlan = queryPlan;
   }
 
-  public List<Filter> getFilters() {
-    return filters;
-  }
-
-  public void addFilter(Filter filter) {
-    this.filters.add(filter);
-  }
-
-  public List<Path> getFilterPaths() {
-    return filterPaths;
+  public List<Path> getSelectPaths() {
+    return selectPaths;
   }
 
-  public void addFilterPaths(Path filterPath) {
-    this.filterPaths.add(filterPath);
+  public void addSelectPaths(Path selectPath) {
+    this.selectPaths.add(selectPath);
   }
 
-  public List<ClusterFilterSeriesReader> getFilterSeriesReaders() {
-    return filterSeriesReaders;
+  public List<ClusterSelectSeriesReader> getSelectSeriesReaders() {
+    return selectSeriesReaders;
   }
 
-  public void addFilterSeriesReader(ClusterFilterSeriesReader filterSeriesReader) {
-    this.filterSeriesReaders.add(filterSeriesReader);
+  public void addSelectSeriesReader(ClusterSelectSeriesReader selectSeriesReader) {
+    this.selectSeriesReaders.add(selectSeriesReader);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
index fe3ac52..e6149f2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
@@ -21,7 +21,9 @@ package org.apache.iotdb.cluster.query.manager.querynode;
 import com.alipay.sofa.jraft.util.OnlyForTest;
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
@@ -53,7 +55,7 @@ public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
 
   @Override
   public InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
     long jobId = QueryResourceManager.getInstance().assignJobId();
     String taskId = request.getTaskId();
     TASK_ID_MAP_JOB_ID.put(taskId, jobId);
@@ -113,6 +115,16 @@ public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
     return readerUsageMap;
   }
 
+  @Override
+  public void close() throws FileNodeManagerException {
+    Iterator<Entry<Long, ClusterLocalSingleQueryManager>> iterator = SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<Long, ClusterLocalSingleQueryManager> entry = iterator.next();
+      entry.getValue().close();
+      iterator.remove();
+    }
+  }
+
   @OnlyForTest
   public static ConcurrentHashMap<String, Long> getTaskIdMapJobId() {
     return TASK_ID_MAP_JOB_ID;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index 559575a..097f24d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -24,21 +24,24 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ScheduledFuture;
-import org.apache.iotdb.cluster.concurrent.pool.QueryTimerManager;
+import org.apache.iotdb.cluster.concurrent.pool.QueryTimerThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
-import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.IClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFillSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterGroupBySelectSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderEntity;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -46,11 +49,16 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
 import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
-import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -67,11 +75,17 @@ import org.slf4j.LoggerFactory;
 
 public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryManager {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterLocalSingleQueryManager.class);
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(ClusterLocalSingleQueryManager.class);
 
   private String groupId;
 
   /**
+   * Mark whether this manager has been initialized.
+   */
+  private boolean isInit = false;
+
+  /**
    * Timer of Query, if the time is up, close query resource.
    */
   private ScheduledFuture<?> queryTimer;
@@ -87,14 +101,19 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   private long queryRound = -1;
 
   /**
-   * Key is series full path, value is reader of select series
+   * Select reader entity
+   */
+  private ClusterSelectSeriesBatchReaderEntity selectReaderEntity;
+
+  /**
+   * Select reader entity for group by queries, handling group by queries with only a time filter
    */
-  private Map<String, AbstractClusterBatchReader> selectSeriesReaders = new HashMap<>();
+  private ClusterGroupBySelectSeriesBatchReaderEntity groupBySelectReaderEntity;
 
   /**
-   * Filter reader
+   * Filter reader entity
    */
-  private IClusterFilterSeriesBatchReader filterReader;
+  private ClusterFilterSeriesBatchReaderEntity filterReaderEntity;
 
   /**
    * Key is series full path, value is data type of series
@@ -113,30 +132,33 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    */
   public ClusterLocalSingleQueryManager(long jobId) {
     this.jobId = jobId;
-    queryTimer = QueryTimerManager.getInstance()
+    queryTimer = QueryTimerThreadManager.getInstance()
         .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
   }
 
   @Override
   public InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
+    if (isInit) {
+      throw new IOException(String
+          .format("ClusterLocalSingleQueryManager has already initialized. Job id = %s", jobId));
+    }
+    isInit = true;
     this.groupId = request.getGroupID();
     InitSeriesReaderResponse response = new InitSeriesReaderResponse(groupId);
     QueryContext context = new QueryContext(jobId);
     Map<PathType, QueryPlan> queryPlanMap = request.getAllQueryPlan();
     if (queryPlanMap.containsKey(PathType.SELECT_PATH)) {
+      selectReaderEntity = new ClusterSelectSeriesBatchReaderEntity();
       QueryPlan plan = queryPlanMap.get(PathType.SELECT_PATH);
       if (plan instanceof GroupByPlan) {
-        throw new UnsupportedOperationException();
+        handleGroupBySeriesReader(plan, context, response);
       } else if (plan instanceof AggregationPlan) {
-        throw new UnsupportedOperationException();
+        handleAggreSeriesReader(plan, context, response);
+      } else if (plan instanceof FillQueryPlan) {
+        handleFillSeriesReader(plan, context, response);
       } else {
-        if (plan.getExpression() == null
-            || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
-          handleSelectReaderWithoutTimeGenerator(plan, context, response);
-        } else {
-          handleSelectReaderWithTimeGenerator(plan, context, response);
-        }
+        handleSelectSeriesReader(plan, context, response);
       }
     }
     if (queryPlanMap.containsKey(PathType.FILTER_PATH)) {
@@ -147,22 +169,145 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   }
 
   /**
-   * Handle filter series reader
+   * Handle fill series reader
    *
-   * @param plan filter series query plan
+   * @param queryPlan fill query plan
    */
-  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
-      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
-      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
-    QueryDataSet queryDataSet = queryProcessExecutor
-        .processQuery(plan, context);
-    List<Path> paths = plan.getPaths();
+  private void handleFillSeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) queryPlan;
+
+    List<Path> selectedPaths = queryPlan.getPaths();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    Map<TSDataType, IFill> typeIFillMap = fillQueryPlan.getFillType();
+    for (Path path : selectedPaths) {
+      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+          .getQueryDataSource(path, context);
+      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+      dataTypes.add(dataType);
+      IFill fill;
+      if (!typeIFillMap.containsKey(dataType)) {
+        fill = new PreviousFill(dataType, fillQueryPlan.getQueryTime(), 0);
+      } else {
+        fill = typeIFillMap.get(dataType).copy(path);
+      }
+      fill.setDataType(dataType);
+      fill.setQueryTime(fillQueryPlan.getQueryTime());
+      fill.constructReaders(queryDataSource, context);
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterFillSelectSeriesBatchReader(dataType, fill.getFillResult()));
+      dataTypeMap.put(path.getFullPath(), dataType);
+    }
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+
+  /**
+   * Handle group by series reader
+   *
+   * @param queryPlan group by query plan
+   */
+  private void handleGroupBySeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException, QueryFilterOptimizationException {
+    if (queryPlan.getExpression() == null
+        || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleGroupBySeriesReaderWithoutTimeGenerator(queryPlan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(queryPlan, context, response);
+    }
+  }
+
+
+  /**
+   * Handle group by series reader without a value filter
+   *
+   * @param queryPlan group by query plan
+   */
+  private void handleGroupBySeriesReaderWithoutTimeGenerator(QueryPlan queryPlan,
+      QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException, QueryFilterOptimizationException {
+    QueryDataSet queryDataSet = queryProcessExecutor.processQuery(queryPlan, context);
+    List<Path> paths = queryDataSet.getPaths();
     List<TSDataType> dataTypes = queryDataSet.getDataTypes();
     for (int i = 0; i < paths.size(); i++) {
       dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
     }
-    response.getSeriesDataTypes().put(pathType, dataTypes);
-    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+    groupBySelectReaderEntity = new ClusterGroupBySelectSeriesBatchReaderEntity(paths, dataTypes,
+        (GroupByWithOnlyTimeFilterDataSet) queryDataSet);
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle aggregation series reader
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+    if (queryPlan.getExpression() == null
+        || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleAggreSeriesReaderWithoutTimeGenerator(queryPlan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(queryPlan, context, response);
+    }
+  }
+
+  /**
+   * Handle aggregation series reader without value filter
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReaderWithoutTimeGenerator(QueryPlan queryPlan,
+      QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+    AggregationPlan aggregationPlan = (AggregationPlan) queryPlan;
+
+    List<Path> selectedPaths = aggregationPlan.getPaths();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
+        selectedPaths, aggregationPlan.getAggregations(), aggregationPlan.getExpression());
+
+    List<IPointReader> readers = engineExecutor.constructAggreReadersWithoutTimeGenerator(context);
+
+    List<TSDataType> dataTypes = engineExecutor.getDataTypes();
+
+    for (int i = 0; i < selectedPaths.size(); i++) {
+      Path path = selectedPaths.get(i);
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity.addReaders(
+          new ClusterSelectSeriesBatchReader(dataTypes.get(i), readers.get(i)));
+      dataTypeMap.put(path.getFullPath(), dataTypes.get(i));
+    }
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle select series query
+   *
+   * @param plan query plan
+   * @param context query context
+   * @param response response for coordinator node
+   */
+  private void handleSelectSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, IOException, PathErrorException {
+    if (plan.getExpression() == null
+        || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleSelectReaderWithoutTimeGenerator(plan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(plan, context, response);
+    }
   }
 
   /**
@@ -185,16 +330,35 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
         .beginQueryOfGivenQueryPaths(context.getJobId(), plan.getPaths());
     for (int i = 0; i < paths.size(); i++) {
       String fullPath = paths.get(i).getFullPath();
-      IPointReader reader = ExecutorWithoutTimeGenerator
+      IPointReader reader = AbstractExecutorWithoutTimeGenerator
           .createSeriesReader(context, paths.get(i), dataTypes, timeFilter);
-      selectSeriesReaders
-          .put(fullPath, new ClusterBatchReaderWithoutTimeGenerator(dataTypes.get(i), reader));
+      selectReaderEntity.addPath(fullPath);
+      selectReaderEntity.addReaders(new ClusterSelectSeriesBatchReader(dataTypes.get(i), reader));
       dataTypeMap.put(fullPath, dataTypes.get(i));
     }
     response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
   }
 
   /**
+   * Handle filter series reader
+   *
+   * @param plan filter series query plan
+   */
+  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException, ClassNotFoundException {
+    QueryDataSet queryDataSet = queryProcessExecutor.processQuery(plan, context);
+    List<Path> paths = plan.getPaths();
+    List<TSDataType> dataTypes = queryDataSet.getDataTypes();
+    for (int i = 0; i < paths.size(); i++) {
+      dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
+    }
+    response.getSeriesDataTypes().put(pathType, dataTypes);
+    filterReaderEntity = new ClusterFilterSeriesBatchReaderEntity(queryDataSet, paths,
+        request.getFilterList());
+  }
+
+  /**
    * Handle select series query with value filter
    *
   * @param plan query plan
@@ -211,8 +375,9 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       EngineReaderByTimeStamp readerByTimeStamp = ClusterSeriesReaderFactory
           .createReaderByTimeStamp(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      selectSeriesReaders
-          .put(path.getFullPath(), new ClusterBatchReaderByTimestamp(readerByTimeStamp, dataType));
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
       dataTypeMap.put(path.getFullPath(), dataType);
       dataTypeList.add(dataType);
     }
@@ -228,10 +393,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
       PathType pathType = request.getPathType();
-      List<String> paths = request.getSeriesPaths();
       List<BatchData> batchDataList;
       if (pathType == PathType.SELECT_PATH) {
-        batchDataList = readSelectSeriesBatchData(paths);
+        // check whether this is a group by query with only a time filter
+        batchDataList =
+            groupBySelectReaderEntity != null ? groupBySelectReaderEntity.nextBatchList()
+                : readSelectSeriesBatchData(request.getSeriesPathIndexs());
       } else {
         batchDataList = readFilterSeriesBatchData();
       }
@@ -247,13 +414,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       throws IOException {
     resetQueryTimer();
     QuerySeriesDataByTimestampResponse response = new QuerySeriesDataByTimestampResponse(groupId);
-    List<String> fetchDataSeries = request.getFetchDataSeries();
     long targetQueryRounds = request.getQueryRounds();
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
+      List<IClusterSelectSeriesBatchReader> readers = selectReaderEntity.getAllReaders();
       List<BatchData> batchDataList = new ArrayList<>();
-      for (String series : fetchDataSeries) {
-        AbstractClusterBatchReader reader = selectSeriesReaders.get(series);
+      for (IClusterSelectSeriesBatchReader reader : readers) {
         batchDataList.add(reader.nextBatch(request.getBatchTimestamp()));
       }
       cachedBatchDataResult = batchDataList;
@@ -265,19 +431,20 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   @Override
   public void resetQueryTimer() {
     queryTimer.cancel(false);
-    queryTimer = QueryTimerManager.getInstance()
+    queryTimer = QueryTimerThreadManager.getInstance()
         .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
   }
 
   /**
-   * Read batch data of select series
+   * Read batch data of select series by series index
    *
-   * @param paths all series to query
+   * @param seriesIndexs indexes of the series to query
    */
-  private List<BatchData> readSelectSeriesBatchData(List<String> paths) throws IOException {
+  private List<BatchData> readSelectSeriesBatchData(List<Integer> seriesIndexs) throws IOException {
     List<BatchData> batchDataList = new ArrayList<>();
-    for (String fullPath : paths) {
-      batchDataList.add(selectSeriesReaders.get(fullPath).nextBatch());
+    for (int index : seriesIndexs) {
+      IClusterSelectSeriesBatchReader reader = selectReaderEntity.getReaderByIndex(index);
+      batchDataList.add(reader.nextBatch());
     }
     return batchDataList;
   }
@@ -288,7 +455,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    * @return batch data of all filter series
    */
   private List<BatchData> readFilterSeriesBatchData() throws IOException {
-    return filterReader.nextBatchList();
+    return filterReaderEntity.nextBatchList();
   }
 
   public String getGroupId() {
@@ -309,12 +476,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     return queryRound;
   }
 
-  public Map<String, AbstractClusterBatchReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
+  public ClusterSelectSeriesBatchReaderEntity getSelectReaderEntity() {
+    return selectReaderEntity;
   }
 
-  public IClusterFilterSeriesBatchReader getFilterReader() {
-    return filterReader;
+  public ClusterFilterSeriesBatchReaderEntity getFilterReaderEntity() {
+    return filterReaderEntity;
   }
 
   public Map<String, TSDataType> getDataTypeMap() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
index cc0f103..42374d5 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
@@ -42,7 +42,7 @@ public interface IClusterLocalQueryManager {
    * @param request request for query data from coordinator node
    */
   InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * Read batch data of all querying series in request and set response.
@@ -54,8 +54,8 @@ public interface IClusterLocalQueryManager {
 
   /**
    * Read batch data of select series by batch timestamp which is used in query with value filter
-   *  @param request request of querying select paths
    *
+   * @param request request of querying select paths
    */
   QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
       QuerySeriesDataByTimestampRequest request) throws IOException;
@@ -79,4 +79,9 @@ public interface IClusterLocalQueryManager {
   * Get all read usage counts grouped by data group id; key is group id, value is usage count
    */
   Map<String, Integer> getAllReadUsage();
+
+  /**
+   * Close the manager and release its query resources
+   */
+  void close() throws FileNodeManagerException;
 }
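
For context, the lifecycle this interface defines on a query node looks roughly as follows. This is an illustrative sketch rather than code from this patch; `manager`, `initRequest`, and `dataRequest` are assumed to be built elsewhere:

    // Sketch only: request construction and response handling are elided.
    InitSeriesReaderResponse init = manager.createQueryDataSet(initRequest);
    try {
      QuerySeriesDataResponse data = manager.readBatchData(dataRequest);
      // ... consume the batch data carried by the response ...
    } finally {
      manager.close(); // new in this patch: releases the local query resources
    }
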
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
index 318772f..1d89c5c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
@@ -40,18 +40,19 @@ public interface IClusterLocalSingleQueryManager {
 
   /**
    * Initially create corresponding series readers.
+   *
    * @param request request of querying series data
    */
   InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * <p>
   * Read batch data. If the query round in the cache equals the target query round, the last
   * batch failed to transfer from the query node to the coordinator, so the cached batch data
   * is returned.
    * </p>
-   *  @param request request of querying series data
    *
+   * @param request request of querying series data
    */
   QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
       throws IOException;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
index 72c7c70..c0012a1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
@@ -20,9 +20,9 @@ package org.apache.iotdb.cluster.query.reader.coordinatornode;
 
 import java.io.IOException;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.utils.ClusterTimeValuePairUtils;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.utils.TimeValuePair;
-import org.apache.iotdb.db.utils.TimeValuePairUtils;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
@@ -63,11 +63,14 @@ public abstract class AbstractClusterPointReader implements IPointReader {
   @Override
   public TimeValuePair next() throws IOException {
     if (hasNext()) {
-      TimeValuePair timeValuePair = TimeValuePairUtils.getCurrentTimeValuePair(currentBatchData);
+      TimeValuePair timeValuePair = ClusterTimeValuePairUtils
+          .getCurrentTimeValuePair(currentBatchData);
       currentTimeValuePair = timeValuePair;
       currentBatchData.next();
       return timeValuePair;
     }
     return null;
   }
+
+  public abstract void addBatchData(BatchData batchData, boolean remoteDataFinish);
 }
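
To illustrate the contract the new abstract method completes (illustrative, not part of the patch): coordinator-side readers are fed remote batches through addBatchData(...) and drained point by point via next(), assuming `reader` is a concrete AbstractClusterPointReader and `remoteBatch` was fetched from a query node:

    reader.addBatchData(remoteBatch, /* remoteDataFinish = */ false);
    while (reader.hasNext()) {
      TimeValuePair pair = reader.next();
      // ... feed the pair into the merge / time-generator logic ...
    }
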
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
index 805d3af..9d60ae2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
@@ -83,7 +83,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
   @Override
   protected void updateCurrentBatchData() throws RaftConnectionException {
     if (batchDataList.isEmpty() && !remoteDataFinish) {
-      queryManager.fetchBatchDataForFilterPaths(groupId);
+      queryManager.fetchBatchDataForAllFilterPaths(groupId);
     }
     if (!batchDataList.isEmpty()) {
       currentBatchData = batchDataList.removeFirst();
@@ -95,14 +95,6 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     //Do nothing
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -111,14 +103,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
index 0a507d5..c640b53 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
@@ -119,14 +119,6 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     batchDataList = null;
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -135,27 +127,12 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
   }
 
-  public boolean isRemoteDataFinish() {
-    return remoteDataFinish;
-  }
-
-  public void setRemoteDataFinish(boolean remoteDataFinish) {
-    this.remoteDataFinish = remoteDataFinish;
-  }
-
   /**
   * Check if this series needs to fetch data from remote query node
    */
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
similarity index 57%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
index 218d68b..a581d07 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
@@ -19,18 +19,27 @@
 package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import java.util.List;
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
-/**
- * Batch reader for filter series which is used in query node.
- */
-public interface IClusterFilterSeriesBatchReader {
+public class ClusterFillSelectSeriesBatchReader extends ClusterSelectSeriesBatchReader {
 
-  boolean hasNext() throws IOException;
+  public ClusterFillSelectSeriesBatchReader(
+      TSDataType dataType,
+      IPointReader reader) {
+    super(dataType, reader);
+  }
 
-  /**
-   * Get next batch data of all filter series.
-   */
-  List<BatchData> nextBatchList() throws IOException;
+  @Override
+  public BatchData nextBatch() throws IOException {
+    if (hasNext()) {
+      ClusterNullableBatchData batchData = new ClusterNullableBatchData();
+      batchData.addTimeValuePair(reader.next());
+      return batchData;
+    } else {
+      return new ClusterNullableBatchData();
+    }
+  }
 }
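
Illustrative usage of the fill reader above (not part of the patch): it emits at most one (time, value) pair per batch, wrapped in ClusterNullableBatchData so an absent value is still representable. Here `pointReader` is assumed to be an IPointReader built for the fill timestamp elsewhere:

    ClusterFillSelectSeriesBatchReader fillReader =
        new ClusterFillSelectSeriesBatchReader(TSDataType.INT64, pointReader);
    BatchData batch = fillReader.nextBatch(); // one pair, or an empty nullable batch
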
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
similarity index 88%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
index 6690999..ddcb35d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
@@ -33,9 +32,9 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
 /**
- * Batch reader for all filter paths.
+ * Batch reader entity for all filter paths.
  */
-public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatchReader {
+public class ClusterFilterSeriesBatchReaderEntity implements IClusterSeriesBatchReaderEntity {
 
   private List<Path> allFilterPath;
 
@@ -45,7 +44,7 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterFilterSeriesBatchReader(QueryDataSet queryDataSet, List<Path> allFilterPath,
+  public ClusterFilterSeriesBatchReaderEntity(QueryDataSet queryDataSet, List<Path> allFilterPath,
       List<Filter> filters) {
     this.queryDataSet = queryDataSet;
     this.allFilterPath = allFilterPath;
@@ -69,12 +68,12 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
       batchDataList.add(new BatchData(dataTypeList.get(i), true));
     }
     int dataPointCount = 0;
-    while(true){
-      if(!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()){
+    while (true) {
+      if (!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()) {
         break;
       }
-      if(hasNext() && addTimeValuePair(batchDataList, dataTypeList)){
-          dataPointCount++;
+      if (hasNext() && addTimeValuePair(batchDataList, dataTypeList)) {
+        dataPointCount++;
       }
     }
     return batchDataList;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java
new file mode 100644
index 0000000..30ecf1b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterGroupBySelectSeriesBatchReaderEntity.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.querynode;
+
+import static org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader.CLUSTER_CONF;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.cluster.query.utils.ClusterTimeValuePairUtils;
+import org.apache.iotdb.db.query.dataset.groupby.GroupByWithOnlyTimeFilterDataSet;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Field;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+
+/**
+ * Batch reader entity for select paths in group by query with only time filter.
+ */
+public class ClusterGroupBySelectSeriesBatchReaderEntity implements
+    IClusterSeriesBatchReaderEntity {
+
+  private List<Path> paths;
+  private List<TSDataType> dataTypes;
+
+  private GroupByWithOnlyTimeFilterDataSet queryDataSet;
+
+  public ClusterGroupBySelectSeriesBatchReaderEntity(
+      List<Path> paths,
+      List<TSDataType> dataTypes,
+      GroupByWithOnlyTimeFilterDataSet queryDataSet) {
+    this.paths = paths;
+    this.dataTypes = dataTypes;
+    this.queryDataSet = queryDataSet;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return queryDataSet.hasNext();
+  }
+
+  @Override
+  public List<BatchData> nextBatchList() throws IOException {
+    List<BatchData> batchDataList = new ArrayList<>(paths.size());
+    for (int i = 0; i < paths.size(); i++) {
+      batchDataList.add(new ClusterNullableBatchData());
+    }
+    int dataPointCount = 0;
+    while (true) {
+      if (!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()) {
+        break;
+      }
+      dataPointCount++;
+      RowRecord rowRecord = queryDataSet.next();
+      long time = rowRecord.getTimestamp();
+      List<Field> fieldList = rowRecord.getFields();
+      for (int j = 0; j < paths.size(); j++) {
+        ClusterNullableBatchData batchData = (ClusterNullableBatchData) batchDataList.get(j);
+        Object value = fieldList.get(j).getObjectValue(dataTypes.get(j));
+        batchData.addTimeValuePair(fieldList.get(j).toString().equals("null") ? null
+            : ClusterTimeValuePairUtils.getTimeValuePair(time, value, dataTypes.get(j)));
+      }
+    }
+    return batchDataList;
+  }
+}
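
An illustrative consumption loop for the entity above (not part of the patch), assuming it was constructed with matching paths, data types, and a GroupByWithOnlyTimeFilterDataSet. Each call returns one BatchData per select path, with null entries where a group-by interval produced no value:

    while (entity.hasNext()) {
      // batchList.get(i) carries the data for paths.get(i)
      List<BatchData> batchList = entity.nextBatchList();
      // ... serialize batchList into the query response ...
    }
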
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
similarity index 83%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
index f3d443f..b3c05d8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
@@ -21,7 +21,6 @@ package org.apache.iotdb.cluster.query.reader.querynode;
 import java.io.IOException;
 import java.util.List;
 import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.utils.TimeValuePair;
@@ -31,21 +30,22 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader without time generator for cluster which is used in query node.
  */
-public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReader implements
+    IClusterSelectSeriesBatchReader {
 
   /**
    * Data type
    */
-  private TSDataType dataType;
+  protected TSDataType dataType;
 
   /**
    * Point reader
    */
-  private IPointReader reader;
+  protected IPointReader reader;
 
-  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+  static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterBatchReaderWithoutTimeGenerator(
+  public ClusterSelectSeriesBatchReader(
       TSDataType dataType, IPointReader reader) {
     this.dataType = dataType;
     this.reader = reader;
@@ -81,7 +81,7 @@ public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatch
   @Override
   public BatchData nextBatch(List<Long> batchTime) throws IOException {
     throw new IOException(
-        "nextBatch(List<Long> batchTime) in ClusterBatchReaderWithoutTimeGenerator is an empty method.");
+        "nextBatch(List<Long> batchTime) in ClusterSelectSeriesBatchReader is an empty method.");
   }
 
   public TSDataType getDataType() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
index b8c36eb..fc6fe31 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
@@ -27,7 +27,8 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader by timestamp for cluster which is used in query node.
  */
-public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReaderByTimestamp implements
+    IClusterSelectSeriesBatchReader {
 
   /**
    * Reader
@@ -39,7 +40,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
    */
   private TSDataType dataType;
 
-  public ClusterBatchReaderByTimestamp(
+  public ClusterSelectSeriesBatchReaderByTimestamp(
       EngineReaderByTimeStamp readerByTimeStamp,
       TSDataType dataType) {
     this.readerByTimeStamp = readerByTimeStamp;
@@ -54,7 +55,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
   @Override
   public BatchData nextBatch() throws IOException {
     throw new UnsupportedOperationException(
-        "nextBatch() in ClusterBatchReaderByTimestamp is an empty method.");
+        "nextBatch() in ClusterSelectSeriesBatchReaderByTimestamp is an empty method.");
   }
 
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
index 218d68b..7150ffa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
@@ -18,19 +18,46 @@
  */
 package org.apache.iotdb.cluster.query.reader.querynode;
 
-import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Batch reader for filter series which is used in query node.
+ * Batch reader entity for all select paths.
  */
-public interface IClusterFilterSeriesBatchReader {
+public class ClusterSelectSeriesBatchReaderEntity {
 
-  boolean hasNext() throws IOException;
+  /**
+   * All select paths
+   */
+  private List<String> paths;
 
   /**
-   * Get next batch data of all filter series.
+   * All select readers
    */
-  List<BatchData> nextBatchList() throws IOException;
+  private List<IClusterSelectSeriesBatchReader> readers;
+
+  public ClusterSelectSeriesBatchReaderEntity() {
+    paths = new ArrayList<>();
+    readers = new ArrayList<>();
+  }
+
+  public void addPath(String path) {
+    this.paths.add(path);
+  }
+
+  public void addReaders(IClusterSelectSeriesBatchReader reader) {
+    this.readers.add(reader);
+  }
+
+  public List<IClusterSelectSeriesBatchReader> getAllReaders() {
+    return readers;
+  }
+
+  public IClusterSelectSeriesBatchReader getReaderByIndex(int index) {
+    return readers.get(index);
+  }
+
+  public List<String> getAllPaths() {
+    return paths;
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
index b0a86bd..87a8329 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSelectSeriesBatchReader.java
@@ -26,7 +26,7 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * Cluster batch reader, which provides another method to get batch data by batch timestamp.
  */
-public abstract class AbstractClusterBatchReader implements IBatchReader {
+public interface IClusterSelectSeriesBatchReader extends IBatchReader {
 
   /**
    * Get batch data by batch time
@@ -34,6 +34,6 @@ public abstract class AbstractClusterBatchReader implements IBatchReader {
    * @param batchTime valid batch timestamp
    * @return corresponding batch data
    */
-  public abstract BatchData nextBatch(List<Long> batchTime) throws IOException;
+  BatchData nextBatch(List<Long> batchTime) throws IOException;
 
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
similarity index 87%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
index 218d68b..80e72b6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterSeriesBatchReaderEntity.java
@@ -23,14 +23,14 @@ import java.util.List;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Batch reader for filter series which is used in query node.
+ * Batch reader for series which is used in query node.
  */
-public interface IClusterFilterSeriesBatchReader {
+public interface IClusterSeriesBatchReaderEntity {
 
   boolean hasNext() throws IOException;
 
   /**
-   * Get next batch data of all filter series.
+   * Get next batch data of all series.
    */
   List<BatchData> nextBatchList() throws IOException;
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
index 639dce8..2b3ab18 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
@@ -25,7 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
@@ -65,7 +65,7 @@ public class ClusterNodeConstructor extends AbstractNodeConstructor {
    * Init filter series reader
    */
   private void init(ClusterRpcSingleQueryManager queryManager) {
-    Map<String, FilterGroupEntity> filterGroupEntityMap = queryManager.getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = queryManager.getFilterSeriesGroupEntityMap();
     filterGroupEntityMap.forEach(
         (key, value) -> filterSeriesReadersByGroupId.put(key, value.getFilterSeriesReaders()));
     filterSeriesReadersByGroupId.forEach((key, value) -> filterSeriesReaderIndex.put(key, 0));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index c3df421..bab0a67 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -19,86 +19,61 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import com.alipay.sofa.jraft.entity.PeerId;
-import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.DataQueryTask;
 import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
-import org.apache.iotdb.cluster.query.PathType;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
-import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utils for cluster reader which needs to acquire data from remote query node.
  */
 public class ClusterRpcReaderUtils {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcReaderUtils.class);
+
   /**
   * Maximum number of times a task is retried
    */
   private static final int TASK_MAX_RETRY = ClusterDescriptor.getInstance().getConfig()
       .getQpTaskRedoCount();
 
-  /**
-   * Create cluster series reader
-   *
-   * @param peerId query node to fetch data
-   * @param readDataConsistencyLevel consistency level of read data
-   * @param taskId task id assigned by coordinator node
-   */
-  public static BasicResponse createClusterSeriesReader(String groupId, PeerId peerId,
-      int readDataConsistencyLevel, Map<PathType, QueryPlan> allQueryPlan, String taskId,
-      List<Filter> filterList)
-      throws IOException, RaftConnectionException {
-
-    /** handle request **/
-    BasicRequest request = InitSeriesReaderRequest
-        .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
-            allQueryPlan, filterList);
-    return handleQueryRequest(request, peerId, 0);
-  }
-
-  public static QuerySeriesDataResponse fetchBatchData(String groupID, PeerId peerId, String taskId,
-      PathType pathType, List<String> fetchDataSeries, long queryRounds)
-      throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataRequest
-        .createFetchDataRequest(groupID, taskId, pathType, fetchDataSeries, queryRounds);
-    return (QuerySeriesDataResponse) handleQueryRequest(request, peerId, 0);
-  }
-
-  public static QuerySeriesDataByTimestampResponse fetchBatchDataByTimestamp(String groupId,
-      PeerId peerId, String taskId, long queryRounds, List<Long> batchTimestamp,
-      List<String> fetchDataSeries)
-      throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataByTimestampRequest
-        .createRequest(groupId, queryRounds, taskId, batchTimestamp, fetchDataSeries);
-    return (QuerySeriesDataByTimestampResponse) handleQueryRequest(request, peerId, 0);
+  private ClusterRpcReaderUtils() {
   }
 
   /**
-   * Release remote query resources
-   *
-   * @param groupId data group id
-   * @param peerId target query node
-   * @param taskId unique task id
+   * Create cluster series reader
    */
-  public static void releaseRemoteQueryResource(String groupId, PeerId peerId, String taskId)
+  public static BasicResponse createClusterSeriesReader(String groupId, BasicRequest request,
+      ClusterRpcSingleQueryManager manager)
       throws RaftConnectionException {
 
-    BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
-    handleQueryRequest(request, peerId, 0);
+    List<PeerId> peerIdList = RaftUtils
+        .getPeerIDList(groupId, Server.getInstance(), Router.getInstance());
+    int randomPeerIndex = RaftUtils.getRandomInt(peerIdList.size());
+    BasicResponse response;
+    for (int i = 0; i < peerIdList.size(); i++) {
+      PeerId peerId = peerIdList.get((i + randomPeerIndex) % peerIdList.size());
+      try {
+        response = handleQueryRequest(request, peerId, 0);
+        manager.setQueryNode(groupId, peerId);
+        LOGGER.debug("Init series reader in Node<{}> of group<{}> success.", peerId, groupId);
+        return response;
+      } catch (RaftConnectionException e) {
+        LOGGER.debug("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
+      }
+    }
+    throw new RaftConnectionException(
+        String.format("Can not init series reader in all nodes of group<%s>, please check cluster status.", groupId));
   }
 
   /**
@@ -109,7 +84,7 @@ public class ClusterRpcReaderUtils {
    * @param taskRetryNum retry num of the request
    * @return Response from remote query node
    */
-  private static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
+  public static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
       int taskRetryNum)
       throws RaftConnectionException {
     if (taskRetryNum > TASK_MAX_RETRY) {
@@ -117,10 +92,20 @@ public class ClusterRpcReaderUtils {
           String.format("Query request retries reach the upper bound %s",
               TASK_MAX_RETRY));
     }
-    NodeAsClient nodeAsClient = RaftUtils.getRaftNodeAsClient();
-    QueryTask queryTask = nodeAsClient.syncHandleRequest(request, peerId);
-    if (queryTask.getState() == TaskState.FINISH) {
-      return queryTask.getBasicResponse();
+    DataQueryTask dataQueryTask = new DataQueryTask(true, request);
+    dataQueryTask.setTargetNode(peerId);
+    RaftNodeAsClientManager.getInstance().produceQPTask(dataQueryTask);
+    try {
+      dataQueryTask.await();
+    } catch (InterruptedException e) {
+      throw new RaftConnectionException(
+          String.format("Can not connect to remote node {%s} for query", peerId));
+    }
+    if (dataQueryTask.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
+      throw new RaftConnectionException(
+          String.format("Can not connect to remote node {%s} for query", peerId));
+    } else if (dataQueryTask.getTaskState() == TaskState.FINISH) {
+      return dataQueryTask.getResponse();
     } else {
       return handleQueryRequest(request, peerId, taskRetryNum + 1);
     }
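
The peer-selection strategy introduced above is worth calling out: a random starting offset spreads query load across the group, and each peer is probed exactly once before the coordinator gives up. A distilled, self-contained sketch of that pattern (illustrative only, not part of the patch; the names are hypothetical):

    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    /** Probe every peer once, starting from a random offset, and give up
     *  only after all peers have failed. */
    final class PeerProbe {

      interface Probe<P, R> {
        R apply(P peer) throws Exception;
      }

      static <P, R> R probeAll(List<P> peers, Probe<P, R> probe) throws Exception {
        if (peers.isEmpty()) {
          throw new IllegalArgumentException("no peers to probe");
        }
        // random start spreads load instead of always hammering peer 0
        int start = ThreadLocalRandom.current().nextInt(peers.size());
        Exception lastFailure = null;
        for (int i = 0; i < peers.size(); i++) {
          P peer = peers.get((i + start) % peers.size());
          try {
            return probe.apply(peer);
          } catch (Exception e) {
            lastFailure = e; // this peer is unreachable; try the next one
          }
        }
        throw lastFailure;
      }
    }
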
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
new file mode 100644
index 0000000..3141f99
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import org.apache.iotdb.cluster.query.common.ClusterNullableBatchData;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.db.utils.TimeValuePairUtils;
+import org.apache.iotdb.db.utils.TsPrimitiveType;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.utils.Binary;
+
+public class ClusterTimeValuePairUtils {
+
+  private ClusterTimeValuePairUtils() {
+  }
+
+  /**
+   * get given data's current (time,value) pair.
+   *
+   * @param data -batch data
+   * @return -given data's (time,value) pair
+   */
+  public static TimeValuePair getCurrentTimeValuePair(BatchData data) {
+    if (data instanceof ClusterNullableBatchData) {
+      return ((ClusterNullableBatchData) data).getCurrentTimeValuePair();
+    } else {
+      return TimeValuePairUtils.getCurrentTimeValuePair(data);
+    }
+  }
+
+  /**
+   * Get (time,value) pair according to data type
+   */
+  public static TimeValuePair getTimeValuePair(long time, Object v, TSDataType dataType) {
+    switch (dataType) {
+      case INT32:
+        return new TimeValuePair(time, new TsPrimitiveType.TsInt((int) v));
+      case INT64:
+        return new TimeValuePair(time, new TsPrimitiveType.TsLong((long) v));
+      case FLOAT:
+        return new TimeValuePair(time, new TsPrimitiveType.TsFloat((float) v));
+      case DOUBLE:
+        return new TimeValuePair(time, new TsPrimitiveType.TsDouble((double) v));
+      case TEXT:
+        return new TimeValuePair(time, new TsPrimitiveType.TsBinary((Binary) v));
+      case BOOLEAN:
+        return new TimeValuePair(time, new TsPrimitiveType.TsBoolean((boolean) v));
+      default:
+        throw new UnSupportedDataTypeException(String.valueOf(v));
+    }
+  }
+}
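
A brief usage sketch of the conversion helper above (illustrative, not part of the patch):

    // Wraps a raw (time, value) into the engine's TimeValuePair, picking the
    // TsPrimitiveType wrapper that matches the declared data type.
    TimeValuePair pair =
        ClusterTimeValuePairUtils.getTimeValuePair(1000L, 42, TSDataType.INT32);
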
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
index 0024138..e6577ee 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
@@ -26,7 +26,7 @@ import static org.apache.iotdb.tsfile.read.expression.ExpressionType.TRUE;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.expression.TrueExpression;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
@@ -46,17 +46,15 @@ public class ExpressionUtils {
    * Get all series path of expression group by group id
    */
   public static void getAllExpressionSeries(IExpression expression,
-      Map<String, FilterGroupEntity> filterGroupEntityMap)
+      Map<String, FilterSeriesGroupEntity> filterGroupEntityMap)
       throws PathErrorException {
     if (expression.getType() == ExpressionType.SERIES) {
       Path path = ((SingleSeriesExpression) expression).getSeriesPath();
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!filterGroupEntityMap.containsKey(groupId)) {
-        filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
-      }
-      FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-      filterGroupEntity.addFilterPaths(path);
-      filterGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
+      filterGroupEntityMap.putIfAbsent(groupId, new FilterSeriesGroupEntity(groupId));
+      FilterSeriesGroupEntity filterSeriesGroupEntity = filterGroupEntityMap.get(groupId);
+      filterSeriesGroupEntity.addFilterPaths(path);
+      filterSeriesGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
     } else if (expression.getType() == OR || expression.getType() == AND) {
       getAllExpressionSeries(((IBinaryExpression) expression).getLeft(), filterGroupEntityMap);
       getAllExpressionSeries(((IBinaryExpression) expression).getRight(), filterGroupEntityMap);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index 4f7a5fe..2b09492 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -19,21 +19,23 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
-import org.apache.iotdb.cluster.utils.hash.Router;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 /**
  * Utils for splitting query plan to several sub query plans by group id.
@@ -45,11 +47,36 @@ public class QueryPlanPartitionUtils {
   }
 
   /**
-   * Split query plan with no filter or with only global time filter by group id
+   * Split a query plan with no filter, with only a global time filter, or a fill query by group id
    */
-  public static void splitQueryPlanWithoutValueFilter(ClusterRpcSingleQueryManager singleQueryManager)
+  public static void splitQueryPlanWithoutValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
-    splitQueryPlanBySelectPath(singleQueryManager);
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof FillQueryPlan) {
+      splitFillPlan(singleQueryManager);
+    } else if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlanBySelectPath(singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlanBySelectPath(singleQueryManager);
+    } else {
+      splitQueryPlanBySelectPath(singleQueryManager);
+    }
+  }
+
+  /**
+   * Split query plan with filter.
+   */
+  public static void splitQueryPlanWithValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlanWithFilter(singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlanWithFilter(singleQueryManager);
+    } else {
+      splitQueryPlanWithFilter(singleQueryManager);
+    }
   }
 
   /**
@@ -58,61 +85,39 @@ public class QueryPlanPartitionUtils {
   private static void splitQueryPlanBySelectPath(ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
-    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    // split query plan by select path
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> selectPaths = queryPlan.getPaths();
     for (Path path : selectPaths) {
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!selectSeriesByGroupId.containsKey(groupId)) {
-        selectSeriesByGroupId.put(groupId, new ArrayList<>());
-      }
-      selectSeriesByGroupId.get(groupId).add(path);
+      selectGroupEntityMap.putIfAbsent(groupId, new SelectSeriesGroupEntity(groupId));
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
     }
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
-      String groupId = entry.getKey();
-      List<Path> paths = entry.getValue();
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setProposer(queryPlan.getProposer());
       subQueryPlan.setPaths(paths);
       subQueryPlan.setExpression(queryPlan.getExpression());
-      selectPathPlans.put(groupId, subQueryPlan);
+      entity.setQueryPlan(subQueryPlan);
     }
   }
 
+
   /**
-   * Split query plan with filter.
+   * Split query plan by filter paths
    */
-  public static void splitQueryPlanWithValueFilter(
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+  private static void splitQueryPlanByFilterPath(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    if (queryPlan instanceof GroupByPlan) {
-      splitGroupByPlan((GroupByPlan) queryPlan, singleQueryManager);
-    } else if (queryPlan instanceof AggregationPlan) {
-      splitAggregationPlan((AggregationPlan) queryPlan, singleQueryManager);
-    } else {
-      splitQueryPlan(queryPlan, singleQueryManager);
-    }
-  }
-
-  private static void splitGroupByPlan(GroupByPlan queryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) {
-    throw new UnsupportedOperationException();
-  }
-
-  private static void splitAggregationPlan(AggregationPlan aggregationPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) {
-    throw new UnsupportedOperationException();
-  }
-
-  private static void splitQueryPlan(QueryPlan queryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
-    splitQueryPlanBySelectPath(singleQueryManager);
     // split query plan by filter path
-    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager.getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = singleQueryManager
+        .getFilterSeriesGroupEntityMap();
     IExpression expression = queryPlan.getExpression();
     ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
-    for(FilterGroupEntity filterGroupEntity: filterGroupEntityMap.values()){
-      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : filterGroupEntityMap.values()) {
+      List<Path> filterSeriesList = filterSeriesGroupEntity.getFilterPaths();
       // create filter sub query plan
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setPaths(filterSeriesList);
@@ -121,7 +126,136 @@ public class QueryPlanPartitionUtils {
       if (subExpression.getType() != ExpressionType.TRUE) {
         subQueryPlan.setExpression(subExpression);
       }
-      filterGroupEntity.setQueryPlan(subQueryPlan);
+      filterSeriesGroupEntity.setQueryPlan(subQueryPlan);
     }
   }
+
+  /**
+   * Split group by plan by select path
+   */
+  private static void splitGroupByPlanBySelectPath(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    GroupByPlan queryPlan = (GroupByPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = queryPlan.getPaths();
+    List<String> aggregations = queryPlan.getAggregations();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
+    for (int i = 0; i < selectPaths.size(); i++) {
+      String aggregation = aggregations.get(i);
+      Path path = selectPaths.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
+        selectAggregationByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+      selectAggregationByGroupId.get(groupId).add(aggregation);
+    }
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectGroupEntityMap.entrySet()) {
+      String groupId = entry.getKey();
+      SelectSeriesGroupEntity entity = entry.getValue();
+      List<Path> paths = entity.getSelectPaths();
+      GroupByPlan subQueryPlan = new GroupByPlan();
+      subQueryPlan.setIntervals(queryPlan.getIntervals());
+      subQueryPlan.setOrigin(queryPlan.getOrigin());
+      subQueryPlan.setUnit(queryPlan.getUnit());
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split group by plan with filter path
+   */
+  private static void splitGroupByPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitGroupByPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
+  /**
+   * Split aggregation plan by select path
+   */
+  private static void splitAggregationPlanBySelectPath(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    AggregationPlan queryPlan = (AggregationPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = queryPlan.getPaths();
+    List<String> aggregations = queryPlan.getAggregations();
+    Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    for (int i = 0; i < selectPaths.size(); i++) {
+      Path path = selectPaths.get(i);
+      String aggregation = aggregations.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
+        selectAggregationByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectAggregationByGroupId.get(groupId).add(aggregation);
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+    }
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectGroupEntityMap.entrySet()) {
+      String groupId = entry.getKey();
+      SelectSeriesGroupEntity entity = entry.getValue();
+      List<Path> paths = entity.getSelectPaths();
+      AggregationPlan subQueryPlan = new AggregationPlan();
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split aggregation plan with filter path
+   */
+  private static void splitAggregationPlanWithFilter(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitAggregationPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
+  /**
+   * Split a fill plan, which only contains select paths.
+   */
+  private static void splitFillPlan(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = fillQueryPlan.getPaths();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
+    for (Path path : selectPaths) {
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      selectGroupEntityMap.putIfAbsent(groupId, new SelectSeriesGroupEntity(groupId));
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
+    }
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
+      FillQueryPlan subQueryPlan = new FillQueryPlan();
+      subQueryPlan.setProposer(fillQueryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(fillQueryPlan.getExpression());
+      subQueryPlan.setQueryTime(fillQueryPlan.getQueryTime());
+      subQueryPlan.setFillType(new EnumMap<>(fillQueryPlan.getFillType()));
+      entity.setQueryPlan(subQueryPlan);
+    }
+  }
+
+  /**
+   * Split query plan with filter
+   */
+  private static void splitQueryPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitQueryPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
 }
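
A side note on the grouping idiom the split methods above share (putIfAbsent followed by get): computeIfAbsent expresses the same thing with a single map lookup. Illustrative only, not a change this patch makes:

    selectGroupEntityMap
        .computeIfAbsent(groupId, SelectSeriesGroupEntity::new)
        .addSelectPaths(path);
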
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
index bab1536..d0690cd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@ -22,7 +22,7 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
+import org.apache.iotdb.cluster.qp.task.DataQueryTask;
 
 /**
  * Handle the request and process the result as a client with the current node
@@ -31,19 +31,9 @@ public interface NodeAsClient {
 
   /**
    * Asynchronous processing requests
-   *  @param leader leader node of the target group
    * @param qpTask single QPTask to be executed
    */
-  void asyncHandleRequest(BasicRequest request, PeerId leader,
-      SingleQPTask qpTask) throws RaftConnectionException;
-
-  /**
-   * Synchronous processing requests
-   * @param peerId leader node of the target group
-   *
-   */
-  QueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
-      throws RaftConnectionException;
+  void asyncHandleRequest(SingleQPTask qpTask) throws RaftConnectionException;
 
   /**
    * Shut down client
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
index 19f1343..cf41ae6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
@@ -18,26 +18,22 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.impl;
 
-import com.alipay.remoting.InvokeCallback;
 import com.alipay.remoting.exception.RemotingException;
-import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.option.CliOptions;
 import com.alipay.sofa.jraft.rpc.impl.cli.BoltCliClientService;
 import java.util.LinkedList;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import org.apache.iotdb.cluster.concurrent.pool.NodeAsClientThreadManager;
 import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
-import org.apache.iotdb.cluster.qp.task.QueryTask;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
-import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.db.exception.ProcessorException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,30 +51,20 @@ public class RaftNodeAsClientManager {
   private static final int TASK_TIMEOUT_MS = CLUSTER_CONFIG.getQpTaskTimeout();
 
   /**
-   * Max valid number of @NodeAsClient usage, represent the number can run simultaneously
-   * at the same time
-   */
-  private static final int MAX_VALID_CLIENT_NUM = CLUSTER_CONFIG.getMaxNumOfInnerRpcClient();
-
-  /**
    * Max request number in queue
    */
-  private static final int MAX_QUEUE_CLIENT_NUM = CLUSTER_CONFIG.getMaxNumOfInnerRpcClient();
-
-  /**
-   * RaftNodeAsClient list
-   */
-  private final LinkedList<RaftNodeAsClient> clientList = new LinkedList<>();
+  private static final int MAX_QUEUE_TASK_NUM = CLUSTER_CONFIG.getMaxQueueNumOfQPTask();
 
   /**
-   * Number of clients in use
+   * Node as client thread pool manager
    */
-  private AtomicInteger clientNumInUse = new AtomicInteger(0);
+  private static final NodeAsClientThreadManager THREAD_POOL_MANAGER = NodeAsClientThreadManager
+      .getInstance();
 
   /**
-   * Number of requests for clients in queue
+   * Queue of pending QPTasks
    */
-  private int queueClientNum = 0;
+  private final LinkedList<SingleQPTask> taskQueue = new LinkedList<>();
 
   /**
    * Lock to update clientNumInUse
@@ -95,101 +81,100 @@ public class RaftNodeAsClientManager {
    */
   private volatile boolean isShuttingDown;
 
+  /**
+   * Marks whether the manager has been initialized or not
+   */
+  private volatile boolean isInit;
+
   private RaftNodeAsClientManager() {
 
   }
 
   public void init() {
     isShuttingDown = false;
+    isInit = true;
+    taskQueue.clear();
+    for (int i = 0; i < CLUSTER_CONFIG.getConcurrentInnerRpcClientThread(); i++) {
+      THREAD_POOL_MANAGER.execute(() -> {
+        RaftNodeAsClient client = new RaftNodeAsClient();
+        while (true) {
+          consumeQPTask(client);
+          if (Thread.currentThread().isInterrupted()) {
+            break;
+          }
+        }
+        client.shutdown();
+      });
+    }
   }
 
   /**
-   * Try to get clientList, return null if num of queue clientList exceeds threshold.
+   * Produce a QPTask to be executed, i.e. enqueue it for a worker thread.
    */
-  public RaftNodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
+  public void produceQPTask(SingleQPTask qpTask) throws RaftConnectionException {
+    checkInit();
     resourceLock.lock();
     try {
-      if (queueClientNum >= MAX_QUEUE_CLIENT_NUM) {
+      checkInit();
+      checkShuttingDown();
+      if (taskQueue.size() >= MAX_QUEUE_TASK_NUM) {
         throw new RaftConnectionException(String
             .format("Raft inner rpc clients have reached the max numbers %s",
-                CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
-                    .getMaxQueueNumOfInnerRpcClient()));
-      }
-      queueClientNum++;
-      try {
-        while (true) {
-          checkShuttingDown();
-          if (clientNumInUse.get() < MAX_VALID_CLIENT_NUM) {
-            clientNumInUse.incrementAndGet();
-            return getClient();
-          }
-          resourceCondition.await();
-        }
-      } catch (InterruptedException e) {
-        throw new RaftConnectionException("An error occurred when trying to get NodeAsClient", e);
-      } finally {
-        queueClientNum--;
+                CLUSTER_CONFIG.getConcurrentInnerRpcClientThread() + CLUSTER_CONFIG
+                    .getMaxQueueNumOfQPTask()));
       }
+      taskQueue.addLast(qpTask);
+      resourceCondition.signal();
     } finally {
       resourceLock.unlock();
     }
   }
 
-  private void checkShuttingDown() throws RaftConnectionException {
-    if (isShuttingDown) {
-      throw new RaftConnectionException(
-          "Reject to provide RaftNodeAsClient client because cluster system is shutting down");
-    }
-  }
-
-  /**
-   * No-safe method, get client
-   */
-  private RaftNodeAsClient getClient() {
-    if (clientList.isEmpty()) {
-      return new RaftNodeAsClient();
-    } else {
-      return clientList.removeFirst();
+  public void checkInit() {
+    if (!isInit) {
+      init();
     }
   }
 
   /**
-   * Release usage of a client
+   * Consume a QPTask from the queue
    */
-  public void releaseClient(RaftNodeAsClient client) {
+  private void consumeQPTask(RaftNodeAsClient client) {
     resourceLock.lock();
     try {
-      clientNumInUse.decrementAndGet();
-      resourceCondition.signalAll();
-      clientList.addLast(client);
+      while (taskQueue.isEmpty()) {
+        if (Thread.currentThread().isInterrupted()) {
+          return;
+        }
+        resourceCondition.await();
+      }
+      client.asyncHandleRequest(taskQueue.removeFirst());
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOGGER.debug("Occur interruption when await for ResourceContidion", e);
     } finally {
       resourceLock.unlock();
     }
   }
 
-  public void shutdown() throws InterruptedException {
-    isShuttingDown = true;
-    while (clientNumInUse.get() != 0 && queueClientNum != 0) {
-      // wait until releasing all usage of clients.
-      resourceCondition.await();
-    }
-    while (!clientList.isEmpty()) {
-      clientList.removeFirst().shutdown();
+
+  private void checkShuttingDown() throws RaftConnectionException {
+    if (isShuttingDown) {
+      throw new RaftConnectionException(
+          "Reject to execute QPTask because cluster system is shutting down");
     }
   }
 
-  /**
-   * Get client number in use
-   */
-  public int getClientNumInUse() {
-    return clientNumInUse.get();
+  public void shutdown() throws ProcessorException {
+    isShuttingDown = true;
+    THREAD_POOL_MANAGER.close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
   }
 
   /**
-   * Get client number in queue
+   * Get the number of QPTasks in the queue
    */
-  public int getClientNumInQueue() {
-    return queueClientNum;
+  public int getQPTaskNumInQueue() {
+    return taskQueue.size();
   }
 
   public static final RaftNodeAsClientManager getInstance() {
@@ -227,59 +212,21 @@ public class RaftNodeAsClientManager {
     }
 
     @Override
-    public void asyncHandleRequest(BasicRequest request, PeerId leader,
-        SingleQPTask qpTask)
-        throws RaftConnectionException {
-      LOGGER.debug("Node as client to send request to leader: {}", leader);
-      try {
-        boltClientService.getRpcClient()
-            .invokeWithCallback(leader.getEndpoint().toString(), request,
-                new InvokeCallback() {
-
-                  @Override
-                  public void onResponse(Object result) {
-                    BasicResponse response = (BasicResponse) result;
-                    releaseClient(RaftNodeAsClient.this);
-                    qpTask.run(response);
-                  }
-
-                  @Override
-                  public void onException(Throwable e) {
-                    LOGGER.error("Bolt rpc client occurs errors when handling Request", e);
-                    qpTask.setTaskState(TaskState.EXCEPTION);
-                    releaseClient(RaftNodeAsClient.this);
-                    qpTask.run(null);
-                  }
-
-                  @Override
-                  public Executor getExecutor() {
-                    return null;
-                  }
-                }, TASK_TIMEOUT_MS);
-      } catch (RemotingException | InterruptedException e) {
-        LOGGER.error(e.getMessage());
-        qpTask.setTaskState(TaskState.EXCEPTION);
-        releaseClient(RaftNodeAsClient.this);
-        qpTask.run(null);
-        throw new RaftConnectionException(e);
-      }
-    }
-
-    @Override
-    public QueryTask syncHandleRequest(BasicRequest request, PeerId peerId) {
+    public void asyncHandleRequest(SingleQPTask qpTask) {
+      LOGGER.debug("Node as client to send request to leader: {}", qpTask.getTargetNode());
       try {
         BasicResponse response = (BasicResponse) boltClientService.getRpcClient()
-            .invokeSync(peerId.getEndpoint().toString(), request, TASK_TIMEOUT_MS);
-        return new QueryTask(response, TaskState.FINISH);
+            .invokeSync(qpTask.getTargetNode().getEndpoint().toString(),
+                qpTask.getRequest(), TASK_TIMEOUT_MS);
+        qpTask.receive(response);
       } catch (RemotingException | InterruptedException e) {
-        return new QueryTask(null, TaskState.EXCEPTION);
-      } finally {
-        releaseClient(RaftNodeAsClient.this);
+        LOGGER.error("Node-as-client failed to handle request", e);
+        qpTask.receive(null);
       }
     }
 
     /**
-     * Shut down clientList
+     * Shut down this client
      */
     @Override
     public void shutdown() {
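The manager refactor above replaces a pool of reusable clients with a bounded producer-consumer queue: produceQPTask enqueues under a lock and signals, and a fixed number of worker threads created in init() drain the queue. A self-contained sketch of that pattern with the same Lock/Condition primitives (the bound and names are illustrative, not the project's):

    import java.util.LinkedList;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class BoundedTaskQueueSketch {

      private static final int MAX_QUEUE_SIZE = 100; // stand-in for getMaxQueueNumOfQPTask()
      private final LinkedList<Runnable> taskQueue = new LinkedList<>();
      private final Lock lock = new ReentrantLock();
      private final Condition notEmpty = lock.newCondition();

      // Producer: reject when the queue is full, otherwise enqueue and signal a worker.
      public void produce(Runnable task) {
        lock.lock();
        try {
          if (taskQueue.size() >= MAX_QUEUE_SIZE) {
            throw new IllegalStateException("task queue is full");
          }
          taskQueue.addLast(task);
          notEmpty.signal();
        } finally {
          lock.unlock();
        }
      }

      // Worker loop: block while the queue is empty, then take one task and run
      // it outside the lock so a slow task does not stall producers.
      public void workerLoop() {
        while (!Thread.currentThread().isInterrupted()) {
          Runnable task;
          lock.lock();
          try {
            while (taskQueue.isEmpty()) {
              notEmpty.await();
            }
            task = taskQueue.removeFirst();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          } finally {
            lock.unlock();
          }
          task.run();
        }
      }
    }

One design note: the sketch releases resourceLock's analogue before running the task, whereas consumeQPTask above calls client.asyncHandleRequest (a blocking invokeSync) while still holding the lock, which serializes all worker threads behind it.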
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
index 90dc24a..a76d2a6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
@@ -16,25 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.request.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryMetricAsyncProcessor extends BasicAsyncUserProcessor<QueryMetricRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetricRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryMetricResponse response = QueryMetricResponse.createSuccessResponse(groupId,
+        RaftUtils.getReplicaMetric(request.getGroupID(), request.getMetric()));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryMetricRequest.class.getName();
   }
 }
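Each processor's interest() returns the fully qualified request class name, which is how the bolt RPC server routes an incoming request object to its processor. A toy sketch of that dispatch-by-class-name idea (the registry here is illustrative, not sofa-bolt's actual API):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Consumer;

    public class DispatchSketch {

      // Map a request's class name to its handler, mirroring interest().
      private final Map<String, Consumer<Object>> processors = new HashMap<>();

      void register(String interest, Consumer<Object> handler) {
        processors.put(interest, handler);
      }

      void dispatch(Object request) {
        Consumer<Object> handler = processors.get(request.getClass().getName());
        if (handler == null) {
          throw new IllegalStateException("no processor for " + request.getClass().getName());
        }
        handler.accept(request);
      }

      public static void main(String[] args) {
        DispatchSketch sketch = new DispatchSketch();
        sketch.register(String.class.getName(), r -> System.out.println("handled: " + r));
        sketch.dispatch("a request");
      }
    }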
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
index de2d2ab..291da32 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
@@ -40,19 +40,16 @@ public class DataGroupNonQueryAsyncProcessor extends
   private static final Logger LOGGER = LoggerFactory
       .getLogger(DataGroupNonQueryAsyncProcessor.class);
 
-  public DataGroupNonQueryAsyncProcessor() {
-  }
-
   @Override
   public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
       DataGroupNonQueryRequest request) {
     LOGGER.debug("Handle data non query request");
 
-    /** Check if it's the leader **/
+    /* Check if it's the leader */
     String groupId = request.getGroupID();
     DataPartitionRaftHolder dataPartitionRaftHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
     if (!dataPartitionRaftHolder.getFsm().isLeader()) {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
       LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
 
       DataGroupNonQueryResponse response = DataGroupNonQueryResponse
@@ -61,7 +58,8 @@ public class DataGroupNonQueryAsyncProcessor extends
     } else {
       LOGGER.debug("Apply task to raft node");
 
-      /** Apply Task to Raft Node **/
+
+      /* Apply Task to Raft Node */
       BasicResponse response = DataGroupNonQueryResponse.createEmptyResponse(groupId);
       RaftService service = (RaftService) dataPartitionRaftHolder.getService();
       RaftUtils.executeRaftTaskForRpcProcessor(service, asyncContext, request, response);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
index 9f09bbb..95f9e32 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
@@ -49,7 +49,7 @@ public class MetaGroupNonQueryAsyncProcessor extends
     String groupId = request.getGroupID();
     MetadataRaftHolder metadataHolder = RaftUtils.getMetadataRaftHolder();
     if (!metadataHolder.getFsm().isLeader()) {
-      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
       LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
 
       MetaGroupNonQueryResponse response = MetaGroupNonQueryResponse
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
index 894d9eb..8a388d3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
@@ -20,17 +20,21 @@ package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
 
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSeriesReaderRequest> {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(InitSeriesReaderSyncProcessor.class);
+
   @Override
   public Object handleRequest(BizContext bizContext, InitSeriesReaderRequest request)
       throws Exception {
@@ -47,7 +51,8 @@ public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSe
    * @param groupId group id
    */
   private void handleNullRead(int readConsistencyLevel, String groupId) throws ProcessorException {
-    if (readConsistencyLevel == ClusterConstant.STRONG_CONSISTENCY_LEVEL && !QPExecutorUtils
+    LOGGER.debug("Read data level is {}", readConsistencyLevel);
+    if (readConsistencyLevel == ClusterConsistencyLevel.STRONG.ordinal() && !QPExecutorUtils
         .checkDataGroupLeader(groupId)) {
       Status nullReadTaskStatus = Status.OK();
       RaftUtils.handleNullReadToDataGroup(nullReadTaskStatus, groupId);
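These processors now compare the request's integer level against ClusterConsistencyLevel ordinals rather than raw int constants. A hedged sketch of the idea (only STRONG and WEAK are confirmed by this patch; the helper method is hypothetical), with the usual caveat that ordinal()-based wire values silently change if the enum's declaration order does:

    enum ConsistencyLevelSketch {
      STRONG, WEAK;

      // Hypothetical helper: map a wire-level int back to a level check.
      static boolean isWeak(int readConsistencyLevel) {
        return readConsistencyLevel == WEAK.ordinal();
      }
    }

    class ConsistencyDemo {
      public static void main(String[] args) {
        int levelFromRequest = ConsistencyLevelSketch.WEAK.ordinal();
        if (ConsistencyLevelSketch.isWeak(levelFromRequest)) {
          System.out.println("serve the read locally");
        } else {
          System.out.println("route the read through the group leader");
        }
      }
    }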
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
index 90dc24a..4c8e599 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
@@ -22,7 +22,6 @@ import com.alipay.remoting.BizContext;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 
 public class QuerySeriesDataSyncProcessor extends
     BasicSyncUserProcessor<QuerySeriesDataRequest> {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
index 36e657c..a8fa1fa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -42,8 +42,8 @@ public class QueryMetadataAsyncProcessor extends
       QueryMetadataRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
-      QueryMetadataResponse response = null;
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
+      QueryMetadataResponse response;
       try {
         response = QueryMetadataResponse
             .createSuccessResponse(groupId, mManager.getMetadata());
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
index 8771eea..7a46a14 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QueryMetadataInStringAsyncProcessor extends
       QueryMetadataInStringRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryMetadataInStringResponse response = QueryMetadataInStringResponse
           .createSuccessResponse(groupId, mManager.getMetadataInString());
       response.addResult(true);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
index 8e1e47b..3736105 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QueryPathsAsyncProcessor extends BasicAsyncUserProcessor<QueryPaths
       QueryPathsRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryPathsResponse response = QueryPathsResponse
           .createEmptyResponse(groupId);
       try {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
index 9e4b1c7..c8df5a2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -41,7 +41,7 @@ public class QuerySeriesTypeAsyncProcessor extends BasicAsyncUserProcessor<Query
       QuerySeriesTypeRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QuerySeriesTypeResponse response;
       try {
         response = QuerySeriesTypeResponse.createSuccessResponse(groupId, mManager.getSeriesType(request.getPath()));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
index 593f99d..d08cd1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
@@ -22,7 +22,7 @@ import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
-import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
@@ -42,7 +42,7 @@ public class QueryTimeSeriesAsyncProcessor extends BasicAsyncUserProcessor<Query
       QueryTimeSeriesRequest request) {
     String groupId = request.getGroupID();
 
-    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (request.getReadConsistencyLevel() == ClusterConsistencyLevel.WEAK.ordinal()) {
       QueryTimeSeriesResponse response = QueryTimeSeriesResponse
           .createEmptyResponse(groupId);
       try {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
similarity index 50%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
index 90dc24a..5074d45 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryJobNumAsyncProcessor.java
@@ -16,25 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryJobNumRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryJobNumResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryJobNumAsyncProcessor extends BasicAsyncUserProcessor<QueryJobNumRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryJobNumRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryJobNumResponse response = QueryJobNumResponse.createSuccessResponse(groupId,
+        RaftUtils.getLocalQueryJobNumMap());
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryJobNumRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
similarity index 50%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
index 90dc24a..9c5a2bf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryLeaderAsyncProcessor.java
@@ -16,25 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryLeaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryLeaderResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryLeaderAsyncProcessor extends BasicAsyncUserProcessor<QueryLeaderRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryLeaderRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryLeaderResponse response = QueryLeaderResponse.createSuccessResponse(groupId,
+        RaftUtils.getLocalLeaderPeerID(groupId));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryLeaderRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java
new file mode 100644
index 0000000..c2dfef1
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryMetricAsyncProcessor.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+
+public class QueryMetricAsyncProcessor extends BasicAsyncUserProcessor<QueryMetricRequest> {
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetricRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryMetricResponse response = QueryMetricResponse.createSuccessResponse(groupId,
+        RaftUtils.getReplicaMetric(request.getGroupID(), request.getMetric()));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
+  }
+
+  @Override
+  public String interest() {
+    return QueryMetricRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
index 90dc24a..615eaf6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetric/QueryStatusAsyncProcessor.java
@@ -16,25 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetric;
 
+import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
-import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryStatusRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryStatusResponse;
 
-public class QuerySeriesDataSyncProcessor extends
-    BasicSyncUserProcessor<QuerySeriesDataRequest> {
+public class QueryStatusAsyncProcessor extends BasicAsyncUserProcessor<QueryStatusRequest> {
 
   @Override
-  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
-      throws Exception {
-    return ClusterLocalQueryManager.getInstance().readBatchData(request);
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryStatusRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryStatusResponse response = QueryStatusResponse.createSuccessResponse(groupId,
+        true);
+    response.addResult(true);
+    asyncContext.sendResponse(response);
   }
 
   @Override
   public String interest() {
-    return QuerySeriesDataRequest.class.getName();
+    return QueryStatusRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
index dc15158..33a4d8e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
@@ -19,7 +19,6 @@
 package org.apache.iotdb.cluster.rpc.raft.request;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
old mode 100755
new mode 100644
similarity index 67%
rename from spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
index be36f35..eb81769
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/exception/BasicOperatorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
@@ -16,19 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.exception;
+package org.apache.iotdb.cluster.rpc.raft.request;
 
+import java.io.Serializable;
 
-/**
- * This exception is threw whiling meeting error in BasicOperator
- *
- */
-public class BasicOperatorException extends QueryProcessorException {
+public class QueryMetricRequest extends BasicQueryRequest implements Serializable {
 
-  private static final long serialVersionUID = -2163809754074237707L;
+  private String metric;
 
-  public BasicOperatorException(String msg) {
-    super(msg);
+  public QueryMetricRequest(String groupID, int readConsistencyLevel, String metric) {
+    super(groupID, readConsistencyLevel);
+    this.metric = metric;
   }
 
-}
+  public String getMetric() {
+    return metric;
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
index c974e2f..e28ac15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
@@ -18,10 +18,16 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.request.querydata;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
@@ -42,12 +48,12 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
   /**
    * Key is series type, value is query plan
    */
-  private Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+  private Map<PathType, byte[]> allQueryPlan = new EnumMap<>(PathType.class);
 
   /**
    * Represent all filter of leaf node in filter tree while executing a query with value filter.
    */
-  private List<Filter> filterList = new ArrayList<>();
+  private List<byte[]> filterList = new ArrayList<>();
 
 
   private InitSeriesReaderRequest(String groupID, String taskId) {
@@ -55,12 +61,17 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId, int readConsistencyLevel,
-      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList){
+  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId,
+      int readConsistencyLevel,
+      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList) throws IOException {
     InitSeriesReaderRequest request = new InitSeriesReaderRequest(groupId, taskId);
     request.setReadConsistencyLevel(readConsistencyLevel);
-    request.allQueryPlan = allQueryPlan;
-    request.filterList = filterList;
+    for (Entry<PathType, QueryPlan> entry : allQueryPlan.entrySet()) {
+      request.allQueryPlan.put(entry.getKey(), toByteArray(entry.getValue()));
+    }
+    for (Filter filter : filterList) {
+      request.filterList.add(toByteArray(filter));
+    }
     return request;
   }
 
@@ -72,20 +83,51 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public Map<PathType, QueryPlan> getAllQueryPlan() {
-    return allQueryPlan;
+  public Map<PathType, QueryPlan> getAllQueryPlan() throws IOException, ClassNotFoundException {
+    Map<PathType, QueryPlan> queryPlanMap = new EnumMap<>(PathType.class);
+    for (Entry<PathType, byte[]> entry : allQueryPlan.entrySet()) {
+      queryPlanMap.put(entry.getKey(), (QueryPlan) toObject(entry.getValue()));
+    }
+    return queryPlanMap;
   }
 
-  public void setAllQueryPlan(
-      Map<PathType, QueryPlan> allQueryPlan) {
-    this.allQueryPlan = allQueryPlan;
+  public List<Filter> getFilterList() throws IOException, ClassNotFoundException {
+    List<Filter> filters = new ArrayList<>();
+    for (byte[] filterBytes : filterList) {
+      filters.add((Filter) toObject(filterBytes));
+    }
+    return filters;
   }
 
-  public List<Filter> getFilterList() {
-    return filterList;
+  /**
+   * Convert an object to byte array
+   *
+   * @param obj Object, which need to implement Serializable
+   * @return byte array of object
+   */
+  private static byte[] toByteArray(Object obj) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    ObjectOutputStream oos = new ObjectOutputStream(bos);
+    oos.writeObject(obj);
+    oos.flush();
+    byte[] bytes = bos.toByteArray();
+    oos.close();
+    bos.close();
+    return bytes;
   }
 
-  public void setFilterList(List<Filter> filterList) {
-    this.filterList = filterList;
+  /**
+   * Convert byte array back to Object
+   *
+   * @param bytes byte array of object
+   * @return object
+   */
+  private static Object toObject(byte[] bytes) throws IOException, ClassNotFoundException {
+    ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
+    ObjectInputStream ois = new ObjectInputStream(bis);
+    Object obj = ois.readObject();
+    ois.close();
+    bis.close();
+    return obj;
   }
 }
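toByteArray/toObject above are a plain java.io serialization round-trip. The same round-trip can be written with try-with-resources so the streams are closed even when (de)serialization throws; a self-contained sketch:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    public class SerializationSketch {

      // Serialize any Serializable object to bytes; streams close even on failure.
      static byte[] toByteArray(Serializable obj) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
          oos.writeObject(obj);
        }
        return bos.toByteArray();
      }

      // Deserialize bytes back into an object.
      static Object toObject(byte[] bytes) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
          return ois.readObject();
        }
      }

      public static void main(String[] args) throws Exception {
        byte[] bytes = toByteArray("hello");
        System.out.println(toObject(bytes)); // hello
      }
    }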
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
index 351e6eb..cbcef15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
@@ -39,21 +39,16 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
    */
   private List<Long> batchTimestamp;
 
-  /**
-   * Series to fetch data from remote query node
-   */
-  private List<String> fetchDataSeries;
-
   private QuerySeriesDataByTimestampRequest(String groupID) {
     super(groupID);
   }
 
-  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds, String taskId, List<Long> batchTimestamp, List<String> fetchDataSeries){
+  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds,
+      String taskId, List<Long> batchTimestamp) {
     QuerySeriesDataByTimestampRequest request = new QuerySeriesDataByTimestampRequest(groupId);
     request.queryRounds = queryRounds;
     request.taskId = taskId;
     request.batchTimestamp = batchTimestamp;
-    request.fetchDataSeries = fetchDataSeries;
     return request;
   }
 
@@ -80,12 +75,4 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
   public void setBatchTimestamp(List<Long> batchTimestamp) {
     this.batchTimestamp = batchTimestamp;
   }
-
-  public List<String> getFetchDataSeries() {
-    return fetchDataSeries;
-  }
-
-  public void setFetchDataSeries(List<String> fetchDataSeries) {
-    this.fetchDataSeries = fetchDataSeries;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
index 554b8c1..e0fc23c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
@@ -46,9 +46,9 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   private PathType pathType;
 
   /**
-   * Key is series type, value is series list
+   * List of series path indexes.
    */
-  private List<String> seriesPaths = new ArrayList<>();
+  private List<Integer> seriesPathIndexs = new ArrayList<>();
 
   private QuerySeriesDataRequest(String groupID, String taskId) {
     super(groupID);
@@ -56,10 +56,10 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   }
 
   public static QuerySeriesDataRequest createFetchDataRequest(String groupId, String taskId,
-      PathType pathType, List<String> seriesPaths, long queryRounds) {
+      PathType pathType, List<Integer> seriesPathIndexs, long queryRounds) {
     QuerySeriesDataRequest request = new QuerySeriesDataRequest(groupId, taskId);
     request.pathType = pathType;
-    request.seriesPaths = seriesPaths;
+    request.seriesPathIndexs = seriesPathIndexs;
     request.queryRounds = queryRounds;
     return request;
   }
@@ -88,11 +88,7 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
     this.pathType = pathType;
   }
 
-  public List<String> getSeriesPaths() {
-    return seriesPaths;
-  }
-
-  public void setSeriesPaths(List<String> seriesPaths) {
-    this.seriesPaths = seriesPaths;
+  public List<Integer> getSeriesPathIndexs() {
+    return seriesPathIndexs;
   }
 }
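With this change a fetch round no longer repeats full series-path strings; it sends integer indexes into the path list that the earlier InitSeriesReaderRequest registered on the query node. A minimal sketch of the index-based lookup (the registered list is hypothetical):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class PathIndexSketch {

      public static void main(String[] args) {
        // Registered once at reader-initialization time.
        List<String> registeredPaths =
            Arrays.asList("root.g1.d1.s1", "root.g1.d1.s2", "root.g1.d2.s1");

        // Each fetch round only carries small integer indexes.
        List<Integer> seriesPathIndexes = Arrays.asList(0, 2);

        // Resolve indexes back to paths on the query node.
        List<String> resolved = new ArrayList<>();
        for (int index : seriesPathIndexes) {
          resolved.add(registeredPaths.get(index));
        }
        System.out.println(resolved); // [root.g1.d1.s1, root.g1.d2.s1]
      }
    }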
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
index 9212258..61069e8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryJobNumRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryJobNumRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8438291845259380829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryJobNumRequest(String groupID) {
+    super(groupID);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
index 9212258..a3a2c06 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryLeaderRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryLeaderRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8438291563829380829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryLeaderRequest(String groupID) {
+    super(groupID);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
similarity index 62%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
index 9212258..4d0c0f6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryMetricRequest.java
@@ -16,27 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryMetricRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8434915883945730829L;
 
-  private String name;
+  private String metric;
 
-  ThreadName(String name) {
-    this.name = name;
+  public QueryMetricRequest(String groupID, String metric) {
+    super(groupID);
+    this.metric = metric;
   }
 
-  public String getName() {
-    return name;
+  public String getMetric() {
+    return metric;
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
index 9212258..b88b08e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetric/QueryStatusRequest.java
@@ -16,27 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.concurrent;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetric;
 
-public enum ThreadName {
+import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 
-  /**
-   * QP Task thread
-   */
-  QP_TASK("QP-Task-Thread"),
+public class QueryStatusRequest extends BasicRequest implements Serializable {
 
-  /**
-   * Remote query timer
-   */
-  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+  private static final long serialVersionUID = 8434915883943829829L;
 
-  private String name;
-
-  ThreadName(String name) {
-    this.name = name;
-  }
-
-  public String getName() {
-    return name;
+  public QueryStatusRequest(String groupID) {
+    super(groupID);
   }
-}
+}
\ No newline at end of file
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
similarity index 51%
copy from iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
index bf6a349..9c77792 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/exception/BufferWriteProcessorException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
@@ -16,26 +16,31 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.db.exception;
+package org.apache.iotdb.cluster.rpc.raft.response;
 
-public class BufferWriteProcessorException extends ProcessorException {
+import java.util.Map;
 
-  private static final long serialVersionUID = 6817880163296469038L;
+public class QueryMetricResponse extends BasicResponse {
 
-  public BufferWriteProcessorException() {
-    super();
-  }
+  private Map<String, Long> value;
 
-  public BufferWriteProcessorException(Exception pathExcp) {
-    super(pathExcp.getMessage());
+  private QueryMetricResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public BufferWriteProcessorException(String msg) {
-    super(msg);
+  public static QueryMetricResponse createSuccessResponse(String groupId, Map<String, Long> value) {
+    QueryMetricResponse response = new QueryMetricResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public BufferWriteProcessorException(Throwable throwable) {
-    super(throwable.getMessage());
+  public static QueryMetricResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetricResponse(groupId, false, null, errorMsg);
   }
 
+  public Map<String, Long> getValue() {
+    return value;
+  }
 }
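QueryMetricResponse follows the same convention as the other responses in this package: a private constructor plus static factories for the success and error cases. A compact sketch of the pattern, assuming only the fields shown above:

    import java.util.Map;

    public class MetricResponseSketch {

      private final String groupId;
      private final String errorMsg;   // null on success
      private Map<String, Long> value; // null on error

      // Private constructor forces callers through the named factories.
      private MetricResponseSketch(String groupId, String errorMsg) {
        this.groupId = groupId;
        this.errorMsg = errorMsg;
      }

      static MetricResponseSketch createSuccessResponse(String groupId, Map<String, Long> value) {
        MetricResponseSketch response = new MetricResponseSketch(groupId, null);
        response.value = value;
        return response;
      }

      static MetricResponseSketch createErrorResponse(String groupId, String errorMsg) {
        return new MetricResponseSketch(groupId, errorMsg);
      }
    }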
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
index 9d86398..e9e858d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
 
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 /**
@@ -27,9 +29,12 @@ public class DataGroupNonQueryResponse extends BasicResponse {
 
   private static final long serialVersionUID = -8288044965888956717L;
 
+  private List<String> errorMsgList;
+
   private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
+    errorMsgList = new ArrayList<>();
   }
 
   public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
@@ -44,4 +49,11 @@ public class DataGroupNonQueryResponse extends BasicResponse {
     return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
   }
 
+  public List<String> getErrorMsgList() {
+    return errorMsgList;
+  }
+
+  public void addErrorMsg(String errorMsg) {
+    this.errorMsgList.add(errorMsg);
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
index 9d86398..c681390 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryJobNumResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import java.util.Map;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryJobNumResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private Map<String, Integer> value;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryJobNumResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryJobNumResponse createSuccessResponse(String groupId, Map<String, Integer> value) {
+    QueryJobNumResponse response = new QueryJobNumResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryJobNumResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryJobNumResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public Map<String, Integer> getValue() {
+    return value;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
index 9d86398..ad536aa 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryLeaderResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryLeaderResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private PeerId leader;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryLeaderResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryLeaderResponse createSuccessResponse(String groupId, PeerId leader) {
+    QueryLeaderResponse response = new QueryLeaderResponse(groupId, false, null,
+        null);
+    response.leader = leader;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryLeaderResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryLeaderResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public PeerId getLeader() {
+    return leader;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
index 9d86398..2f847db 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryMetricResponse.java
@@ -16,32 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
+import java.util.Map;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Handle response from data group leader
- */
-public class DataGroupNonQueryResponse extends BasicResponse {
+public class QueryMetricResponse extends BasicResponse {
 
-  private static final long serialVersionUID = -8288044965888956717L;
+  private Map<String, Long> value;
 
-  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private QueryMetricResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public static QueryMetricResponse createSuccessResponse(String groupId, Map<String, Long> value) {
+    QueryMetricResponse response = new QueryMetricResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
   }
 
-  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  public static QueryMetricResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetricResponse(groupId, false, null, errorMsg);
   }
 
-  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
-    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  public Map<String, Long> getValue() {
+    return value;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
similarity index 54%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
index f4cb4b5..2044f5e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetric/QueryStatusResponse.java
@@ -16,34 +16,31 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.task;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetric;
 
-import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-public class QueryTask {
-  private BasicResponse basicResponse;
-  private TaskState state;
+public class QueryStatusResponse extends BasicResponse {
 
-  public QueryTask(BasicResponse basicResponse,
-      TaskState state) {
-    this.basicResponse = basicResponse;
-    this.state = state;
-  }
+  private boolean status;
 
-  public BasicResponse getBasicResponse() {
-    return basicResponse;
+  private QueryStatusResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public void setBasicResponse(BasicResponse basicResponse) {
-    this.basicResponse = basicResponse;
+  public static QueryStatusResponse createSuccessResponse(String groupId, boolean status) {
+    QueryStatusResponse response = new QueryStatusResponse(groupId, false, null,
+        null);
+    response.status = status;
+    return response;
   }
 
-  public TaskState getState() {
-    return state;
+  public static QueryStatusResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryStatusResponse(groupId, false, null, errorMsg);
   }
 
-  public void setState(TaskState state) {
-    this.state = state;
+  public boolean getStatus() {
+    return status;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
new file mode 100644
index 0000000..01fe095
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.StartupException;
+import org.apache.iotdb.db.service.IService;
+import org.apache.iotdb.db.service.JMXService;
+import org.apache.iotdb.db.service.ServiceType;
+
+public class ClusterMonitor implements ClusterMonitorMBean, IService {
+
+  /**
+   * Original format = String.format("%s:%s=%s",
+   * IoTDBConstant.IOTDB_PACKAGE, IoTDBConstant.JMX_TYPE, getID().getJmxName())
+   */
+  public static final String MBEAN_NAME = "org.apache.iotdb.service:type=Cluster Monitor";
+
+  public static final ClusterMonitor INSTANCE = new ClusterMonitor();
+
+  public String getMbeanName() {
+    return MBEAN_NAME;
+  }
+
+  @Override
+  public void start() throws StartupException {
+    try {
+      JMXService.registerMBean(INSTANCE, MBEAN_NAME);
+    } catch (Exception e) {
+      String errorMessage = String
+          .format("Failed to start %s because of %s", this.getID().getName(),
+              e.getMessage());
+      throw new StartupException(errorMessage);
+    }
+  }
+
+  @Override
+  public void stop() {
+    JMXService.deregisterMBean(MBEAN_NAME);
+  }
+
+  @Override
+  public ServiceType getID() {
+    return ServiceType.CLUSTER_MONITOR_SERVICE;
+  }
+
+  @Override
+  public Map<Integer, String> getPhysicalRing() {
+    return RaftUtils.getPhysicalRing();
+  }
+
+  @Override
+  public Map<Integer, String> getVirtualRing() {
+    return RaftUtils.getVirtualRing();
+  }
+
+  @Override
+  public Map<String, String> getAllLeaders() {
+    Map<String, String> map = new HashMap<>();
+    RaftUtils.getGroupLeaderCache()
+        .forEach((groupId, leader) -> map.put(groupId, leader.toString()));
+    return map;
+  }
+
+  @Override
+  public String getDataPartitionOfSG(String sg) {
+    PeerId[] nodes = RaftUtils.getDataPartitionOfSG(sg);
+    StringBuilder builder = new StringBuilder();
+    builder.append(nodes[0].getIp()).append(" (leader)");
+    for (int i = 1; i < nodes.length; i++) {
+      builder.append(", ").append(nodes[i].getIp());
+    }
+    return builder.toString();
+  }
+
+  @Override
+  public Set<String> getAllStorageGroupsLocally() {
+    return RaftUtils.getAllStorageGroupsLocally();
+  }
+
+  @Override
+  public Map<String[], String[]> getDataPartitonOfNode(String ip) {
+    return RaftUtils.getDataPartitionOfNode(ip);
+  }
+
+  @Override
+  public Map<String[], String[]> getDataPartitonOfNode(String ip, int port) {
+    return RaftUtils.getDataPartitionOfNode(ip, port);
+  }
+
+  @Override
+  public Map<String, Map<String, Long>> getReplicaLagMap() {
+    return RaftUtils.getReplicaLagMap();
+  }
+
+  @Override
+  public Map<String, Map<String, Integer>> getQueryJobNumMap() {
+    return RaftUtils.getQueryJobNumMapForCluster();
+  }
+
+  @Override
+  public Map<String, Boolean> getStatusMap() {
+    return RaftUtils.getStatusMapForCluster();
+  }
+}
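
A minimal lifecycle sketch for the service above, assuming it is driven like any other IService; the sketch class name is illustrative, and getAllLeaders() only returns useful data on a running cluster:

    import org.apache.iotdb.cluster.service.ClusterMonitor;
    import org.apache.iotdb.db.exception.StartupException;

    public class ClusterMonitorLifecycleSketch {
      public static void main(String[] args) throws StartupException {
        ClusterMonitor monitor = ClusterMonitor.INSTANCE;
        monitor.start();                       // registers the MBean under MBEAN_NAME
        try {
          // Requires a running cluster; shown only to illustrate the read path.
          System.out.println(monitor.getAllLeaders());
        } finally {
          monitor.stop();                      // deregisters the MBean
        }
      }
    }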
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
new file mode 100644
index 0000000..cca0820
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service;
+
+import java.util.Map;
+import java.util.Set;
+
+public interface ClusterMonitorMBean {
+
+  /**
+   * Get physical hash ring
+   *
+   * @return key: hash value, value: node ip
+   */
+  Map<Integer, String> getPhysicalRing();
+
+  /**
+   * Get virtual hash ring
+   *
+   * @return key: hash value, value: node ip
+   */
+  Map<Integer, String> getVirtualRing();
+
+  /**
+   * Get currents leaders of each data partition
+   *
+   * @return key: group id, value: leader node ip
+   */
+  Map<String, String> getAllLeaders();
+
+  /**
+   * Get data partition information of the input storage group in String format. The node ips are
+   * separated by ',', and the first ip is the current leader.
+   *
+   * @param sg input storage group path
+   * @return data partition information in String format
+   */
+  String getDataPartitionOfSG(String sg);
+
+  /**
+   * Get all storage groups
+   *
+   * @return Set of all storage groups
+   */
+  Set<String> getAllStorageGroupsLocally();
+
+  /**
+   * Get data partitions that the input node belongs to.
+   *
+   * @param ip node ip
+   * @param port node rpc port
+   * @return key: node ips of one data partition, value: storage group paths that belong to this
+   * data partition
+   */
+  Map<String[], String[]> getDataPartitonOfNode(String ip, int port);
+  Map<String[], String[]> getDataPartitonOfNode(String ip);
+
+  /**
+   * Get replica lag for metadata group and each data partition
+   *
+   * @return key: groupId, value: ip -> replica lag
+   */
+  Map<String, Map<String, Long>> getReplicaLagMap();
+
+  /**
+   * Get number of query jobs on each data partition for all nodes
+   *
+   * @return outer key: ip, inner key: groupId, value: number of query jobs
+   */
+  Map<String, Map<String, Integer>> getQueryJobNumMap();
+
+  /**
+   * Get status of all nodes
+   *
+   * @return key: node ip, value: live or not
+   */
+  Map<String, Boolean> getStatusMap();
+}
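
These methods are intended to be consumed over JMX. A condensed client sketch under that assumption follows; the nodetool command further below wraps the same calls with argument parsing and error handling:

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;
    import org.apache.iotdb.cluster.service.ClusterMonitor;
    import org.apache.iotdb.cluster.service.ClusterMonitorMBean;

    public class MonitorClientSketch {
      public static void main(String[] args) throws Exception {
        // 31999 is the default JMX port used by NodeToolCmd below.
        JMXServiceURL url =
            new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:31999/jmxrmi");
        MBeanServerConnection mbsc =
            JMXConnectorFactory.connect(url).getMBeanServerConnection();
        ClusterMonitorMBean proxy = JMX.newMBeanProxy(
            mbsc, new ObjectName(ClusterMonitor.MBEAN_NAME), ClusterMonitorMBean.class);
        proxy.getStatusMap().forEach((ip, live) -> System.out.println(ip + " -> " + live));
      }
    }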
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
index bfc74c1..505afd9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import org.apache.iotdb.cluster.config.ClusterConsistencyLevel;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
 import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
@@ -66,9 +67,10 @@ public class TSServiceClusterImpl extends TSServiceImpl {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceClusterImpl.class);
 
-  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
-  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
   private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor(
+      queryMetadataExecutor);
+  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
 
   private IClusterRpcQueryManager queryManager = ClusterRpcQueryManager.getInstance();
 
@@ -102,7 +104,8 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   }
 
   @Override
-  protected TSDataType getSeriesType(String path) throws PathErrorException, InterruptedException, ProcessorException {
+  protected TSDataType getSeriesType(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
     return queryMetadataExecutor.processSeriesTypeQuery(path);
   }
 
@@ -123,17 +126,17 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       List<String> statements = req.getStatements();
       PhysicalPlan[] physicalPlans = new PhysicalPlan[statements.size()];
       int[] result = new int[statements.size()];
-      String batchErrorMessage = "";
+      StringBuilder batchErrorMessage = new StringBuilder();
       boolean isAllSuccessful = true;
 
-      /** find all valid physical plans **/
+      /* find all valid physical plans */
       for (int i = 0; i < statements.size(); i++) {
         try {
           PhysicalPlan plan = processor
               .parseSQLToPhysicalPlan(statements.get(i), zoneIds.get());
           plan.setProposer(username.get());
 
-          /** if meet a query, handle all requests before the query request. **/
+          /* if meet a query, handle all requests before the query request. */
           if (plan.isQuery()) {
             int[] resultTemp = new int[i];
             PhysicalPlan[] physicalPlansTemp = new PhysicalPlan[i];
@@ -143,8 +146,11 @@ public class TSServiceClusterImpl extends TSServiceImpl {
             physicalPlans = physicalPlansTemp;
             BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
             nonQueryExecutor.processBatch(physicalPlans, batchResult);
+            batchErrorMessage.append(String
+                .format(ERROR_MESSAGE_FORMAT_IN_BATCH, i,
+                    "statement is query :" + statements.get(i)));
             return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
-                "statement is query :" + statements.get(i), Arrays.stream(result).boxed().collect(
+                statements.get(i), Arrays.stream(result).boxed().collect(
                     Collectors.toList()));
           }
 
@@ -155,7 +161,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
                 plan.getOperatorType());
             result[i] = Statement.EXECUTE_FAILED;
             isAllSuccessful = false;
-            batchErrorMessage = errMessage;
+            batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
           } else {
             physicalPlans[i] = plan;
           }
@@ -165,19 +171,19 @@ public class TSServiceClusterImpl extends TSServiceImpl {
               e.getMessage());
           result[i] = Statement.EXECUTE_FAILED;
           isAllSuccessful = false;
-          batchErrorMessage = errMessage;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
         } catch (Exception e) {
           String errMessage = String.format("Fail to generate physcial plan" + "%s beacuse %s",
               statements.get(i), e.getMessage());
           result[i] = Statement.EXECUTE_FAILED;
           isAllSuccessful = false;
-          batchErrorMessage = errMessage;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
         }
       }
 
       BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
       nonQueryExecutor.processBatch(physicalPlans, batchResult);
-      batchErrorMessage = batchResult.batchErrorMessage;
+      batchErrorMessage.append(batchResult.batchErrorMessage);
       isAllSuccessful = batchResult.isAllSuccessful;
 
       if (isAllSuccessful) {
@@ -185,7 +191,8 @@ public class TSServiceClusterImpl extends TSServiceImpl {
             "Execute batch statements successfully", Arrays.stream(result).boxed().collect(
                 Collectors.toList()));
       } else {
-        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS, batchErrorMessage,
+        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
+            batchErrorMessage.toString(),
             Arrays.stream(result).boxed().collect(
                 Collectors.toList()));
       }
@@ -198,16 +205,17 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   /**
    * Present batch results.
    */
-  public class BatchResult {
+  public static class BatchResult {
 
     private boolean isAllSuccessful;
-    private String batchErrorMessage;
-    private int[] result;
+    private StringBuilder batchErrorMessage;
+    private int[] resultArray;
 
-    private BatchResult(boolean isAllSuccessful, String batchErrorMessage, int[] result) {
+    public BatchResult(boolean isAllSuccessful, StringBuilder batchErrorMessage,
+        int[] resultArray) {
       this.isAllSuccessful = isAllSuccessful;
       this.batchErrorMessage = batchErrorMessage;
-      this.result = result;
+      this.resultArray = resultArray;
     }
 
     public boolean isAllSuccessful() {
@@ -218,20 +226,21 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       isAllSuccessful = allSuccessful;
     }
 
-    public String getBatchErrorMessage() {
+    public StringBuilder getBatchErrorMessage() {
       return batchErrorMessage;
     }
 
-    public void setBatchErrorMessage(String batchErrorMessage) {
-      this.batchErrorMessage = batchErrorMessage;
+    public void addBatchErrorMessage(int index, String batchErrorMessage) {
+      this.batchErrorMessage
+          .append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, index, batchErrorMessage));
     }
 
-    public int[] getResult() {
-      return result;
+    public int[] getResultArray() {
+      return resultArray;
     }
 
-    public void setResult(int[] result) {
-      this.result = result;
+    public void setResultArray(int[] resultArray) {
+      this.resultArray = resultArray;
     }
   }
 
@@ -243,38 +252,44 @@ public class TSServiceClusterImpl extends TSServiceImpl {
     statement = statement.toLowerCase().trim();
     try {
       if (Pattern.matches(ClusterConstant.SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
-        String[] splits = statement.split("\\s+");
-        int level = Integer.parseInt(splits[splits.length - 1]);
+        int level = parseConsistencyLevel(statement);
         queryMetadataExecutor.setReadMetadataConsistencyLevel(level);
         return true;
       } else if (Pattern
           .matches(ClusterConstant.SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
-        String[] splits = statement.split("\\s+");
-        int level = Integer.parseInt(splits[splits.length - 1]);
+        int level = parseConsistencyLevel(statement);
         queryDataExecutor.setReadDataConsistencyLevel(level);
         return true;
       } else {
         return false;
       }
-    } catch (ConsistencyLevelException e){
+    } catch (ConsistencyLevelException e) {
       throw new Exception(e.getMessage());
     }
   }
 
+  private int parseConsistencyLevel(String statement) throws ConsistencyLevelException {
+    String[] splits = statement.split("\\s+");
+    String levelName = splits[splits.length - 1].toLowerCase();
+    int level = ClusterConsistencyLevel.getLevel(levelName);
+    if (level == ClusterConsistencyLevel.UNSUPPORT_LEVEL) {
+      throw new ConsistencyLevelException(
+          String.format("Consistency level %s is not supported", levelName));
+    }
+    return level;
+  }
+
   @Override
   protected boolean executeNonQuery(PhysicalPlan plan) throws ProcessorException {
     return nonQueryExecutor.processNonQuery(plan);
   }
 
-  /**
-   * It's unnecessary to do this check. It has benn checked in transforming query physical plan.
-   */
   @Override
-  public void checkFileLevelSet(List<Path> paths) throws PathErrorException {
+  protected void checkFileLevelSet(List<Path> paths) throws PathErrorException {
+    // It's unnecessary to do this check. It has been checked when transforming the query physical plan.
   }
 
   @Override
-  public void releaseQueryResource(TSCloseOperationReq req) throws Exception {
+  protected void releaseQueryResource(TSCloseOperationReq req) throws Exception {
     Map<Long, QueryContext> contextMap = contextMapLocal.get();
     if (contextMap == null) {
       return;
@@ -294,7 +309,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
   }
 
   @Override
-  public QueryDataSet createNewDataSet(String statement, int fetchSize, TSFetchResultsReq req)
+  protected QueryDataSet createNewDataSet(String statement, int fetchSize, TSFetchResultsReq req)
       throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException,
       ProcessorException, IOException {
     PhysicalPlan physicalPlan = queryStatus.get().get(statement);
@@ -306,15 +321,26 @@ public class TSServiceClusterImpl extends TSServiceImpl {
     contextMapLocal.get().put(req.queryId, context);
 
     queryManager.addSingleQuery(jobId, (QueryPlan) physicalPlan);
-    QueryDataSet queryDataSet = processor.getExecutor().processQuery((QueryPlan) physicalPlan,
+    QueryDataSet queryDataSet = processor.getExecutor().processQuery(physicalPlan,
         context);
-    queryRet.get().put(statement, queryDataSet);
+    try {
+      queryRet.get().put(statement, queryDataSet);
+    } catch (Exception e) {
+      LOGGER.error("Failed to cache the query data set of statement {}", statement, e);
+    }
     return queryDataSet;
   }
+
+  @Override
+  public void handleClientExit() throws TException {
+    closeClusterService();
+    closeOperation(null);
+    closeSession(null);
+  }
+
   /**
    * Close cluster service
    */
-  @Override
   public void closeClusterService() {
     nonQueryExecutor.shutdown();
     queryMetadataExecutor.shutdown();
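
To make the new consistency-level path concrete, a small sketch of the tail-token parsing done by parseConsistencyLevel(); the level name "strong" is an assumption, since ClusterConsistencyLevel's constants are not part of this diff:

    public class ConsistencyLevelParseSketch {
      public static void main(String[] args) {
        // Mirrors the split/last-token logic of parseConsistencyLevel().
        String statement = "set read data consistency level strong"; // level name is assumed
        String[] splits = statement.split("\\s+");
        String levelName = splits[splits.length - 1].toLowerCase();
        System.out.println(levelName); // -> strong
      }
    }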
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java
new file mode 100644
index 0000000..21b02be
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Host.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+@Command(name = "host", description = "Print all data partitions information which specific host belongs to")
+public class Host extends NodeToolCmd {
+
+  private static final int DEFAULT_PORT = -1;
+
+  @Option(title = "ip", name = {"-i", "--ip"}, description = "Specify the host ip for accurate hosts information")
+  private String ip = "127.0.0.1";
+
+  @Option(title = "port", name = {"-p", "--port"}, description = "Specify the host port for accurate hosts information")
+  private int port = DEFAULT_PORT;
+
+  @Option(title = "sg_detail", name = {"-d", "--detail"}, description = "Show path of storage groups")
+  private boolean detail = false;
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String[], String[]> map;
+    if (port == DEFAULT_PORT) {
+      map = proxy.getDataPartitonOfNode(ip);
+    } else {
+      map = proxy.getDataPartitonOfNode(ip, port);
+    }
+
+    if (map == null) {
+      System.out.println("Can't find the input IP.");
+      return;
+    }
+
+    for (Entry<String[], String[]> entry : map.entrySet()) {
+      StringBuilder builder = new StringBuilder();
+      String[] ips = entry.getKey();
+      String[] sgs = entry.getValue();
+      builder.append('(');
+      for (int i = 0; i < ips.length; i++) {
+        builder.append(ips[i]).append(", ");
+      }
+      builder.delete(builder.length() - 2, builder.length());
+      builder.append(')');
+
+      builder.append("\t->\t");
+      if (detail) {
+        builder.append('(');
+        for (int i = 0; i < sgs.length; i++) {
+          builder.append(sgs[i]).append(", ");
+        }
+        if (sgs.length > 0) {
+          builder.delete(builder.length() - 2, builder.length());
+        }
+        builder.append(')');
+      } else {
+        builder.append(sgs.length);
+      }
+
+      System.out.println(builder.toString());
+    }
+  }
+}
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
similarity index 50%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
index 3bb8d7d..1cc852b 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Lag.java
@@ -16,22 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.iotdb.cluster.service.nodetool;
 
-package org.apache.iotdb.db.query.aggregation.impl;
+import io.airlift.airline.Command;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
 
-import org.apache.iotdb.db.query.aggregation.AggreResultData;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-
-public class SumAggrFunc extends MeanAggrFunc {
-
-  public SumAggrFunc(TSDataType seriesDataType) {
-    super(seriesDataType);
-  }
+@Command(name = "lag", description = "Print log lag for all groups of connected host")
+public class Lag extends NodeToolCmd {
 
   @Override
-  public AggreResultData getResult() {
-    resultData.setDoubleRet(sum);
-    resultData.setTimestamp(0);
-    return resultData;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Map<String, Long>> groupMap = proxy.getReplicaLagMap();
+    for (Entry<String, Map<String, Long>> entry : groupMap.entrySet()) {
+      if (entry.getValue() == null) {
+        continue;
+      }
+      System.out.println(entry.getKey() + ":");
+      entry.getValue().forEach((node, lag) -> System.out.println("\t" + node + "\t->\t" + lag));
+    }
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java
new file mode 100644
index 0000000..9d464b3
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/NodeTool.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import static java.lang.String.format;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.Lists;
+import io.airlift.airline.Cli;
+import io.airlift.airline.Help;
+import io.airlift.airline.Option;
+import io.airlift.airline.OptionType;
+import io.airlift.airline.ParseArgumentsMissingException;
+import io.airlift.airline.ParseArgumentsUnexpectedException;
+import io.airlift.airline.ParseCommandMissingException;
+import io.airlift.airline.ParseCommandUnrecognizedException;
+import io.airlift.airline.ParseOptionConversionException;
+import io.airlift.airline.ParseOptionMissingException;
+import io.airlift.airline.ParseOptionMissingValueException;
+import java.io.IOException;
+import java.util.List;
+import javax.management.JMX;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import org.apache.iotdb.cluster.service.ClusterMonitor;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+public class NodeTool {
+
+  public static void main(String... args) {
+    List<Class<? extends Runnable>> commands = Lists.newArrayList(
+        Help.class,
+        Ring.class,
+        StorageGroup.class,
+        Host.class,
+        Lag.class,
+        Query.class,
+        Status.class
+    );
+
+    Cli.CliBuilder<Runnable> builder = Cli.builder("nodetool");
+
+    builder.withDescription("Manage your IoTDB cluster")
+        .withDefaultCommand(Help.class)
+        .withCommands(commands);
+
+    Cli<Runnable> parser = builder.build();
+
+    int status = 0;
+    try {
+      Runnable parse = parser.parse(args);
+      parse.run();
+    } catch (IllegalArgumentException |
+        IllegalStateException |
+        ParseArgumentsMissingException |
+        ParseArgumentsUnexpectedException |
+        ParseOptionConversionException |
+        ParseOptionMissingException |
+        ParseOptionMissingValueException |
+        ParseCommandMissingException |
+        ParseCommandUnrecognizedException e) {
+      badUse(e);
+      status = 1;
+    } catch (Exception e) {
+      err(Throwables.getRootCause(e));
+      status = 2;
+    }
+
+    System.exit(status);
+  }
+
+  private static void badUse(Exception e) {
+    System.out.println("nodetool: " + e.getMessage());
+    System.out.println("See 'nodetool help' or 'nodetool help <command>'.");
+  }
+
+  private static void err(Throwable e) {
+    System.err.println("error: " + e.getMessage());
+    System.err.println("-- StackTrace --");
+    System.err.println(Throwables.getStackTraceAsString(e));
+  }
+
+  public abstract static class NodeToolCmd implements Runnable {
+
+    @Option(type = OptionType.GLOBAL, name = {"-h",
+        "--host"}, description = "Node hostname or ip address")
+    private String host = "127.0.0.1";
+
+    @Option(type = OptionType.GLOBAL, name = {"-p",
+        "--port"}, description = "Remote jmx agent port number")
+    private String port = "31999";
+
+    private static final String JMX_URL_FORMAT = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi";
+
+    @Override
+    public void run() {
+      try {
+        MBeanServerConnection mbsc = connect();
+        ObjectName name = new ObjectName(ClusterMonitor.MBEAN_NAME);
+        ClusterMonitorMBean clusterMonitorProxy = JMX
+            .newMBeanProxy(mbsc, name, ClusterMonitorMBean.class);
+        execute(clusterMonitorProxy);
+      } catch (MalformedObjectNameException e) {
+        e.printStackTrace();
+      }
+    }
+
+    protected abstract void execute(ClusterMonitorMBean probe);
+
+    private MBeanServerConnection connect() {
+      MBeanServerConnection mbsc = null;
+
+      try {
+        String jmxURL = String.format(JMX_URL_FORMAT, host, port);
+        JMXServiceURL serviceURL = new JMXServiceURL(jmxURL);
+        JMXConnector connector = JMXConnectorFactory.connect(serviceURL);
+        mbsc = connector.getMBeanServerConnection();
+      } catch (IOException e) {
+        Throwable rootCause = Throwables.getRootCause(e);
+        System.err.println(format("nodetool: Failed to connect to '%s:%s' - %s: '%s'.", host, port,
+            rootCause.getClass().getSimpleName(), rootCause.getMessage()));
+        System.exit(1);
+      }
+
+      return mbsc;
+    }
+  }
+}
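
Assuming the launcher script is named after the Cli built above, typical invocations would look as follows (host, ip, and ports are placeholders; 31999 is the default JMX port from NodeToolCmd):

    nodetool -h 127.0.0.1 -p 31999 ring --physical
    nodetool status
    nodetool host -i 192.168.0.5 -d
    nodetool help storagegroup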
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java
new file mode 100644
index 0000000..d0682fd
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Query.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+
+@Command(name = "query", description = "Print number of query jobs for all data partitions for all hosts")
+public class Query extends NodeToolCmd {
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Map<String, Integer>> queryNumMap = proxy.getQueryJobNumMap();
+    queryNumMap.forEach((ip, map) -> {
+      System.out.println(ip + ":");
+      if (map != null) {
+        map.forEach((groupId, num) -> System.out.println("\t" + groupId + "\t->\t" + num));
+      }
+    });
+    final int[] sum = {0};
+    queryNumMap.forEach((ip, map) -> {
+      if (map != null) {
+        map.forEach((groupId, num) -> sum[0] += num);
+      }
+    });
+    System.out.println("Total\t->\t" + sum[0]);
+  }
+}
\ No newline at end of file
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
similarity index 51%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
index 3bb8d7d..8dd96a0 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/aggregation/impl/SumAggrFunc.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
@@ -16,22 +16,24 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.iotdb.cluster.service.nodetool;
 
-package org.apache.iotdb.db.query.aggregation.impl;
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
 
-import org.apache.iotdb.db.query.aggregation.AggreResultData;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+@Command(name = "ring", description = "Print information about the hash ring")
+public class Ring extends NodeToolCmd {
 
-public class SumAggrFunc extends MeanAggrFunc {
-
-  public SumAggrFunc(TSDataType seriesDataType) {
-    super(seriesDataType);
-  }
+  @Option(title = "physical_ring", name = {"-p", "--physical"}, description = "Show physical nodes instead of virtual ones")
+  private boolean physical = false;
 
   @Override
-  public AggreResultData getResult() {
-    resultData.setDoubleRet(sum);
-    resultData.setTimestamp(0);
-    return resultData;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<Integer, String> map = physical ? proxy.getPhysicalRing() : proxy.getVirtualRing();
+    map.forEach((hash, ip) -> System.out.println(hash + "\t->\t" + ip));
   }
-}
+}
\ No newline at end of file
diff --git a/spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
old mode 100755
new mode 100644
similarity index 59%
copy from spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
index 684e0e5..302a1c7
--- a/spark/src/main/java/org/apache/iotdb/tsfile/qp/common/Operator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Status.java
@@ -16,32 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.qp.common;
+package org.apache.iotdb.cluster.service.nodetool;
 
-/**
- * This class is a superclass of all operator. 
- *
- */
-public abstract class Operator {
+import io.airlift.airline.Command;
+import java.util.Map;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
 
-  int tokenIntType;
-  String tokenSymbol;
-
-  Operator(int tokenIntType) {
-    this.tokenIntType = tokenIntType;
-    this.tokenSymbol = SQLConstant.tokenSymbol.get(tokenIntType);
-  }
-
-  public int getTokenIntType() {
-    return tokenIntType;
-  }
-
-  public String getTokenSymbol() {
-    return tokenSymbol;
-  }
+@Command(name = "status", description = "Print status of all hosts")
+public class Status extends NodeToolCmd {
 
   @Override
-  public String toString() {
-    return tokenSymbol;
+  public void execute(ClusterMonitorMBean proxy) {
+    Map<String, Boolean> statusMap = proxy.getStatusMap();
+    statusMap.forEach((ip, status) -> System.out.println(ip + "\t->\t" + (status ? "on" : "off")));
   }
-}
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
new file mode 100644
index 0000000..e44aa64
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service.nodetool;
+
+import io.airlift.airline.Command;
+import io.airlift.airline.Option;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
+import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
+
+@Command(name = "storagegroup", description = "Print all hosts information of specific storage group")
+public class StorageGroup extends NodeToolCmd {
+
+  @Option(title = "all storagegroup", name = {"-a", "--all"}, description = "Show hosts info of all storage groups")
+  private boolean showAll = false;
+
+  @Option(title = "storage group", name = {"-sg",
+      "--storagegroup"}, description = "Specify a storage group for accurate hosts information")
+  private String sg = null;
+
+  @Override
+  public void execute(ClusterMonitorMBean proxy) {
+    Set<String> sgSet;
+    if (showAll) {
+      sgSet = proxy.getAllStorageGroupsLocally();
+    } else {
+      sgSet = new HashSet<>();
+      sgSet.add(sg);
+    }
+
+    if (!showAll && sg == null) {
+      System.out.println("Metadata\t->\t" + proxy.getDataPartitionOfSG(sg));
+    } else {
+      sgSet.forEach(sg -> System.out.println(sg + "\t->\t" + proxy.getDataPartitionOfSG(sg)));
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
index 809a01c..80dec3a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
@@ -45,10 +45,13 @@ public class QPExecutorUtils {
   private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
       CLUSTER_CONFIG.getPort());
 
-  private static final  MManager mManager = MManager.getInstance();
+  private static final MManager mManager = MManager.getInstance();
 
   private static final Server server = Server.getInstance();
 
+  private QPExecutorUtils() {
+  }
+
   /**
    * Get Storage Group Name by device name
    */
@@ -85,13 +88,8 @@ public class QPExecutorUtils {
     for (int i = 0; i < sgList.size(); i++) {
       String sg = sgList.get(i);
       String groupId = router.getGroupIdBySG(sg);
-      if (map.containsKey(groupId)) {
-        map.get(groupId).add(sg);
-      } else {
-        Set<String> set = new HashSet<>();
-        set.add(sg);
-        map.put(groupId, set);
-      }
+      map.putIfAbsent(groupId, new HashSet<>());
+      map.get(groupId).add(sg);
     }
     return map;
   }
@@ -102,9 +100,9 @@ public class QPExecutorUtils {
    */
   public static boolean canHandleNonQueryByGroupId(String groupId) {
     boolean canHandle = false;
-    if(groupId.equals(ClusterConfig.METADATA_GROUP_ID)){
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       canHandle = ((MetadataRaftHolder) (server.getMetadataHolder())).getFsm().isLeader();
-    }else {
+    } else {
       if (checkDataGroupLeader(groupId)) {
         canHandle = true;
       }
@@ -121,7 +119,7 @@ public class QPExecutorUtils {
   public static boolean checkDataGroupLeader(String groupId) {
     boolean isLeader = false;
     if (router.containPhysicalNodeByGroupId(groupId, localNode) && RaftUtils
-        .getPhysicalNodeFrom(RaftUtils.getLeaderPeerID(groupId)).equals(localNode)) {
+        .getPhysicalNodeFrom(RaftUtils.getLocalLeaderPeerID(groupId)).equals(localNode)) {
       isLeader = true;
     }
     return isLeader;
@@ -141,8 +139,7 @@ public class QPExecutorUtils {
    */
   public static String getGroupIdByDevice(String device) throws PathErrorException {
     String storageGroup = QPExecutorUtils.getStroageGroupByDevice(device);
-    String groupId = Router.getInstance().getGroupIdBySG(storageGroup);
-    return groupId;
+    return Router.getInstance().getGroupIdBySG(storageGroup);
   }
 
   /**
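
As a side note on the grouping refactor above, the putIfAbsent/get pair can be folded into a single computeIfAbsent call; a self-contained sketch with placeholder ids:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class GroupingIdiomSketch {
      public static void main(String[] args) {
        // Same map/set shape as getAllSGByGroupId(); ids are placeholders.
        Map<String, Set<String>> map = new HashMap<>();
        String groupId = "group-0";
        String sg = "root.sg1";
        map.computeIfAbsent(groupId, k -> new HashSet<>()).add(sg);
        System.out.println(map); // {group-0=[root.sg1]}
      }
    }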
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index be6eea0..96e0363 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -23,12 +23,24 @@ import com.alipay.remoting.exception.CodecException;
 import com.alipay.remoting.serialization.SerializerManager;
 import com.alipay.sofa.jraft.Status;
 import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import com.alipay.sofa.jraft.core.NodeImpl;
 import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.entity.Task;
 import com.alipay.sofa.jraft.util.Bits;
 import com.alipay.sofa.jraft.util.OnlyForTest;
+import com.codahale.metrics.Gauge;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -37,31 +49,41 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.MetadataStateManchine;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.qp.task.SingleQPTask;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
 import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryJobNumRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryLeaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryStatusRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryJobNumResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryLeaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryStatusResponse;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.cluster.utils.hash.VirtualNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class RaftUtils {
 
-  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
-
   private static final Logger LOGGER = LoggerFactory.getLogger(RaftUtils.class);
   private static final Server server = Server.getInstance();
   private static final Router router = Router.getInstance();
   private static final AtomicInteger requestId = new AtomicInteger(0);
+  private static final ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
   /**
    * Raft as client manager.
    */
@@ -70,27 +92,128 @@ public class RaftUtils {
 
   /**
    * The cache will be update in two case: 1. When @onLeaderStart() method of state machine is
-   * called, the cache will be update. 2. When @getLeaderPeerID() in this class is called and cache
-   * don't have the key, it's will get random peer and update. 3. When @redirected of BasicRequest
-   * is true, the task will be retry and the cache will update.
+   * called, the cache will be update. 2. When @getLocalLeaderPeerID() in this class is called and
+   * cache don't have the key, it's will get random peer and update. 3. When @redirected of
+   * BasicRequest is true, the task will be retry and the cache will update.
    */
   private static final ConcurrentHashMap<String, PeerId> groupLeaderCache = new ConcurrentHashMap<>();
 
+  private static ThreadLocal<Map<String, Integer>> nodeIndexMap = ThreadLocal.withInitial(() -> {
+    Map<String, Integer> map = new HashMap<>();
+    router.getAllGroupId().forEach(groupId -> {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      map.put(groupId, getRandomInt(physicalNodes.length));
+    });
+    return map;
+  });
+
   private RaftUtils() {
   }
 
   /**
+   * Get a peer ID of the given group in round-robin order: each call advances the calling
+   * thread's cursor to the next node of the group.
+   *
+   * @return the selected peer id
+   */
+  public static PeerId getPeerIDInOrder(String groupId) {
+    int index;
+    PeerId peerId;
+    int len;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      RaftService service = (RaftService) server.getMetadataHolder().getService();
+      List<PeerId> peerIdList = service.getPeerIdList();
+      len = peerIdList.size();
+      index = nodeIndexMap.get().getOrDefault(groupId, getRandomInt(peerIdList.size()));
+      peerId = peerIdList.get(index);
+    } else {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      len = physicalNodes.length;
+      index = nodeIndexMap.get().getOrDefault(groupId, getRandomInt(physicalNodes.length));
+      peerId = getPeerIDFrom(physicalNodes[index]);
+    }
+    nodeIndexMap.get().put(groupId, (index + 1) % len);
+
+    LOGGER.debug("Get node {} for group {}", peerId, groupId);
+
+    return peerId;
+  }
+
+  public static void updatePeerIDOrder(PeerId peerId, String groupId) {
+    int index = -1;
+    int len;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      RaftService service = (RaftService) server.getMetadataHolder().getService();
+      List<PeerId> peerIdList = service.getPeerIdList();
+      len = peerIdList.size();
+      index = peerIdList.indexOf(peerId);
+    } else {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      len = physicalNodes.length;
+      PhysicalNode node = getPhysicalNodeFrom(peerId);
+      for (int i = 0; i < physicalNodes.length; i++) {
+        if (physicalNodes[i].equals(node)) {
+          index = i;
+          break;
+        }
+      }
+    }
+
+    if (index == -1) {
+      LOGGER.warn(
+          "Fail to update order of node {} for group {}, because the group doesn't contain it.",
+          peerId, groupId);
+    } else {
+      LOGGER.debug("Update order of node {} for group {}, current index is {}", peerId, groupId,
+          index);
+      nodeIndexMap.get().put(groupId, (index + 1) % len);
+    }
+  }
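
Together, getPeerIDInOrder and updatePeerIDOrder give each thread its own round-robin cursor over a group's nodes: a call returns the node at the cursor and advances it, and after a redirect the cursor is re-aligned to just past the node that actually answered. A minimal self-contained sketch of the same pattern (plain Java; the String node IDs merely stand in for PeerId):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class RoundRobinSelector {

      // One cursor per group and per thread, so concurrent callers rotate
      // independently without contending on a shared counter.
      private static final ThreadLocal<Map<String, Integer>> CURSOR =
          ThreadLocal.withInitial(HashMap::new);

      private final List<String> nodes = Arrays.asList("n1", "n2", "n3");

      public String next(String groupId) {
        int index = CURSOR.get().getOrDefault(groupId, 0);
        CURSOR.get().put(groupId, (index + 1) % nodes.size());
        return nodes.get(index);
      }

      // After a redirect, continue the rotation just past the answering node.
      public void resync(String groupId, String node) {
        int index = nodes.indexOf(node);
        if (index >= 0) {
          CURSOR.get().put(groupId, (index + 1) % nodes.size());
        }
      }

      public static void main(String[] args) {
        RoundRobinSelector s = new RoundRobinSelector();
        System.out.println(s.next("data-group-0")); // n1
        System.out.println(s.next("data-group-0")); // n2
        s.resync("data-group-0", "n1");
        System.out.println(s.next("data-group-0")); // n2 again
      }
    }
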
+
+  /**
   * Get the peer ID to send requests to. If groupLeaderCache contains the group ID, return the
   * cached leader ID of the group; otherwise pick a random peer of the group and cache it.
    *
    * @return leader id
    */
-  public static PeerId getLeaderPeerID(String groupId) {
-    if (!groupLeaderCache.containsKey(groupId)) {
-      PeerId randomPeerId = getRandomPeerID(groupId);
-      groupLeaderCache.put(groupId, randomPeerId);
+  public static PeerId getLocalLeaderPeerID(String groupId) {
+    PeerId leader;
+    if (groupLeaderCache.containsKey(groupId)) {
+      leader = groupLeaderCache.get(groupId);
+    } else {
+      leader = getRandomPeerID(groupId);
+      groupLeaderCache.put(groupId, leader);
+    }
+    LOGGER.debug("Get local cached leader {} of group {}.", leader, groupId);
+    return leader;
+  }
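
Note that the containsKey/put pair above is not atomic on the ConcurrentHashMap, so two threads that miss simultaneously may seed different random peers; harmless here, but a single computeIfAbsent call would make the lookup-or-seed step atomic. A one-line sketch using only methods already in this class:

    PeerId leader = groupLeaderCache.computeIfAbsent(groupId, RaftUtils::getRandomPeerID);
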
+
+  /**
+   * Query the current leader of the given group from the specified remote node.
+   *
+   * @return leader id, or null if the remote node cannot be reached or reports no leader
+   */
+  public static PeerId getLeaderPeerIDFromRemoteNode(PeerId peerId, String groupId) {
+    QueryLeaderRequest request = new QueryLeaderRequest(groupId);
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get leader of group {} from node {}.", groupId, peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      PeerId leader = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        leader = response == null ? null : ((QueryLeaderResponse) response).getLeader();
+      }
+      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, peerId);
+      return leader;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get leader of group {} from remote node {} because of {}.", groupId,
+          peerId, e.getMessage());
+      return null;
     }
-    return groupLeaderCache.get(groupId);
   }
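
The produce/await/check-TaskState sequence in this method reappears almost verbatim in the metric, job-number and status queries further down. A generic helper could factor out that boilerplate; this sketch uses only the task APIs already visible in this file, plus java.util.function.Function for a caller-supplied response extractor:

    private static <T> T queryRemoteNode(BasicRequest request, PeerId target,
        Function<BasicResponse, T> extractor) {
      SingleQPTask task = new SingleQPTask(false, request);
      task.setTargetNode(target);
      try {
        CLIENT_MANAGER.produceQPTask(task);
        task.await();
        if (task.getTaskState() == TaskState.FINISH && task.getResponse() != null) {
          return extractor.apply(task.getResponse());
        }
        return null;
      } catch (RaftConnectionException | InterruptedException e) {
        LOGGER.error("Fail to query remote node {}.", target, e);
        return null;
      }
    }

    // usage, equivalent to the body of getLeaderPeerIDFromRemoteNode:
    PeerId leader = queryRemoteNode(new QueryLeaderRequest(groupId), peerId,
        r -> ((QueryLeaderResponse) r).getLeader());
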
 
   /**
@@ -100,18 +223,29 @@ public class RaftUtils {
     return getRandomPeerID(groupId, server, router);
   }
 
+  /**
+   * Get random peer id
+   */
   public static PeerId getRandomPeerID(String groupId, Server server, Router router) {
-    PeerId randomPeerId;
+    List<PeerId> peerIdList = getPeerIDList(groupId, server, router);
+    return peerIdList.get(getRandomInt(peerIdList.size()));
+  }
+
+  /**
+   * Get the peer ID list of the given group ID.
+   */
+  public static List<PeerId> getPeerIDList(String groupId, Server server, Router router) {
+    List<PeerId> peerIdList = new ArrayList<>();
     if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       RaftService service = (RaftService) server.getMetadataHolder().getService();
-      List<PeerId> peerIdList = service.getPeerIdList();
-      randomPeerId = peerIdList.get(getRandomInt(peerIdList.size()));
+      peerIdList.addAll(service.getPeerIdList());
     } else {
       PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
-      PhysicalNode node = physicalNodes[getRandomInt(physicalNodes.length)];
-      randomPeerId = getPeerIDFrom(node);
+      for (PhysicalNode node : physicalNodes) {
+        peerIdList.add(getPeerIDFrom(node));
+      }
     }
-    return randomPeerId;
+    return peerIdList;
   }
 
   /**
@@ -140,16 +274,6 @@ public class RaftUtils {
     return peerIds;
   }
 
-  @Deprecated
-  public static int getIndexOfIpFromRaftNodeList(String ip, PeerId[] peerIds) {
-    for (int i = 0; i < peerIds.length; i++) {
-      if (peerIds[i].getIp().equals(ip)) {
-        return i;
-      }
-    }
-    return -1;
-  }
-
   public static PhysicalNode[] getPhysicalNodeArrayFrom(PeerId[] peerIds) {
     PhysicalNode[] physicalNodes = new PhysicalNode[peerIds.length];
     for (int i = 0; i < peerIds.length; i++) {
@@ -188,7 +312,7 @@ public class RaftUtils {
 
   @OnlyForTest
   public static void clearRaftGroupLeader() {
-	  groupLeaderCache.clear();
+    groupLeaderCache.clear();
   }
 
   /**
@@ -206,7 +330,7 @@ public class RaftUtils {
       if (!status.isOk()) {
         response.setErrorMsg(status.getErrorMsg());
       }
-      qpTask.run(response);
+      qpTask.receive(response);
     });
     task.setDone(closure);
     try {
@@ -236,8 +360,10 @@ public class RaftUtils {
       }
       asyncContext.sendResponse(response);
     });
-    LOGGER.debug(
-        String.format("Processor batch size() : %d", request.getPhysicalPlanBytes().size()));
+    // parameterized logging defers message construction until debug is enabled
+    LOGGER.debug("Processor batch size() : {}", request.getPhysicalPlanBytes().size());
     task.setDone(closure);
     try {
       task.setData(ByteBuffer
@@ -305,11 +431,12 @@ public class RaftUtils {
                 status.setCode(-1);
                 status.setErrorMsg(status.getErrorMsg());
               }
-              nullReadTask.run(response);
+              nullReadTask.receive(response);
             }
           });
       nullReadTask.await();
     } catch (InterruptedException e) {
+      LOGGER.warn("Exception {} occurs while handling null read to metadata group.", e);
       status.setCode(-1);
       status.setErrorMsg(e.getMessage());
     }
@@ -323,12 +450,13 @@ public class RaftUtils {
     handleNullReadToDataGroup(status, server, nullReadTask, groupId);
   }
 
-  private static void handleNullReadToDataGroup(Status status, Server server,
+  private static void handleNullReadToDataGroup(Status resultStatus, Server server,
       SingleQPTask nullReadTask, String groupId) {
     try {
       LOGGER.debug("Handle null-read in data group for reading.");
       final byte[] reqContext = RaftUtils.createRaftRequestContext();
-      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server.getDataPartitionHolder(groupId);
+      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server
+          .getDataPartitionHolder(groupId);
       ((RaftService) dataPartitionRaftHolder.getService()).getNode()
           .readIndex(reqContext, new ReadIndexClosure() {
             @Override
@@ -336,37 +464,354 @@ public class RaftUtils {
               BasicResponse response = DataGroupNonQueryResponse
                   .createEmptyResponse(groupId);
               if (!status.isOk()) {
-                status.setCode(-1);
-                status.setErrorMsg(status.getErrorMsg());
+                resultStatus.setCode(-1);
+                resultStatus.setErrorMsg(status.getErrorMsg());
               }
-              nullReadTask.run(response);
+              nullReadTask.receive(response);
             }
           });
       nullReadTask.await();
     } catch (InterruptedException e) {
-      status.setCode(-1);
-      status.setErrorMsg(e.getMessage());
+      resultStatus.setCode(-1);
+      resultStatus.setErrorMsg(e.getMessage());
     }
   }
 
-  public static Status createErrorStatus(String errorMsg){
+  public static Status createErrorStatus(String errorMsg) {
     Status status = new Status();
     status.setErrorMsg(errorMsg);
     status.setCode(-1);
     return status;
   }
 
+  public static Map<String, PeerId> getGroupLeaderCache() {
+    return groupLeaderCache;
+  }
+
+  public static Map<Integer, String> getPhysicalRing() {
+    SortedMap<Integer, PhysicalNode> hashNodeMap = router.getPhysicalRing();
+    Map<Integer, String> res = new LinkedHashMap<>();
+    hashNodeMap.forEach((key, value) -> res.put(key, value.getIp()));
+    return res;
+  }
+
+  public static Map<Integer, String> getVirtualRing() {
+    SortedMap<Integer, VirtualNode> hashNodeMap = router.getVirtualRing();
+    Map<Integer, String> res = new LinkedHashMap<>();
+    hashNodeMap.forEach((key, value) -> res.put(key, value.getPhysicalNode().getIp()));
+    return res;
+  }
+
   /**
-   * try to get raft rpc client
+   * Get all nodes of the data group that the given storage group belongs to. The first node in
+   * the returned array is the current leader.
+   *
+   * @param sg storage group ID; if null, the metadata group is used
    */
-  public static NodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
-    NodeAsClient client = CLIENT_MANAGER.getRaftNodeAsClient();
-    if (client == null) {
-      throw new RaftConnectionException(String
-          .format("Raft inner rpc clients have reached the max numbers %s",
-              CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
-                  .getMaxQueueNumOfInnerRpcClient()));
-    }
-    return client;
+  public static PeerId[] getDataPartitionOfSG(String sg) {
+    return getDataPartitionOfSG(sg, server, router);
+  }
+
+  public static PeerId[] getDataPartitionOfSG(String sg, Server server, Router router) {
+    String groupId;
+    PeerId[] nodes;
+    if (sg == null) {
+      groupId = ClusterConfig.METADATA_GROUP_ID;
+      List<PeerId> peerIdList = ((RaftService) server.getMetadataHolder().getService())
+          .getPeerIdList();
+      nodes = peerIdList.toArray(new PeerId[peerIdList.size()]);
+    } else {
+      PhysicalNode[] group = router.routeGroup(sg);
+      groupId = router.getGroupID(group);
+      nodes = getPeerIdArrayFrom(group);
+    }
+
+    PeerId leader = null;
+    for (PeerId node : nodes) {
+      LOGGER.debug("Try to get leader of group {} from node {}.", groupId, node);
+      leader = getLeaderPeerIDFromRemoteNode(node, groupId);
+      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, node);
+      if (leader != null) {
+        break;
+      }
+    }
+
+    if (leader == null) {
+      LOGGER
+          .debug("Fail to get leader of group {} from all remote nodes, get it locally.", groupId);
+      leader = RaftUtils.getLocalLeaderPeerID(groupId);
+      LOGGER.debug("Get leader {} of group {} locally.", leader, groupId);
+    }
+
+    for (int i = 0; i < nodes.length; i++) {
+      if (leader.equals(nodes[i])) {
+        PeerId t = nodes[i];
+        nodes[i] = nodes[0];
+        nodes[0] = t;
+        break;
+      }
+    }
+    return nodes;
+  }
+
+  /**
+   * Get the data partitions that the given node belongs to.
+   *
+   * @param ip node ip
+   * @return key: node ips of one data partition, value: storage group paths that belong to this
+   * data partition
+   */
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip) {
+    return getDataPartitionOfNode(ip, config.getPort());
+  }
+
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port) {
+    return getDataPartitionOfNode(ip, port, server, router);
+  }
+
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port, Server server,
+      Router router) {
+    PhysicalNode[][] groups = router.getGroupsNodes(ip, port);
+    if (groups == null) {
+      return null;
+    }
+
+    Map<String, List<String>> groupSGMap = new LinkedHashMap<>();
+    for (int i = 0; i < groups.length; i++) {
+      groupSGMap.put(generateStringKey(groups[i]), new ArrayList<>());
+    }
+    Set<String> allSGList = ((MetadataStateManchine) ((RaftService) server.getMetadataHolder()
+        .getService()).getFsm()).getAllStorageGroups();
+    for (String sg : allSGList) {
+      String key = generateStringKey(router.routeGroup(sg));
+      if (groupSGMap.containsKey(key)) {
+        groupSGMap.get(key).add(sg);
+      }
+    }
+
+    String[][] groupIps = new String[groups.length][];
+    for (int i = 0; i < groups.length; i++) {
+      groupIps[i] = new String[groups[i].length];
+      for (int j = 0; j < groups[i].length; j++) {
+        groupIps[i][j] = groups[i][j].getIp();
+      }
+    }
+
+    Map<String[], String[]> res = new HashMap<>();
+    int index = 0;
+    for (Entry<String, List<String>> entry : groupSGMap.entrySet()) {
+      res.put(groupIps[index], entry.getValue().toArray(new String[entry.getValue().size()]));
+      index++;
+    }
+    return res;
+  }
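
A short usage sketch of the result shape (the IP below is hypothetical): each key is the IP array of one data group the node serves, each value lists the storage groups routed to that group.

    Map<String[], String[]> partitions = RaftUtils.getDataPartitionOfNode("192.168.130.1");
    if (partitions != null) {
      partitions.forEach((ips, sgs) ->
          System.out.println(String.join(",", ips) + " -> " + String.join(",", sgs)));
    }
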
+
+  private static String generateStringKey(PhysicalNode[] nodes) {
+    if (nodes == null || nodes.length == 0) {
+      return "";
+    }
+    Arrays.sort(nodes, Comparator.comparing(PhysicalNode::toString));
+    StringBuilder builder = new StringBuilder();
+    builder.append(nodes[0]);
+    for (int i = 1; i < nodes.length; i++) {
+      builder.append('#').append(nodes[i]);
+    }
+    return builder.toString();
+  }
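
generateStringKey sorts the nodes first so a group yields the same key regardless of array order; assuming PhysicalNode.toString() prints ip:port, both orderings of {10.0.0.2:8888, 10.0.0.1:8888} map to "10.0.0.1:8888#10.0.0.2:8888". Note that the Arrays.sort call mutates the caller's array; a stream version with the same output avoids that side effect (needs java.util.stream.Collectors):

    String key = Arrays.stream(nodes).map(PhysicalNode::toString).sorted()
        .collect(Collectors.joining("#"));
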
+
+  /**
+   * Get replica lag for metadata group and each data partition
+   *
+   * @return key: groupId, value: ip -> replica lag
+   */
+  public static Map<String, Map<String, Long>> getReplicaLagMap() {
+    return getReplicaMetricMap("log-lags");
+  }
+
+  public static Map<String, Map<String, Long>> getReplicaMetricMap(String metric) {
+    Map<String, Map<String, Long>> metricMap = new HashMap<>();
+    RaftService raftService = (RaftService) server.getMetadataHolder().getService();
+    metricMap.put(raftService.getGroupId(), getReplicaMetricFromRaftService(raftService, metric));
+
+    router.getAllGroupId()
+        .forEach(groupId -> metricMap.put(groupId, getReplicaMetric(groupId, metric)));
+    return metricMap;
+  }
+
+  public static Map<String, Long> getReplicaMetric(String groupId, String metric) {
+    if (server.getDataPartitionHolderMap().containsKey(groupId)) {
+      RaftService service = (RaftService) server.getDataPartitionHolder(groupId).getService();
+      return getReplicaMetricFromRaftService(service, metric);
+    } else {
+      LOGGER.debug("Current host does not contain group {}, all groups are {}.", groupId,
+          server.getDataPartitionHolderMap().keySet());
+      return getReplicaMetricFromRemoteNode(groupId, metric);
+    }
+  }
+
+  private static Map<String, Long> getReplicaMetricFromRaftService(RaftService service,
+      String metric) {
+    String groupId = service.getGroupId();
+    LOGGER.debug("Get replica metric {} for group {}.", metric, service.getGroupId());
+    NodeImpl node = (NodeImpl) service.getNode();
+    Map<String, Long> lagMap;
+    if (node.isLeader()) {
+      LOGGER.debug("Get metric locally.");
+      List<PeerId> nodes = service.getPeerIdList();
+      Map<String, Gauge> metrics = service.getNode().getNodeMetrics().getMetricRegistry()
+          .getGauges();
+
+      lagMap = new HashMap<>();
+      String keyFormat = "replicator-%s/%s.%s";
+      for (int i = 0; i < nodes.size(); i++) {
+        // leader doesn't have lag metric
+        if (nodes.get(i).equals(node.getServerId())) {
+          lagMap.put(nodes.get(i).getIp() + " (leader)", 0L);
+          continue;
+        }
+
+        String key = String.format(keyFormat, groupId, nodes.get(i), metric);
+        long value = -1;
+        if (metrics.containsKey(key)) {
+          value = (long) metrics.get(key).getValue();
+        } else {
+          LOGGER.warn("Metric map {} should contain key {}, but not.", metrics, key);
+        }
+        lagMap.put(nodes.get(i).getIp(), value);
+      }
+    } else {
+      lagMap = getReplicaMetricFromRemoteNode(groupId, metric);
+    }
+    return lagMap;
+  }
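
For reference, with group "data-group-0", metric "log-lags" and a follower whose PeerId prints as 192.168.130.2:8888 (the address is hypothetical), the "replicator-%s/%s.%s" format above resolves to the gauge key:

    replicator-data-group-0/192.168.130.2:8888.log-lags
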
+
+  private static Map<String, Long> getReplicaMetricFromRemoteNode(String groupId, String metric) {
+    QueryMetricRequest request = new QueryMetricRequest(groupId, metric);
+    SingleQPTask task = new SingleQPTask(false, request);
+
+    LOGGER.debug("Execute get metric for {} statement for group {}.", metric, groupId);
+    PeerId holder = RaftUtils.getLocalLeaderPeerID(groupId);
+    LOGGER.debug("Get metric from node {}.", holder);
+    task.setTargetNode(holder);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      Map<String, Long> value = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        value = response == null ? null : ((QueryMetricResponse) response).getValue();
+      }
+      return value;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get replica metric from remote node because of {}.", e);
+      return null;
+    }
+  }
+
+  /**
+   * Get query job number running on each data partition for all nodes
+   *
+   * @return outer key: ip, inner key: groupId, value: number of query jobs
+   */
+  public static Map<String, Map<String, Integer>> getQueryJobNumMapForCluster() {
+    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
+    Map<String, Map<String, Integer>> res = new HashMap<>();
+    for (int i = 0; i < peerIds.length; i++) {
+      PeerId peerId = peerIds[i];
+      res.put(peerId.getIp(), getQueryJobNumMapFromRemoteNode(peerId));
+    }
+
+    return res;
+  }
+
+  public static Map<String, Integer> getLocalQueryJobNumMap() {
+    return ClusterRpcQueryManager.getInstance().getAllReadUsage();
+  }
+
+  private static Map<String, Integer> getQueryJobNumMapFromRemoteNode(PeerId peerId) {
+    QueryJobNumRequest request = new QueryJobNumRequest("");
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get query job num map for node {}.", peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      Map<String, Integer> value = null;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        value = response == null ? null : ((QueryJobNumResponse) response).getValue();
+      }
+      return value;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get query job num map from remote node {} because of {}.", peerId, e);
+      return null;
+    }
+  }
+
+  /**
+   * Get status of each node in cluster
+   *
+   * @return key: node ip, value: live or not
+   */
+  public static Map<String, Boolean> getStatusMapForCluster() {
+    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
+    SortedMap<String, Boolean> treeMap = new TreeMap<>(new Comparator<String>() {
+      @Override
+      public int compare(String o1, String o2) {
+        int[] nums1 = convertIPToNums(o1);
+        int[] nums2 = convertIPToNums(o2);
+        for (int i = 0; i < Math.min(nums1.length, nums2.length); i++) {
+          if (nums1[i] != nums2[i]) {
+            return Integer.compare(nums1[i], nums2[i]);
+          }
+        }
+        return 0;
+      }
+
+      private int[] convertIPToNums(String ip) {
+        String[] ss = ip.split("\\.");
+        int[] nums = new int[ss.length];
+        for (int i = 0; i < nums.length; i++) {
+          nums[i] = Integer.parseInt(ss[i]);
+        }
+        return nums;
+      }
+    });
+    for (int i = 0; i < peerIds.length; i++) {
+      PeerId peerId = peerIds[i];
+      treeMap.put(peerId.getIp(), getStatusOfNode(peerId));
+    }
+
+    Map<String, Boolean> res = new LinkedHashMap<>();
+    treeMap.forEach((ip, status) -> res.put(ip, status));
+    return res;
+  }
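
The anonymous comparator orders dotted quads numerically, so 9.0.0.1 sorts before 10.0.0.1 where a plain string sort would put it after. Assuming well-formed IPv4 addresses, the same ordering can be written with Java 8 comparator combinators:

    Comparator<String> byIp = Comparator
        .comparingInt((String ip) -> Integer.parseInt(ip.split("\\.")[0]))
        .thenComparingInt(ip -> Integer.parseInt(ip.split("\\.")[1]))
        .thenComparingInt(ip -> Integer.parseInt(ip.split("\\.")[2]))
        .thenComparingInt(ip -> Integer.parseInt(ip.split("\\.")[3]));
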
+
+  private static boolean getStatusOfNode(PeerId peerId) {
+    QueryStatusRequest request = new QueryStatusRequest("");
+    SingleQPTask task = new SingleQPTask(false, request);
+    task.setTargetNode(peerId);
+    LOGGER.debug("Execute get status for node {}.", peerId);
+    try {
+      CLIENT_MANAGER.produceQPTask(task);
+
+      task.await();
+      boolean status = false;
+      if (task.getTaskState() == TaskState.FINISH) {
+        BasicResponse response = task.getResponse();
+        // a null in a Boolean-typed ternary would NPE on unboxing; treat a missing response as not alive
+        status = response != null && ((QueryStatusResponse) response).getStatus();
+      }
+      return status;
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Fail to get status from remote node {} because of {}.", peerId, e);
+      return false;
+    }
+  }
+
+  public static Set<String> getAllStorageGroupsLocally() {
+    MetadataRaftHolder metadataRaftHolder = (MetadataRaftHolder) server.getMetadataHolder();
+    return metadataRaftHolder.getFsm().getAllStorageGroups();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
index 66544a8..b8b6854 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
@@ -23,8 +23,14 @@ import com.alipay.sofa.jraft.util.OnlyForTest;
 public class PhysicalNode {
 
   private String ip;
+
   private int port;
 
+  /**
+   * Group ID of the data group whose first node is this PhysicalNode.
+   */
+  private String groupId;
+
   public PhysicalNode(String ip, int port) {
     this.ip = ip;
     this.port = port;
@@ -77,6 +83,14 @@ public class PhysicalNode {
     return port;
   }
 
+  public String getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(String groupId) {
+    this.groupId = groupId;
+  }
+
   @OnlyForTest
   public void setIp(String ip) {
     this.ip = ip;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
index 544c0fc..0460e05 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
@@ -51,11 +51,6 @@ public class Router {
   private Map<PhysicalNode, PhysicalNode[][]> dataPartitionCache = new HashMap<>();
 
   /**
-   * Key is the first node of the group, value is group id.
-   */
-  private Map<PhysicalNode, String> nodeMapGroupIdCache = new HashMap<>();
-
-  /**
    * Key is group id, value is the first node of the group.
    */
   private Map<String, PhysicalNode> groupIdMapNodeCache = new HashMap<>();
@@ -66,7 +61,9 @@ public class Router {
   public static final String DATA_GROUP_STR = "data-group-";
 
   private HashFunction hashFunction = new MD5Hash();
+
   private final SortedMap<Integer, PhysicalNode> physicalRing = new TreeMap<>();
+
   private final SortedMap<Integer, VirtualNode> virtualRing = new TreeMap<>();
 
   private static class RouterHolder {
@@ -86,8 +83,11 @@ public class Router {
   * This method is public only for tests; you should not invoke it explicitly.
    */
   public void init() {
+    init(ClusterDescriptor.getInstance().getConfig());
+  }
+
+  public void init(ClusterConfig config) {
     reset();
-    ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
     String[] hosts = config.getNodes();
     int replicator = config.getReplication();
     int numOfVirtualNodes = config.getNumOfVirtualNodes();
@@ -99,17 +99,9 @@ public class Router {
       if (len < replicator) {
         throw new ErrorConfigureExecption(String.format("Replicator number %d is greater "
             + "than cluster number %d", replicator, len));
-      } else if (len == replicator) {
-        PhysicalNode[][] val = new PhysicalNode[1][len];
-        nodeMapGroupIdCache.put(first, DATA_GROUP_STR + "0");
-        groupIdMapNodeCache.put(DATA_GROUP_STR + "0", first);
-        for (int j = 0; j < len; j++) {
-          val[0][j] = nodes[(i + j) % len];
-        }
-        dataPartitionCache.put(first, val);
-      }  else {
+      } else {
         PhysicalNode[][] val = new PhysicalNode[replicator][replicator];
-        nodeMapGroupIdCache.put(first, DATA_GROUP_STR + i);
+        first.setGroupId(DATA_GROUP_STR + i);
         groupIdMapNodeCache.put(DATA_GROUP_STR + i, first);
         for (int j = 0; j < replicator; j++) {
           for (int k = 0; k < replicator; k++) {
@@ -121,7 +113,7 @@ public class Router {
     }
   }
 
-  private void createHashRing(String[] hosts, int numOfVirtualNodes){
+  private void createHashRing(String[] hosts, int numOfVirtualNodes) {
     for (String host : hosts) {
       String[] values = host.split(":");
       PhysicalNode node = new PhysicalNode(values[0].trim(), Integer.parseInt(values[1].trim()));
@@ -145,7 +137,7 @@ public class Router {
   }
 
   public String getGroupID(PhysicalNode[] nodes) {
-    return nodeMapGroupIdCache.get(nodes[0]);
+    return nodes[0].getGroupId();
   }
 
   public PhysicalNode[][] getGroupsNodes(String ip, int port) {
@@ -159,14 +151,14 @@ public class Router {
     physicalRing.put(hashFunction.hash(node.getKey()), node);
     for (int i = 0; i < virtualNum; i++) {
       VirtualNode vNode = new VirtualNode(i, node);
-      virtualRing.put(hashFunction.hash(vNode.getKey()), vNode);
+      virtualRing.put(hashFunction.hash(vNode.toString()), vNode);
     }
   }
 
   /**
    * For a storage group, compute the nearest physical node on the hash ring
    */
-  public PhysicalNode routeNode(String objectKey) {
+  PhysicalNode routeNode(String objectKey) {
     int hashVal = hashFunction.hash(objectKey);
     SortedMap<Integer, VirtualNode> tailMap = virtualRing.tailMap(hashVal);
     Integer nodeHashVal = !tailMap.isEmpty() ? tailMap.firstKey() : virtualRing.firstKey();
@@ -188,7 +180,6 @@ public class Router {
     virtualRing.clear();
     sgRouter.clear();
     dataPartitionCache.clear();
-    nodeMapGroupIdCache.clear();
     groupIdMapNodeCache.clear();
   }
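
The routeNode lookup above is the classic consistent-hashing step: hash the key, take the first ring entry at or clockwise after that hash via tailMap, and wrap around to firstKey when the tail is empty. A minimal standalone sketch with a TreeMap ring (int hashes and String nodes stand in for the MD5 hash and VirtualNode):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class Ring {

      private final TreeMap<Integer, String> ring = new TreeMap<>();

      void add(int hash, String node) {
        ring.put(hash, node);
      }

      // First node clockwise from the key's hash, wrapping past the ring's end.
      String route(int keyHash) {
        SortedMap<Integer, String> tail = ring.tailMap(keyHash);
        Integer slot = !tail.isEmpty() ? tail.firstKey() : ring.firstKey();
        return ring.get(slot);
      }

      public static void main(String[] args) {
        Ring r = new Ring();
        r.add(100, "n1");
        r.add(200, "n2");
        System.out.println(r.route(150)); // n2
        System.out.println(r.route(250)); // wraps around to n1
      }
    }
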
 
@@ -202,15 +193,10 @@ public class Router {
   @OnlyForTest
   public void showVirtualRing() {
     for (Entry<Integer, VirtualNode> entry : virtualRing.entrySet()) {
-      LOGGER.info("{}-{}", entry.getKey(), entry.getValue().getKey());
+      LOGGER.info("{}-{}", entry.getKey(), entry.getValue());
     }
   }
 
-  public boolean containPhysicalNodeBySG(String storageGroup, PhysicalNode node) {
-    PhysicalNode[] nodes = routeGroup(storageGroup);
-    return Arrays.asList(nodes).contains(node);
-  }
-
   public boolean containPhysicalNodeByGroupId(String groupId, PhysicalNode node) {
     PhysicalNode[] nodes = getNodesByGroupId(groupId);
     return Arrays.asList(nodes).contains(node);
@@ -238,6 +224,14 @@ public class Router {
     return groupIdMapNodeCache.keySet();
   }
 
+  public SortedMap<Integer, PhysicalNode> getPhysicalRing() {
+    return physicalRing;
+  }
+
+  public SortedMap<Integer, VirtualNode> getVirtualRing() {
+    return virtualRing;
+  }
+
   /**
    * Get raft group id by storage group name
    */
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
index 88816cf..891f755 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/VirtualNode.java
@@ -19,20 +19,25 @@
 package org.apache.iotdb.cluster.utils.hash;
 
 public class VirtualNode {
-  //the index of the virtual node in the physicalNode
-  private final int replicaIndex;
+
+  /**
+   * the index of the virtual node in the physicalNode
+   */
+  private final int index;
+
   private final PhysicalNode physicalNode;
 
-  VirtualNode(int replicaIndex, PhysicalNode physicalNode) {
-    this.replicaIndex = replicaIndex;
+  VirtualNode(int index, PhysicalNode physicalNode) {
+    this.index = index;
     this.physicalNode = physicalNode;
   }
 
-  PhysicalNode getPhysicalNode() {
+  public PhysicalNode getPhysicalNode() {
     return this.physicalNode;
   }
 
-  String getKey() {
-    return String.format("%s-%d", physicalNode.getKey(), replicaIndex);
+  @Override
+  public String toString() {
+    return String.format("%s-%d", physicalNode.getKey(), index);
   }
 }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
similarity index 80%
rename from cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java
rename to cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
index 148d25d..01333a0 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskManagerTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManagerTest.java
@@ -28,9 +28,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class QPTaskManagerTest {
+public class QPTaskThreadManagerTest {
 
-  private QPTaskManager qpTaskManager = QPTaskManager.getInstance();
+  private QPTaskThreadManager qpTaskThreadManager = QPTaskThreadManager.getInstance();
 
   private ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig();
 
@@ -64,22 +64,22 @@ public class QPTaskManagerTest {
   @Test
   public void testSubmitAndClose() throws InterruptedException {
 
-    assertEquals(clusterConfig.getConcurrentQPSubTaskThread(), qpTaskManager.getThreadPoolSize());
+    assertEquals(clusterConfig.getConcurrentQPSubTaskThread(), qpTaskThreadManager.getThreadPoolSize());
 
-    int threadPoolSize = qpTaskManager.getThreadPoolSize();
+    int threadPoolSize = qpTaskThreadManager.getThreadPoolSize();
     // test thread num
     for (int i = 1; i <= threadPoolSize + 2; i++) {
-      qpTaskManager.submit(testRunnable);
+      qpTaskThreadManager.submit(testRunnable);
       Thread.sleep(10);
-      assertEquals(Math.min(i, threadPoolSize), qpTaskManager.getActiveCnt());
+      assertEquals(Math.min(i, threadPoolSize), qpTaskThreadManager.getActiveCnt());
     }
 
     // test close
     try {
       new Thread(changeMark).start();
-      qpTaskManager.close(true, blockTimeOut);
+      qpTaskThreadManager.close(true, blockTimeOut);
     } catch (ProcessorException e) {
-      assertEquals("qp task manager thread pool doesn't exit after 10 ms", e.getMessage());
+      assertEquals("qp-task-thread-manager thread pool doesn't exit after 10 ms", e.getMessage());
     }
   }
 }
\ No newline at end of file
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
index a03ee99..4c3286e 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
@@ -54,8 +54,8 @@ public class ClusterDescriptorTest {
   private String testVNodesNew = "4";
   private String testClientNumNew = "400000";
   private String testQueueLenNew = "300000";
-  private String testMetadataConsistencyNew = "2";
-  private String testDataConsistencyNew = "4";
+  private String testMetadataConsistencyNew = String.valueOf(ClusterConsistencyLevel.STRONG.ordinal());
+  private String testDataConsistencyNew = String.valueOf(ClusterConsistencyLevel.STRONG.ordinal());
   private String testConcurrentQPTaskThreadNew = "6";
   private String testConcurrentRaftTaskThreadNew = "11";
 
@@ -96,7 +96,7 @@ public class ClusterDescriptorTest {
       put("qp_task_redo_count", testTaskRedoCountNew);
       put("qp_task_timeout_ms", testTaskTimeoutMSNew);
       put("num_of_virtual_nodes", testVNodesNew);
-      put("max_num_of_inner_rpc_client", testClientNumNew);
+      put("concurrent_inner_rpc_client_thread", testClientNumNew);
       put("max_queue_num_of_inner_rpc_client", testQueueLenNew);
       put("read_metadata_consistency_level", testMetadataConsistencyNew);
       put("read_data_consistency_level", testDataConsistencyNew);
@@ -143,8 +143,8 @@ public class ClusterDescriptorTest {
     assertEquals(testTaskRedoCountNew, config.getQpTaskRedoCount() + "");
     assertEquals(testTaskTimeoutMSNew, config.getQpTaskTimeout() + "");
     assertEquals(testVNodesNew, config.getNumOfVirtualNodes() + "");
-    assertEquals(testClientNumNew, config.getMaxNumOfInnerRpcClient() + "");
-    assertEquals(testQueueLenNew, config.getMaxQueueNumOfInnerRpcClient() + "");
+    assertEquals(testClientNumNew, config.getConcurrentInnerRpcClientThread() + "");
+    assertEquals(testQueueLenNew, config.getMaxQueueNumOfQPTask() + "");
     assertEquals(testMetadataConsistencyNew, config.getReadMetadataConsistencyLevel() + "");
     assertEquals(testDataConsistencyNew, config.getReadDataConsistencyLevel() + "");
     assertEquals(testConcurrentQPTaskThreadNew, config.getConcurrentQPSubTaskThread() + "");
@@ -198,8 +198,8 @@ public class ClusterDescriptorTest {
     testTaskRedoCountOld = config.getQpTaskRedoCount();
     testTaskTimeoutMSOld = config.getQpTaskTimeout();
     testVNodesOld = config.getNumOfVirtualNodes();
-    testClientNumOld = config.getMaxNumOfInnerRpcClient();
-    testQueueLenOld = config.getMaxQueueNumOfInnerRpcClient();
+    testClientNumOld = config.getConcurrentInnerRpcClientThread();
+    testQueueLenOld = config.getMaxQueueNumOfQPTask();
     testMetadataConsistencyOld = config.getReadMetadataConsistencyLevel();
     testDataConsistencyOld = config.getReadDataConsistencyLevel();
     testConcurrentQPTaskThreadOld = config.getConcurrentQPSubTaskThread();
@@ -221,8 +221,8 @@ public class ClusterDescriptorTest {
     config.setQpTaskRedoCount(testTaskRedoCountOld);
     config.setQpTaskTimeout(testTaskTimeoutMSOld);
     config.setNumOfVirtualNodes(testVNodesOld);
-    config.setMaxNumOfInnerRpcClient(testClientNumOld);
-    config.setMaxQueueNumOfInnerRpcClient(testQueueLenOld);
+    config.setConcurrentInnerRpcClientThread(testClientNumOld);
+    config.setMaxQueueNumOfQPTask(testQueueLenOld);
     config.setReadMetadataConsistencyLevel(testMetadataConsistencyOld);
     config.setReadDataConsistencyLevel(testDataConsistencyOld);
     config.setConcurrentQPSubTaskThread(testConcurrentQPTaskThreadOld);
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
new file mode 100644
index 0000000..71cf523
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import org.apache.iotdb.tsfile.write.record.TSRecord;
+import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
+
+public class Constant {
+
+  public static final String d0s0 = "root.vehicle.d0.s0";
+  public static final String d0s1 = "root.vehicle.d0.s1";
+  public static final String d0s2 = "root.vehicle.d0.s2";
+  public static final String d0s3 = "root.vehicle.d0.s3";
+  public static final String d0s4 = "root.vehicle.d0.s4";
+  public static final String d0s5 = "root.vehicle.d0.s5";
+  public static final String d1s0 = "root.vehicle.d1.s0";
+  public static final String d1s1 = "root.vehicle.d1.s1";
+  public static final String TIMESTAMP_STR = "Time";
+  public static boolean testFlag = true;
+  public static String[] stringValue = new String[]{"A", "B", "C", "D", "E"};
+  public static String[] booleanValue = new String[]{"true", "false"};
+
+  public static String[] create_sql = new String[]{"SET STORAGE GROUP TO root.vehicle",
+
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s5 WITH DATATYPE=DOUBLE, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+
+  };
+
+  public static String insertTemplate = "insert into %s(timestamp%s) values(%d%s)";
+
+  public static String first(String path) {
+    return String.format("first(%s)", path);
+  }
+
+  public static String last(String path) {
+    return String.format("last(%s)", path);
+  }
+
+  public static String sum(String path) {
+    return String.format("sum(%s)", path);
+  }
+
+  public static String mean(String path) {
+    return String.format("mean(%s)", path);
+  }
+
+  public static String count(String path) {
+    return String.format("count(%s)", path);
+  }
+
+  public static String max_time(String path) {
+    return String.format("max_time(%s)", path);
+  }
+
+  public static String min_time(String path) {
+    return String.format("min_time(%s)", path);
+  }
+
+  public static String max_value(String path) {
+    return String.format("max_value(%s)", path);
+  }
+
+  public static String min_value(String path) {
+    return String.format("min_value(%s)", path);
+  }
+
+  public static String recordToInsert(TSRecord record) {
+    StringBuilder measurements = new StringBuilder();
+    StringBuilder values = new StringBuilder();
+    for (DataPoint dataPoint : record.dataPointList) {
+      measurements.append(",").append(dataPoint.getMeasurementId());
+      values.append(",").append(dataPoint.getValue());
+    }
+    return String
+        .format(insertTemplate, record.deviceId, measurements.toString(), record.time, values);
+  }
+}
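
A usage sketch of recordToInsert, assuming the tsfile module's TSRecord(long, String) constructor and its IntDataPoint/FloatDataPoint data points:

    TSRecord record = new TSRecord(1L, "root.vehicle.d0");
    record.addTuple(new IntDataPoint("s0", 101));
    record.addTuple(new FloatDataPoint("s2", 2.5f));
    // yields: insert into root.vehicle.d0(timestamp,s0,s2) values(1,101,2.5)
    String sql = Constant.recordToInsert(record);
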
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java
new file mode 100644
index 0000000..0165bba
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IOTDBGroupByIT.java
@@ -0,0 +1,490 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.integration.Constant.count;
+import static org.apache.iotdb.cluster.integration.Constant.first;
+import static org.apache.iotdb.cluster.integration.Constant.last;
+import static org.apache.iotdb.cluster.integration.Constant.max_time;
+import static org.apache.iotdb.cluster.integration.Constant.max_value;
+import static org.apache.iotdb.cluster.integration.Constant.mean;
+import static org.apache.iotdb.cluster.integration.Constant.min_time;
+import static org.apache.iotdb.cluster.integration.Constant.min_value;
+import static org.apache.iotdb.cluster.integration.Constant.sum;
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IOTDBGroupByIT {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static String[] createSqls = new String[]{
+      "SET STORAGE GROUP TO root.ln.wf01.wt01",
+      "CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=DOUBLE, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.hardware WITH DATATYPE=INT32, ENCODING=PLAIN"};
+  private static String[] insertSqls = new String[]{
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(1, 1.1, false, 11)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(2, 2.2, true, 22)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(3, 3.3, false, 33 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(4, 4.4, false, 44)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(5, 5.5, false, 55)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(100, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(150, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(200, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(250, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(300, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(10, 10.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(20, 20.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(30, 30.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(40, 40.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(50, 50.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(500, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(510, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(520, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(530, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(540, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(580, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(590, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(600, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(610, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(620, 500.5, false, 550)",
+  };
+
+  private static final String TIMESTAMP_STR = "Time";
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    insertSql();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+  @Test
+  public void countSumMeanTest() {
+    String[] retArray1 = new String[]{
+        "2,1,4.4,4.4",
+        "5,3,35.8,11.933333333333332",
+        "25,1,30.3,30.3",
+        "50,1,50.5,50.5",
+        "65,0,0.0,null",
+        "85,1,100.1,100.1",
+        "105,0,0.0,null",
+        "125,0,0.0,null",
+        "145,1,200.2,200.2",
+        "310,0,0.0,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,2,7.7,3.85",
+        "5,3,35.8,11.933333333333332",
+        "25,1,30.3,30.3",
+        "50,1,50.5,50.5",
+        "65,0,0.0,null",
+        "85,1,100.1,100.1",
+        "105,0,0.0,null",
+        "125,0,0.0,null",
+        "145,1,200.2,200.2",
+        "310,0,0.0,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(temperature), sum(temperature), mean(temperature) from "
+              + "root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(sum("root.ln.wf01.wt01.temperature")) + "," + resultSet
+            .getString(mean("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select count(temperature), sum(temperature), mean(temperature) from "
+              + "root.ln.wf01.wt01 where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(sum("root.ln.wf01.wt01.temperature")) + "," + resultSet
+            .getString(mean("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
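
The GROUP BY clause exercised here takes the shape GROUP BY (unit, origin, [start, end], ...): each listed interval is sliced into unit-wide buckets aligned to the origin, and every output row is labelled with its bucket's (clipped) start time. For example, with unit 20ms, origin 5 and interval [50, 160], the aligned boundaries ..., 45, 65, 85, ... appear to produce the buckets labelled:

    50 (clipped from 45), 65, 85, 105, 125, 145

matching the middle rows of retArray1 above.
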
+
+  @Test
+  public void maxMinValueTimeTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4.4,4,4",
+        "5,20.2,5.5,20,5",
+        "25,30.3,30.3,30,30",
+        "50,50.5,50.5,50,50",
+        "65,null,null,null,null",
+        "85,100.1,100.1,100,100",
+        "105,null,null,null,null",
+        "125,null,null,null,null",
+        "145,200.2,200.2,150,150",
+        "310,null,null,null,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,4.4,3.3,4,3",
+        "5,20.2,5.5,20,5",
+        "25,30.3,30.3,30,30",
+        "50,50.5,50.5,50,50",
+        "65,null,null,null,null",
+        "85,100.1,100.1,100,100",
+        "105,null,null,null,null",
+        "125,null,null,null,null",
+        "145,200.2,200.2,150,150",
+        "310,null,null,null,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select max_value(temperature), min_value(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(max_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_value("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select max_value(temperature), min_value(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(max_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_value("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void firstLastTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4.4",
+        "5,20.2,5.5",
+        "25,30.3,30.3",
+        "50,50.5,50.5",
+        "65,null,null",
+        "85,100.1,100.1",
+        "105,null,null",
+        "125,null,null",
+        "145,200.2,200.2",
+        "310,null,null"
+    };
+    String[] retArray2 = new String[]{
+        "2,4.4,3.3",
+        "5,20.2,5.5",
+        "25,30.3,30.3",
+        "50,50.5,50.5",
+        "65,null,null",
+        "85,100.1,100.1",
+        "105,null,null",
+        "125,null,null",
+        "145,200.2,200.2",
+        "310,null,null"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select last(temperature), first(temperature) from root.ln.wf01.wt01 where time > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(last("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(first("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select first(temperature), last(temperature) from root.ln.wf01.wt01 "
+              + "where temperature > 3 "
+              + "GROUP BY (20ms, 5,[2,30], [35,37], [50, 160], [310, 314])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(last("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(first("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void largeIntervalTest() {
+    String[] retArray1 = new String[]{
+        "2,4.4,4,20,4",
+        "30,30.3,16,610,30",
+        "620,500.5,1,620,620"
+    };
+    String[] retArray2 = new String[]{
+        "2,3.3,5,20,3",
+        "30,30.3,16,610,30",
+        "620,500.5,1,620,620"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select min_value(temperature), count(temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where time > 3 GROUP BY "
+              + "(590ms, 30, [2, 30], [30, 120], [100, 120], [123, 125], [155, 550], [540, 680])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(min_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(count("root.ln.wf01.wt01.temperature")) + "," +
+            resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray1.length, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select min_value(temperature), count (temperature), max_time(temperature), "
+              + "min_time(temperature) from root.ln.wf01.wt01 where temperature > 3 GROUP BY "
+              + "(590ms, 30, [2, 30], [30, 120], [100, 120], [123, 125], [155, 550],[540, 680])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet
+            .getString(min_value("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(count("root.ln.wf01.wt01.temperature")) + ","
+            + resultSet.getString(max_time("root.ln.wf01.wt01.temperature"))
+            + "," + resultSet.getString(min_time("root.ln.wf01.wt01.temperature"));
+        Assert.assertEquals(retArray2[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(retArray2.length, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  public void smallPartitionTest() {
+    String[] retArray1 = new String[]{
+        "50,100.1,50.5,150.6",
+        "615,500.5,500.5,500.5"
+
+    };
+    String[] retArray2 = new String[]{
+        "50,100.1,50.5,150.6",
+        "585,null,null,0.0",
+        "590,500.5,200.2,700.7"
+    };
+    try (Connection connection = DriverManager.
+        getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")) {
... 19050 lines suppressed ...


[incubator-iotdb] 01/02: Cluster read (#152)

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 729935036cefd8d8e55ea2ba498c3d442ace40e7
Author: lta <li...@163.com>
AuthorDate: Tue May 7 19:28:11 2019 +0800

    Cluster read (#152)
    
    * reorganize package
    
    * initial query module
    
    * remove threadlocal in TSServiceClusterImpl
    
    * add Query Manager
    
    * update
    
    * update
    
    * add query manager
    
    * add single query manager
    
    * add ClusterRpcReaderUtils
    
    * add query series data processor
    
    * transfer physical plans implement Serializable
    
    * manage package
    
    * add reader processor
    
    * add task id
    
    * add cached batch data
    
    * remove rpc batch reader
    
    * complete code of no filter reader
    
    * reorganize engine executor with time generator
    
    * reorganize engine executor with time generator
    
    * complete query with no value filter
    
    * add construct filter query time generator
    
    * add comments
    
    * add comments
    
    * add comments
    
    * fix bug of qp executor test and add prune expression tree
    
    * add query node process for query with value
    
    * complete cluster query with value filter
    
    * remove task id
    
    * remove task id
    
    * fix issue of remote query finish
    
    * update
    
    * fix bug of unit test
    
    * update remote unit test
    
    * fix a severe bug
    
    * add rpc manager ut and fix some bugs
    
    * add ut of query utils
    
    * add ut
    
    * fix a bug of partition query plans
    
    * fix some severe bugs and change read resource lock to count
    
    * add ut of query remote data
    
    * fix some severe bugs and finish ut
    
    * fix some severe bugs and finish ut
    
    * fix some severe bugs
    
    * add some request and response , fix bug of cluster pom
    
    * fix a bug
    
    * skip test
    
    * remove disconnect function
    
    * fix a bug
    
    * fix some bugs and add statistic
    
    * add read resource usage
    
    * reimplement raft node as client manager
    
    * update jraft version
    
    * clear property set in test
    
    * add init router when tear down in router test
    
    * add large data ut and add query timeout mechanism
    
    add reset timer
    
    * add abstract timer class
    
    * reimplement raft node as manager
    
    * update travis.yml
    
    * fix some issues according to pr comments
---
 cluster/pom.xml                                    |  12 +-
 .../iotdb/cluster/concurrent/ThreadName.java       |  10 +-
 .../cluster/concurrent/pool/QueryTimerManager.java |  74 +++
 .../cluster/concurrent/pool/ThreadPoolManager.java |   2 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |  31 +-
 .../iotdb/cluster/config/ClusterConstant.java      |  13 +-
 .../iotdb/cluster/config/ClusterDescriptor.java    |  30 +-
 .../org/apache/iotdb/cluster/entity/Server.java    |  48 +-
 .../cluster/entity/raft/DataStateMachine.java      |   4 +-
 .../cluster/entity/raft/MetadataStateManchine.java |   4 +-
 .../apache/iotdb/cluster/qp/ClusterQPExecutor.java | 260 ----------
 .../cluster/qp/executor/AbstractQPExecutor.java    | 178 +++++++
 .../qp/executor/ClusterQueryProcessExecutor.java   | 177 +++++++
 .../cluster/qp/executor/NonQueryExecutor.java      |  55 ++-
 .../cluster/qp/executor/QueryMetadataExecutor.java |  73 ++-
 .../cluster/qp/{callback => task}/BatchQPTask.java |  17 +-
 .../cluster/qp/{callback => task}/MultiQPTask.java |   8 +-
 .../cluster/qp/{callback => task}/QPTask.java      |   2 +-
 .../apache/iotdb/cluster/qp/task/QueryTask.java    |  39 +-
 .../qp/{callback => task}/SingleQPTask.java        |   6 +-
 .../org/apache/iotdb/cluster/query/PathType.java   |  19 +-
 .../org/apache/iotdb/cluster/query/QueryType.java  |  23 +-
 .../dataset/ClusterDataSetWithTimeGenerator.java   | 100 ++--
 .../executor/ClusterExecutorWithTimeGenerator.java | 130 +++++
 .../ClusterExecutorWithoutTimeGenerator.java       | 102 ++++
 .../cluster/query/executor/ClusterQueryRouter.java | 121 +++++
 .../cluster/query/expression/TrueExpression.java   |  31 +-
 .../query/factory/ClusterSeriesReaderFactory.java  |  94 ++++
 .../coordinatornode/ClusterRpcQueryManager.java    | 111 +++++
 .../ClusterRpcSingleQueryManager.java              | 415 ++++++++++++++++
 .../manager/coordinatornode/FilterGroupEntity.java | 111 +++++
 .../coordinatornode/IClusterRpcQueryManager.java   |  69 +++
 .../IClusterRpcSingleQueryManager.java             |  98 ++++
 .../querynode/ClusterLocalQueryManager.java        | 125 +++++
 .../querynode/ClusterLocalSingleQueryManager.java  | 335 +++++++++++++
 .../querynode/IClusterLocalQueryManager.java       |  82 +++
 .../querynode/IClusterLocalSingleQueryManager.java |  74 +++
 .../AbstractClusterPointReader.java                |  73 +++
 .../coordinatornode/ClusterFilterSeriesReader.java | 126 +++++
 .../coordinatornode/ClusterSelectSeriesReader.java | 167 +++++++
 .../querynode/AbstractClusterBatchReader.java}     |  23 +-
 .../querynode/ClusterBatchReaderByTimestamp.java   |  86 ++++
 .../ClusterBatchReaderWithoutTimeGenerator.java    |  94 ++++
 .../querynode/ClusterFilterSeriesBatchReader.java  | 121 +++++
 .../IClusterFilterSeriesBatchReader.java}          |  19 +-
 .../timegenerator/ClusterLeafNode.java}            |  35 +-
 .../timegenerator/ClusterNodeConstructor.java      | 106 ++++
 .../query/timegenerator/ClusterTimeGenerator.java  |  28 +-
 .../cluster/query/utils/ClusterRpcReaderUtils.java | 128 +++++
 .../iotdb/cluster/query/utils/ExpressionUtils.java | 131 +++++
 .../query/utils/QueryPlanPartitionUtils.java       | 127 +++++
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  16 +-
 .../rpc/raft/impl/RaftNodeAsClientManager.java     |  90 ++--
 .../DataGroupNonQueryAsyncProcessor.java           |   7 +-
 .../MetaGroupNonQueryAsyncProcessor.java           |   7 +-
 .../querydata/CloseSeriesReaderSyncProcessor.java  |  43 ++
 .../querydata/InitSeriesReaderSyncProcessor.java   |  64 +++
 .../QuerySeriesDataByTimestampSyncProcessor.java   |  36 +-
 .../querydata/QuerySeriesDataSyncProcessor.java    |  38 +-
 .../QueryMetadataAsyncProcessor.java               |   7 +-
 .../QueryMetadataInStringAsyncProcessor.java       |   7 +-
 .../QueryPathsAsyncProcessor.java                  |   7 +-
 .../QuerySeriesTypeAsyncProcessor.java             |   7 +-
 .../QueryTimeSeriesAsyncProcessor.java             |   7 +-
 ...BasicRequest.java => BasicNonQueryRequest.java} |  27 +-
 .../rpc/raft/request/BasicQueryRequest.java        |   5 +
 .../cluster/rpc/raft/request/BasicRequest.java     |  21 -
 .../{ => nonquery}/DataGroupNonQueryRequest.java   |   7 +-
 .../{ => nonquery}/MetaGroupNonQueryRequest.java   |   8 +-
 .../CloseSeriesReaderRequest.java}                 |  27 +-
 .../request/querydata/InitSeriesReaderRequest.java |  91 ++++
 .../QuerySeriesDataByTimestampRequest.java         |  91 ++++
 .../request/querydata/QuerySeriesDataRequest.java  |  98 ++++
 .../QueryMetadataInStringRequest.java              |   8 +-
 .../{ => querymetadata}/QueryMetadataRequest.java  |   8 +-
 .../{ => querymetadata}/QueryPathsRequest.java     |   7 +-
 .../QuerySeriesTypeRequest.java                    |   7 +-
 .../QueryStorageGroupRequest.java                  |   8 +-
 .../QueryTimeSeriesRequest.java                    |   7 +-
 ...ryResponse.java => BasicQueryDataResponse.java} |  24 +-
 .../{ => nonquery}/DataGroupNonQueryResponse.java  |   6 +-
 .../{ => nonquery}/MetaGroupNonQueryResponse.java  |   6 +-
 .../querydata/InitSeriesReaderResponse.java        |  59 +++
 .../QuerySeriesDataByTimestampResponse.java        |  18 +-
 .../querydata/QuerySeriesDataResponse.java}        |  26 +-
 .../QueryMetadataInStringResponse.java             |   5 +-
 .../{ => querymetadata}/QueryMetadataResponse.java |   4 +-
 .../{ => querymetadata}/QueryPathsResponse.java    |   4 +-
 .../QuerySeriesTypeResponse.java                   |   4 +-
 .../QueryStorageGroupResponse.java                 |   4 +-
 .../QueryTimeSeriesResponse.java                   |   4 +-
 .../{rpc => }/service/TSServiceClusterImpl.java    | 147 ++++--
 .../iotdb/cluster/utils/QPExecutorUtils.java       | 156 ++++++
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |  85 +++-
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |  16 +-
 .../apache/iotdb/cluster/utils/hash/Router.java    |   8 +
 .../cluster/config/ClusterDescriptorTest.java      |   9 +-
 .../integration/IoTDBMetadataFetchRemoteIT.java    |  13 +-
 ...ecutorTest.java => AbstractQPExecutorTest.java} |  40 +-
 .../cluster/query/ClusterQueryLargeDataTest.java   | 507 +++++++++++++++++++
 .../iotdb/cluster/query/ClusterQueryTest.java      | 550 +++++++++++++++++++++
 .../query/manager/ClusterLocalManagerTest.java     | 406 +++++++++++++++
 .../query/manager/ClusterRpcManagerTest.java       | 334 +++++++++++++
 .../cluster/query/utils/ExpressionUtilsTest.java   | 230 +++++++++
 .../query/utils/QueryPlanPartitionUtilsTest.java   | 332 +++++++++++++
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |   6 +-
 .../java/org/apache/iotdb/cluster/utils/Utils.java |  27 +
 .../iotdb/cluster/utils/hash/RouterTest.java       |   3 +
 iotdb/iotdb/conf/iotdb-cluster.properties          |  13 +-
 iotdb/iotdb/conf/logback.xml                       |   2 -
 .../iotdb/db/engine/filenode/FileNodeManager.java  |   8 +-
 .../db/engine/filenode/FileNodeProcessor.java      | 136 ++---
 .../db/exception/FileNodeManagerException.java     |   4 +
 .../org/apache/iotdb/db/qp/QueryProcessor.java     |  11 +-
 .../db/qp/executor/IQueryProcessExecutor.java      | 143 ++++++
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   |   3 +-
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |  98 +---
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   3 +-
 .../iotdb/db/qp/logical/crud/FilterOperator.java   |   5 +-
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |   4 +-
 .../iotdb/db/qp/physical/crud/AggregationPlan.java |   1 +
 .../iotdb/db/qp/physical/crud/DeletePlan.java      |   1 +
 .../iotdb/db/qp/physical/crud/FillQueryPlan.java   |   1 +
 .../iotdb/db/qp/physical/crud/GroupByPlan.java     |   1 +
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |   1 +
 .../iotdb/db/qp/physical/crud/QueryPlan.java       |   5 +-
 .../iotdb/db/qp/physical/crud/UpdatePlan.java      |   1 +
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |   1 +
 .../iotdb/db/qp/physical/sys/LoadDataPlan.java     |   1 +
 .../iotdb/db/qp/physical/sys/MetadataPlan.java     |   1 +
 .../iotdb/db/qp/physical/sys/PropertyPlan.java     |   1 +
 .../{writelog => qp/physical}/transfer/Codec.java  |   3 +-
 .../physical}/transfer/CodecInstances.java         |   2 +-
 .../physical}/transfer/PhysicalPlanCodec.java      |  14 +-
 .../transfer/PhysicalPlanLogTransfer.java          |   2 +-
 .../physical}/transfer/SystemLogOperator.java      |   2 +-
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |   6 +-
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |   5 +-
 .../db/query/control/QueryResourceManager.java     |  27 +-
 .../dataset/EngineDataSetWithTimeGenerator.java    |  35 +-
 .../dataset/EngineDataSetWithoutTimeGenerator.java |   4 +
 .../EngineExecutorWithoutTimeGenerator.java        | 103 +---
 .../iotdb/db/query/executor/EngineQueryRouter.java |  34 +-
 .../executor/ExecutorWithoutTimeGenerator.java     |  80 +++
 .../db/query/executor/IEngineQueryRouter.java      |  78 +++
 ...nstructor.java => AbstractNodeConstructor.java} |  57 +--
 .../query/timegenerator/EngineNodeConstructor.java |  62 +--
 .../query/timegenerator/EngineTimeGenerator.java   |   1 -
 .../org/apache/iotdb/db/service/TSServiceImpl.java |  65 +--
 .../apache/iotdb/db/writelog/io/RAFLogReader.java  |   2 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   2 +-
 .../iotdb/db/integration/IoTDBSeriesReaderIT.java  |   2 +-
 .../transfer/PhysicalPlanLogTransferTest.java      |   2 +-
 .../org/apache/iotdb/db/qp/plan/QPUpdateTest.java  |   2 +-
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java |   3 +-
 .../EngineDataSetWithTimeGeneratorTest.java        |   2 +-
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |   2 +-
 .../apache/iotdb/db/writelog/PerformanceTest.java  |   3 +-
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |   2 +-
 .../iotdb/db/writelog/io/LogWriterReaderTest.java  |   3 +-
 .../apache/iotdb/tsfile/read/common/BatchData.java |   4 +-
 .../org/apache/iotdb/tsfile/read/common/Field.java |  22 +
 .../org/apache/iotdb/tsfile/read/common/Path.java  |   4 +-
 .../tsfile/read/expression/ExpressionType.java     |   2 +-
 .../tsfile/read/expression/IBinaryExpression.java  |   6 +-
 .../iotdb/tsfile/read/expression/IExpression.java  |   4 +-
 .../read/expression/impl/BinaryExpression.java     |  38 +-
 .../read/expression/impl/GlobalTimeExpression.java |  10 +-
 .../expression/impl/SingleSeriesExpression.java    |  10 +-
 .../tsfile/read/filter/basic/BinaryFilter.java     |   3 +
 .../iotdb/tsfile/read/filter/basic/Filter.java     |   2 +
 .../tsfile/read/filter/basic/UnaryFilter.java      |   3 +
 .../tsfile/read/filter/operator/AndFilter.java     |   7 +-
 .../iotdb/tsfile/read/filter/operator/Eq.java      |   6 +
 .../iotdb/tsfile/read/filter/operator/Gt.java      |   6 +
 .../iotdb/tsfile/read/filter/operator/GtEq.java    |   6 +
 .../iotdb/tsfile/read/filter/operator/Lt.java      |   6 +
 .../iotdb/tsfile/read/filter/operator/LtEq.java    |   6 +
 .../iotdb/tsfile/read/filter/operator/NotEq.java   |   6 +
 .../tsfile/read/filter/operator/NotFilter.java     |   5 +
 .../tsfile/read/filter/operator/OrFilter.java      |   5 +
 .../query/dataset/DataSetWithTimeGenerator.java    |  32 --
 .../tsfile/read/query/dataset/QueryDataSet.java    |  36 ++
 .../java/org/apache/iotdb/tsfile/utils/Pair.java   |   5 +-
 184 files changed, 8047 insertions(+), 1373 deletions(-)

diff --git a/cluster/pom.xml b/cluster/pom.xml
index b0bca81..25d13ea 100644
--- a/cluster/pom.xml
+++ b/cluster/pom.xml
@@ -26,10 +26,10 @@
         <version>0.8.0-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
-    <artifactId>IoTDB-cluster</artifactId>
+    <artifactId>iotdb-cluster</artifactId>
     <name>IoTDB Cluster</name>
     <properties>
-        <jraft.version>1.2.4</jraft.version>
+        <jraft.version>1.2.5</jraft.version>
         <antlr3.version>3.5.2</antlr3.version>
         <common.lang3.version>3.8.1</common.lang3.version>
         <cluster.test.skip>false</cluster.test.skip>
@@ -84,21 +84,21 @@
                 <configuration>
                     <filesets>
                         <fileset>
-                            <directory>${project.basedir}/../iotdb/lib_cluster</directory>
+                            <directory>${project.basedir}/../iotdb/iotdb/lib_cluster</directory>
                             <includes>
                                 <include>**/*.jar</include>
                             </includes>
                             <followSymlinks>false</followSymlinks>
                         </fileset>
                         <fileset>
-                            <directory>${project.basedir}/../iotdb/data</directory>
+                            <directory>${project.basedir}/../iotdb/iotdb/data</directory>
                             <includes>
                                 <include>**/*</include>
                             </includes>
                             <followSymlinks>false</followSymlinks>
                         </fileset>
                         <fileset>
-                            <directory>${project.basedir}/../iotdb/logs</directory>
+                            <directory>${project.basedir}/../iotdb/iotdb/logs</directory>
                             <includes>
                                 <include>**/*</include>
                             </includes>
@@ -142,7 +142,7 @@
                     <outputDirectory>${project.basedir}/../iotdb/iotdb/lib_cluster</outputDirectory>
                 </configuration>
             </plugin>
-            <!--using `mvn test` to run UT, `mvn verify` to run ITs
+            <!--using `mvn test` to run UT, `mvn verify` to run ITs
                         Reference: https://antoniogoncalves.org/2012/12/13/lets-turn-integration-tests-with-maven-to-a-first-class-citizen/-->
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
index b500c18..9212258 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
@@ -19,8 +19,16 @@
 package org.apache.iotdb.cluster.concurrent;
 
 public enum ThreadName {
+
+  /**
+   * QP Task thread
+   */
   QP_TASK("QP-Task-Thread"),
-  RAFT_TASK("Raft-Task-Thread");
+
+  /**
+   * Remote query timer
+   */
+  REMOTE_QUERY_TIMER("Remote-Query-Timer");
 
   private String name;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
new file mode 100644
index 0000000..779488c
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerManager.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent.pool;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import org.apache.iotdb.cluster.concurrent.ThreadName;
+import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
+
+/**
+ * Manage all query timers in a query node; if a timer times out, close all query resources for
+ * the remote coordinator node.
+ */
+public class QueryTimerManager extends ThreadPoolManager {
+
+  private static final String MANAGER_NAME = "remote-query-timer-manager";
+
+  private static final int CORE_POOL_SIZE = 1;
+
+  @Override
+  public void init() {
+    pool = IoTDBThreadPoolFactory.newScheduledThreadPool(getThreadPoolSize(), getThreadName());
+  }
+
+  public static QueryTimerManager getInstance() {
+    return QueryTimerManager.QueryTimerManagerHolder.INSTANCE;
+  }
+
+  @Override
+  public String getManagerName() {
+    return MANAGER_NAME;
+  }
+
+  @Override
+  public String getThreadName() {
+    return ThreadName.REMOTE_QUERY_TIMER.getName();
+  }
+
+  @Override
+  public int getThreadPoolSize() {
+    return CORE_POOL_SIZE;
+  }
+
+  public ScheduledFuture<?> execute(Runnable task, long delayMs) {
+    checkInit();
+    return ((ScheduledExecutorService) pool).schedule(task, delayMs, TimeUnit.MILLISECONDS);
+  }
+
+  private static class QueryTimerManagerHolder {
+
+    private static final QueryTimerManager INSTANCE = new QueryTimerManager();
+
+    private QueryTimerManagerHolder() {
+
+    }
+  }
+}
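
A minimal usage sketch for the manager above (not part of this patch;
releaseQueryResource and taskId are hypothetical stand-ins for the real
cleanup hook): schedule a timeout task when a remote query starts, and
cancel it whenever the coordinator makes contact again.

    // Release query resources if the coordinator stays silent too long;
    // QUERY_TIMEOUT_IN_QUERY_NODE (milliseconds) is added to ClusterConstant below.
    ScheduledFuture<?> timer = QueryTimerManager.getInstance()
        .execute(() -> releaseQueryResource(taskId),  // hypothetical cleanup hook
            ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
    // On each new request from the coordinator, cancel and reschedule:
    timer.cancel(false);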
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
index a6dfa42..828cc1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
@@ -29,7 +29,7 @@ public abstract class ThreadPoolManager {
 
   ExecutorService pool;
 
-  private void checkInit() {
+  public void checkInit() {
     if (pool == null) {
       init();
     }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index b860835..0e6472d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -102,7 +102,7 @@ public class ClusterConfig {
    * then it sends requests to other nodes in the cluster. This parameter represents the maximum
    * timeout for these requests. The unit is milliseconds.
    **/
-  private int qpTaskTimeout = 1000;
+  private int qpTaskTimeout = 5000;
 
   /**
    * Number of virtual nodes
@@ -137,6 +137,19 @@ public class ClusterConfig {
    */
   private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 10;
 
+  /**
+   * Number of data points fetched from a remote query node in one batch while reading; default
+   * value is 10000. The smaller the value, the more round trips are needed and the slower the
+   * read becomes.
+   */
+  private int batchReadSize = 10000;
+
+  /**
+   * Maximum number of cached batch-data lists per series in the coordinator node while reading;
+   * default value is 2. The coordinator node receives client requests, requests data from the
+   * query nodes, and collects the results.
+   */
+  private int maxCachedBatchDataListSize = 2;
+
   public ClusterConfig() {
     // empty constructor
   }
@@ -323,4 +336,20 @@ public class ClusterConfig {
   public void setConcurrentQPSubTaskThread(int concurrentQPSubTaskThread) {
     this.concurrentQPSubTaskThread = concurrentQPSubTaskThread;
   }
+
+  public int getBatchReadSize() {
+    return batchReadSize;
+  }
+
+  public void setBatchReadSize(int batchReadSize) {
+    this.batchReadSize = batchReadSize;
+  }
+
+  public int getMaxCachedBatchDataListSize() {
+    return maxCachedBatchDataListSize;
+  }
+
+  public void setMaxCachedBatchDataListSize(int maxCachedBatchDataListSize) {
+    this.maxCachedBatchDataListSize = maxCachedBatchDataListSize;
+  }
 }
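
For reference, the two new knobs surface in iotdb-cluster.properties (a
sketch; the key names match the ClusterDescriptor changes below and the
defaults match ClusterConfig):

    # points fetched from a remote query node per round trip (default 10000)
    batch_read_size=10000
    # cached batch-data lists kept per series on the coordinator (default 2)
    max_cached_batch_data_list_size=2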
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
index 5aca9b0..5448847 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
@@ -20,10 +20,12 @@ package org.apache.iotdb.cluster.config;
 
 public class ClusterConstant {
 
-  private ClusterConstant(){
-
+  private ClusterConstant() {
   }
 
+  /**
+   * Set read metadata consistency level pattern
+   */
   public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+metadata\\s+level\\s+to\\s+\\d+";
   public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+data\\s+level\\s+to\\s+\\d+";
   public static final int MAX_CONSISTENCY_LEVEL = 2;
@@ -37,4 +39,11 @@ public class ClusterConstant {
    */
   public static final int CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT = 1000;
 
+  /**
+   * Query timeout in a query node. If the interval since the last communication with the
+   * coordinator node exceeds this parameter, the corresponding query resources are released.
+   * Each query in a query node has a <code>QueryRepeaterTimer</code>; the unit is milliseconds.
+   * Default value is 30 minutes.
+   */
+  public static final int QUERY_TIMEOUT_IN_QUERY_NODE = 30 * 60 * 1000;
+
 }
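
The two consistency-level patterns above accept statements of the
following shape (illustrative; per the executor javadocs below, level 1
is strong consistency and level 2 is weak consistency):

    set read metadata level to 1
    set read data level to 2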
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
index 8a49d7e..b90d781 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
@@ -24,7 +24,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Properties;
-import org.apache.iotdb.cluster.rpc.service.TSServiceClusterImpl;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
@@ -52,9 +52,8 @@ public class ClusterDescriptor {
   }
 
   /**
-   * Load an property file and set ClusterConfig variables.
-   * Change this method to public only for test.
-   * In most case, you should invoke this method.
+   * Load a property file and set ClusterConfig variables. This method was made public only for
+   * tests; in most cases, you should invoke this method.
    */
   public void loadProps() {
     ioTDBConf.setRpcImplClassName(TSServiceClusterImpl.class.getName());
@@ -104,9 +103,11 @@ public class ClusterDescriptor {
 
       conf.setRaftLogPath(properties.getProperty("raft_log_path", conf.getRaftLogPath()));
 
-      conf.setRaftSnapshotPath(properties.getProperty("raft_snapshot_path", conf.getRaftSnapshotPath()));
+      conf.setRaftSnapshotPath(
+          properties.getProperty("raft_snapshot_path", conf.getRaftSnapshotPath()));
 
-      conf.setRaftMetadataPath(properties.getProperty("raft_metadata_path", conf.getRaftMetadataPath()));
+      conf.setRaftMetadataPath(
+          properties.getProperty("raft_metadata_path", conf.getRaftMetadataPath()));
 
       conf.setElectionTimeoutMs(Integer
           .parseInt(properties.getProperty("election_timeout_ms",
@@ -155,10 +156,26 @@ public class ClusterDescriptor {
       conf.setConcurrentQPSubTaskThread(Integer
           .parseInt(properties.getProperty("concurrent_qp_sub_task_thread",
               Integer.toString(conf.getConcurrentQPSubTaskThread()))));
+
+      conf.setBatchReadSize(Integer.parseInt(properties.getProperty("batch_read_size",
+          Integer.toString(conf.getBatchReadSize()))));
+
+      conf.setMaxCachedBatchDataListSize(Integer.parseInt(properties
+          .getProperty("max_cached_batch_data_list_size",
+              Integer.toString(conf.getMaxCachedBatchDataListSize()))));
+
       if (conf.getConcurrentQPSubTaskThread() <= 0) {
         conf.setConcurrentQPSubTaskThread(Runtime.getRuntime().availableProcessors() * 10);
       }
 
+      if (conf.getMaxCachedBatchDataListSize() <= 0) {
+        conf.setMaxCachedBatchDataListSize(2);
+      }
+
+      if (conf.getBatchReadSize() <= 0) {
+        conf.setBatchReadSize(10000);
+      }
+
     } catch (IOException e) {
       LOGGER.warn("Cannot load config file because, use default configuration", e);
     } catch (Exception e) {
@@ -174,6 +191,7 @@ public class ClusterDescriptor {
   }
 
   private static class ClusterDescriptorHolder {
+
     private static final ClusterDescriptor INSTANCE = new ClusterDescriptor();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
index 4b2ebef..0efb70d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@ -32,13 +32,17 @@ import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
-import org.apache.iotdb.cluster.rpc.raft.processor.DataGroupNonQueryAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.MetaGroupNonQueryAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetadataAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetadataInStringAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.QueryPathsAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.QuerySeriesTypeAsyncProcessor;
-import org.apache.iotdb.cluster.rpc.raft.processor.QueryTimeSeriesAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.DataGroupNonQueryAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.MetaGroupNonQueryAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querydata.CloseSeriesReaderSyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querydata.InitSeriesReaderSyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querydata.QuerySeriesDataByTimestampSyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querydata.QuerySeriesDataSyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataInStringAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryPathsAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QuerySeriesTypeAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryTimeSeriesAsyncProcessor;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
@@ -96,13 +100,9 @@ public class Server {
     RpcServer rpcServer = new RpcServer(serverId.getPort());
     RaftRpcServerFactory.addRaftRequestProcessors(rpcServer);
 
-    rpcServer.registerUserProcessor(new DataGroupNonQueryAsyncProcessor());
-    rpcServer.registerUserProcessor(new MetaGroupNonQueryAsyncProcessor());
-    rpcServer.registerUserProcessor(new QueryTimeSeriesAsyncProcessor());
-    rpcServer.registerUserProcessor(new QueryMetadataInStringAsyncProcessor());
-    rpcServer.registerUserProcessor(new QueryMetadataAsyncProcessor());
-    rpcServer.registerUserProcessor(new QuerySeriesTypeAsyncProcessor());
-    rpcServer.registerUserProcessor(new QueryPathsAsyncProcessor());
+    registerNonQueryProcessor(rpcServer);
+    registerQueryMetadataProcessor(rpcServer);
+    registerQueryDataProcessor(rpcServer);
 
     metadataHolder = new MetadataRaftHolder(peerIds, serverId, rpcServer, true);
     metadataHolder.init();
@@ -128,6 +128,26 @@ public class Server {
 
   }
 
+  private void registerNonQueryProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new DataGroupNonQueryAsyncProcessor());
+    rpcServer.registerUserProcessor(new MetaGroupNonQueryAsyncProcessor());
+  }
+
+  private void registerQueryMetadataProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new QueryTimeSeriesAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryMetadataInStringAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryMetadataAsyncProcessor());
+    rpcServer.registerUserProcessor(new QuerySeriesTypeAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryPathsAsyncProcessor());
+  }
+
+  private void registerQueryDataProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new InitSeriesReaderSyncProcessor());
+    rpcServer.registerUserProcessor(new QuerySeriesDataSyncProcessor());
+    rpcServer.registerUserProcessor(new QuerySeriesDataByTimestampSyncProcessor());
+    rpcServer.registerUserProcessor(new CloseSeriesReaderSyncProcessor());
+  }
+
   public void stop() throws ProcessorException, InterruptedException {
     QPTaskManager.getInstance().close(true, ClusterConstant.CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT);
     iotdb.deactivate();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
index ebac074..b8c6f43 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
@@ -31,7 +31,7 @@ import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
-import org.apache.iotdb.cluster.rpc.raft.request.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -41,7 +41,7 @@ import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
 import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
-import org.apache.iotdb.db.writelog.transfer.PhysicalPlanLogTransfer;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
index 9592718..3cc9001 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
@@ -32,7 +32,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
-import org.apache.iotdb.cluster.rpc.raft.request.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -43,7 +43,7 @@ import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.sys.AuthorPlan;
 import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
-import org.apache.iotdb.db.writelog.transfer.PhysicalPlanLogTransfer;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/ClusterQPExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/ClusterQPExecutor.java
deleted file mode 100644
index b9debe3..0000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/ClusterQPExecutor.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.cluster.qp;
-
-import com.alipay.sofa.jraft.entity.PeerId;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.iotdb.cluster.qp.callback.QPTask;
-import org.apache.iotdb.cluster.qp.callback.QPTask.TaskState;
-import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
-import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.cluster.entity.Server;
-import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
-import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
-import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
-import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
-import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.utils.RaftUtils;
-import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
-import org.apache.iotdb.cluster.utils.hash.Router;
-import org.apache.iotdb.db.exception.PathErrorException;
-import org.apache.iotdb.db.metadata.MManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class ClusterQPExecutor {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterQPExecutor.class);
-
-  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
-
-  /**
-   * Raft as client manager.
-   */
-  private static final RaftNodeAsClientManager CLIENT_MANAGER = RaftNodeAsClientManager
-      .getInstance();
-
-  protected Router router = Router.getInstance();
-
-  private PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
-      CLUSTER_CONFIG.getPort());
-
-  protected MManager mManager = MManager.getInstance();
-
-  protected final Server server = Server.getInstance();
-
-  /**
-   * The task in progress.
-   */
-  protected QPTask currentTask;
-
-  /**
-   * Count limit to redo a single task
-   */
-  private static final int TASK_MAX_RETRY = CLUSTER_CONFIG.getQpTaskRedoCount();
-
-  /**
-   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
-   */
-  protected int readMetadataConsistencyLevel = CLUSTER_CONFIG.getReadMetadataConsistencyLevel();
-
-  /**
-   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
-   */
-  private int readDataConsistencyLevel = CLUSTER_CONFIG.getReadDataConsistencyLevel();
-
-  /**
-   * Get Storage Group Name by device name
-   */
-  protected String getStroageGroupByDevice(String device) throws PathErrorException {
-    String storageGroup;
-    try {
-      storageGroup = MManager.getInstance().getFileNameByPath(device);
-    } catch (PathErrorException e) {
-      throw new PathErrorException(String.format("File level of %s doesn't exist.", device));
-    }
-    return storageGroup;
-  }
-
-  /**
-   * Get all Storage Group Names by path
-   */
-  public List<String> getAllStroageGroupsByPath(String path) throws PathErrorException {
-    List<String> storageGroupList;
-    try {
-      storageGroupList = mManager.getAllFileNamesByPath(path);
-    } catch (PathErrorException e) {
-      throw new PathErrorException(String.format("File level of %s doesn't exist.", path));
-    }
-    return storageGroupList;
-  }
-
-  /**
-   * Classify the input storage group list by which data group it belongs to.
-   *
-   * @return key is groupId, value is all SGs belong to this data group
-   */
-  protected Map<String, Set<String>> classifySGByGroupId(List<String> sgList) {
-    Map<String, Set<String>> map = new HashMap<>();
-    for (int i = 0; i < sgList.size(); i++) {
-      String sg = sgList.get(i);
-      String groupId = getGroupIdBySG(sg);
-      if (map.containsKey(groupId)) {
-        map.get(groupId).add(sg);
-      } else {
-        Set<String> set = new HashSet<>();
-        set.add(sg);
-        map.put(groupId, set);
-      }
-    }
-    return map;
-  }
-
-  /**
-   * Get raft group id by storage group name
-   */
-  protected String getGroupIdBySG(String storageGroup) {
-    return router.getGroupID(router.routeGroup(storageGroup));
-  }
-
-  /**
-   * Check if the non query command can execute in local. 1. If this node belongs to the storage
-   * group 2. If this node is leader.
-   */
-  public boolean canHandleNonQueryByGroupId(String groupId) {
-    boolean canHandle = false;
-    if(groupId.equals(ClusterConfig.METADATA_GROUP_ID)){
-      canHandle = ((MetadataRaftHolder) (server.getMetadataHolder())).getFsm().isLeader();
-    }else {
-      if (router.containPhysicalNodeByGroupId(groupId, localNode) && RaftUtils
-          .getPhysicalNodeFrom(RaftUtils.getLeaderPeerID(groupId)).equals(localNode)) {
-        canHandle = true;
-      }
-    }
-    return canHandle;
-  }
-
-  /**
-   * Check if the query command can execute in local. Check if this node belongs to the group id
-   */
-  protected boolean canHandleQueryByGroupId(String groupId) {
-    return router.containPhysicalNodeByGroupId(groupId, localNode);
-  }
-
-  /**
-   * Async handle QPTask by QPTask and leader id
-   *
-   * @param task request QPTask
-   * @param leader leader of the target raft group
-   * @param taskRetryNum Number of QPTask retries due to timeout and redirected.
-   * @return basic response
-   */
-  protected BasicResponse asyncHandleNonQueryTaskGetRes(QPTask task, PeerId leader,
-      int taskRetryNum)
-      throws InterruptedException, RaftConnectionException {
-    asyncSendNonQueryTask(task, leader, taskRetryNum);
-    return asyncGetNonQueryRes(task, leader, taskRetryNum);
-  }
-
-  /**
-   * Asynchronous send rpc task via client
-   *
-   * @param task rpc task
-   * @param leader leader node of the group
-   * @param taskRetryNum Retry time of the task
-   */
-  public void asyncSendNonQueryTask(QPTask task, PeerId leader, int taskRetryNum)
-      throws RaftConnectionException {
-    if (taskRetryNum >= TASK_MAX_RETRY) {
-      throw new RaftConnectionException(String.format("QPTask retries reach the upper bound %s",
-          TASK_MAX_RETRY));
-    }
-    NodeAsClient client = getRaftNodeAsClient();
-    /** Call async method **/
-    client.asyncHandleRequest(task.getRequest(), leader, task);
-  }
-
-  /**
-   * try to get raft rpc client
-   */
-  private NodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
-    return CLIENT_MANAGER.getRaftNodeAsClient();
-  }
-
-  /**
-   * Asynchronous get task response. If it's redirected or status is exception, the task needs to be
-   * resent. Note: If status is Exception, it marks that an exception occurred during the task is
-   * being sent instead of executed.
-   *
-   * @param task rpc task
-   * @param leader leader node of the group
-   * @param taskRetryNum Retry time of the task
-   */
-  private BasicResponse asyncGetNonQueryRes(QPTask task, PeerId leader, int taskRetryNum)
-      throws InterruptedException, RaftConnectionException {
-    task.await();
-    if (task.getTaskState() != TaskState.FINISH) {
-      if (task.getTaskState() == TaskState.REDIRECT) {
-        /** redirect to the right leader **/
-        leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
-        LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
-        RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
-      }
-      task.resetTask();
-      return asyncHandleNonQueryTaskGetRes(task, leader, taskRetryNum + 1);
-    }
-    return task.getResponse();
-  }
-
-  public void shutdown() {
-    if (currentTask != null) {
-      currentTask.shutdown();
-    }
-  }
-
-  public void setReadMetadataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      this.readMetadataConsistencyLevel = level;
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
-  }
-
-  public void setReadDataConsistencyLevel(int level) throws ConsistencyLevelException {
-    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
-      this.readDataConsistencyLevel = level;
-    } else {
-      throw new ConsistencyLevelException(String.format("Consistency level %d not support", level));
-    }
-  }
-
-  public int getReadMetadataConsistencyLevel() {
-    return readMetadataConsistencyLevel;
-  }
-
-  public int getReadDataConsistencyLevel() {
-    return readDataConsistencyLevel;
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
new file mode 100644
index 0000000..492b7ad
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.metadata.MManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractQPExecutor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(AbstractQPExecutor.class);
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
+  protected Router router = Router.getInstance();
+
+  protected MManager mManager = MManager.getInstance();
+
+  protected final Server server = Server.getInstance();
+
+  /**
+   * The task in progress.
+   */
+  protected ThreadLocal<QPTask> currentTask = new ThreadLocal<>();
+
+  /**
+   * Count limit to redo a single task
+   */
+  private static final int TASK_MAX_RETRY = CLUSTER_CONFIG.getQpTaskRedoCount();
+
+  /**
+   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private ThreadLocal<Integer> readMetadataConsistencyLevel = new ThreadLocal<>();
+
+  /**
+   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private ThreadLocal<Integer> readDataConsistencyLevel = new ThreadLocal<>();
+
+  public AbstractQPExecutor() {
+  }
+
+  /**
+   * Ensure the <code>ThreadLocal</code> consistency levels are initialized.
+   */
+  private void checkInitConsistencyLevel() {
+    if (readMetadataConsistencyLevel.get() == null) {
+      readMetadataConsistencyLevel.set(CLUSTER_CONFIG.getReadMetadataConsistencyLevel());
+    }
+    if (readDataConsistencyLevel.get() == null) {
+      readDataConsistencyLevel.set(CLUSTER_CONFIG.getReadDataConsistencyLevel());
+    }
+  }
+
+  /**
+   * Async handle QPTask by QPTask and leader id
+   *
+   * @param task request QPTask
+   * @param leader leader of the target raft group
+   * @param taskRetryNum Number of QPTask retries due to timeout and redirected.
+   * @return basic response
+   */
+  protected BasicResponse asyncHandleNonQuerySingleTaskGetRes(SingleQPTask task, PeerId leader,
+      int taskRetryNum)
+      throws InterruptedException, RaftConnectionException {
+    asyncSendNonQuerySingleTask(task, leader, taskRetryNum);
+    return syncGetNonQueryRes(task, leader, taskRetryNum);
+  }
+
+  /**
+   * Asynchronously send an rpc task via the client.
+   *
+   * @param task rpc task
+   * @param leader leader node of the group
+   * @param taskRetryNum retry count of the task
+   */
+  protected void asyncSendNonQuerySingleTask(SingleQPTask task, PeerId leader, int taskRetryNum)
+      throws RaftConnectionException {
+    if (taskRetryNum >= TASK_MAX_RETRY) {
+      throw new RaftConnectionException(String.format("QPTask retries reach the upper bound %s",
+          TASK_MAX_RETRY));
+    }
+    NodeAsClient client = RaftUtils.getRaftNodeAsClient();
+    /** Call async method **/
+    client.asyncHandleRequest(task.getRequest(), leader, task);
+  }
+
+  /**
+   * Synchronously get the task response. If the task was redirected or its status is exception,
+   * it needs to be resent. Note: an Exception status means an exception occurred while the task
+   * was being sent, not while it was executed.
+   *
+   * @param task rpc task
+   * @param leader leader node of the group
+   * @param taskRetryNum retry count of the task
+   */
+  private BasicResponse syncGetNonQueryRes(SingleQPTask task, PeerId leader, int taskRetryNum)
+      throws InterruptedException, RaftConnectionException {
+    task.await();
+    if (task.getTaskState() != TaskState.FINISH) {
+      if (task.getTaskState() == TaskState.REDIRECT) {
+        /** redirect to the right leader **/
+        leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
+        LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
+        RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+      } else {
+        String groupId = task.getRequest().getGroupID();
+        RaftUtils.removeCachedRaftGroupLeader(groupId);
+        LOGGER.debug("Remove cached raft group leader of {}", groupId);
+        leader = RaftUtils.getLeaderPeerID(groupId);
+      }
+      task.resetTask();
+      return asyncHandleNonQuerySingleTaskGetRes(task, leader, taskRetryNum + 1);
+    }
+    return task.getResponse();
+  }
+
+  public void shutdown() {
+    if (currentTask.get() != null) {
+      currentTask.get().shutdown();
+    }
+  }
+
+  public void setReadMetadataConsistencyLevel(int level) throws ConsistencyLevelException {
+    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
+      readMetadataConsistencyLevel.set(level);
+    } else {
+      throw new ConsistencyLevelException(String.format("Consistency level %d is not supported", level));
+    }
+  }
+
+  public void setReadDataConsistencyLevel(int level) throws ConsistencyLevelException {
+    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
+      readDataConsistencyLevel.set(level);
+    } else {
+      throw new ConsistencyLevelException(String.format("Consistency level %d is not supported", level));
+    }
+  }
+
+  public int getReadMetadataConsistencyLevel() {
+    checkInitConsistencyLevel();
+    return readMetadataConsistencyLevel.get();
+  }
+
+  public int getReadDataConsistencyLevel() {
+    checkInitConsistencyLevel();
+    return readDataConsistencyLevel.get();
+  }
+}
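
Because the consistency levels are now ThreadLocal, each server thread
(one per client connection) keeps its own setting. A minimal sketch of
the resulting isolation, assuming any concrete subclass such as
QueryMetadataExecutor:

    AbstractQPExecutor executor = new QueryMetadataExecutor();
    // Thread A pins strong consistency; the ThreadLocal keeps this private to A.
    new Thread(() -> {
      try {
        executor.setReadMetadataConsistencyLevel(1);
      } catch (ConsistencyLevelException e) {
        // unreachable: 1 <= MAX_CONSISTENCY_LEVEL
      }
    }).start();
    // Thread B never set a level, so it reads the ClusterConfig default.
    new Thread(() ->
        System.out.println(executor.getReadMetadataConsistencyLevel())).start();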
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
new file mode 100644
index 0000000..c5032fc
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.executor.ClusterQueryRouter;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.qp.constant.SQLConstant;
+import org.apache.iotdb.db.qp.executor.IQueryProcessExecutor;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
+import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements IQueryProcessExecutor {
+
+  private ThreadLocal<Integer> fetchSize = new ThreadLocal<>();
+  private ClusterQueryRouter clusterQueryRouter = new ClusterQueryRouter();
+
+  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+
+  @Override
+  public QueryDataSet processQuery(QueryPlan queryPlan, QueryContext context)
+      throws IOException, FileNodeManagerException, PathErrorException,
+      QueryFilterOptimizationException, ProcessorException {
+
+    QueryExpression queryExpression = QueryExpression.create().setSelectSeries(queryPlan.getPaths())
+        .setExpression(queryPlan.getExpression());
+    clusterQueryRouter.setReadDataConsistencyLevel(getReadDataConsistencyLevel());
+    if (queryPlan instanceof GroupByPlan) {
+      GroupByPlan groupByPlan = (GroupByPlan) queryPlan;
+      return groupBy(groupByPlan.getPaths(), groupByPlan.getAggregations(),
+          groupByPlan.getExpression(), groupByPlan.getUnit(), groupByPlan.getOrigin(),
+          groupByPlan.getIntervals(), context);
+    }
+
+    if (queryPlan instanceof AggregationPlan) {
+      return aggregate(queryPlan.getPaths(), queryPlan.getAggregations(),
+          queryPlan.getExpression(), context);
+    }
+
+    if (queryPlan instanceof FillQueryPlan) {
+      FillQueryPlan fillQueryPlan = (FillQueryPlan) queryPlan;
+      return fill(queryPlan.getPaths(), fillQueryPlan.getQueryTime(),
+          fillQueryPlan.getFillType(), context);
+    }
+    return clusterQueryRouter.query(queryExpression, context);
+  }
+
+  @Override
+  public QueryDataSet aggregate(List<Path> paths, List<String> aggres, IExpression expression,
+      QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException, QueryFilterOptimizationException {
+    return clusterQueryRouter.aggregate(paths, aggres, expression, context);
+  }
+
+  @Override
+  public QueryDataSet groupBy(List<Path> paths, List<String> aggres, IExpression expression,
+      long unit, long origin, List<Pair<Long, Long>> intervals, QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException, QueryFilterOptimizationException {
+    return clusterQueryRouter.groupBy(paths, aggres, expression, unit, origin, intervals, context);
+  }
+
+  @Override
+  public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillTypes,
+      QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException {
+    return clusterQueryRouter.fill(fillPaths, queryTime, fillTypes, context);
+  }
+
+  @Override
+  public TSDataType getSeriesType(Path path) throws PathErrorException {
+    if (path.getFullPath().equals(SQLConstant.RESERVED_TIME)) {
+      return TSDataType.INT64;
+    }
+    if (path.getFullPath().equals(SQLConstant.RESERVED_FREQ)) {
+      return TSDataType.FLOAT;
+    }
+    try {
+      return queryMetadataExecutor.processSeriesTypeQuery(path.getFullPath());
+    } catch (InterruptedException | ProcessorException e) {
+      throw new PathErrorException(e.getMessage());
+    }
+  }
+
+  @Override
+  public List<String> getAllPaths(String originPath)
+      throws PathErrorException {
+    try {
+      return queryMetadataExecutor.processPathsQuery(originPath);
+    } catch (InterruptedException | ProcessorException e) {
+      throw new PathErrorException(e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean judgePathExists(Path fullPath) {
+    try {
+      List<List<String>> results = queryMetadataExecutor.processTimeSeriesQuery(fullPath.toString());
+      return !results.isEmpty();
+    } catch (InterruptedException | PathErrorException | ProcessorException e) {
+      return false;
+    }
+  }
+
+  @Override
+  public int getFetchSize() {
+    return fetchSize.get();
+  }
+
+  @Override
+  public void setFetchSize(int fetchSize) {
+    this.fetchSize.set(fetchSize);
+  }
+
+  @Override
+  public boolean update(Path path, long startTime, long endTime, String value)
+      throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean delete(List<Path> paths, long deleteTime) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean delete(Path path, long deleteTime) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int insert(Path path, long insertTime, String value) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int multiInsert(String deviceId, long insertTime, List<String> measurementList,
+      List<String> insertValues) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean processNonQuery(PhysicalPlan plan) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+}
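
Note: ClusterQueryProcessExecutor is read-only (every write path throws
UnsupportedOperationException), and processQuery dispatches by plan subtype before
falling through to a raw-series query. A hedged caller-side sketch (plan and context
construction are elided; the fetch size value is illustrative):

    // Illustrative usage; queryPlan and context come from the surrounding engine.
    ClusterQueryProcessExecutor executor = new ClusterQueryProcessExecutor();
    executor.setFetchSize(1024); // kept per thread in a ThreadLocal
    QueryDataSet dataSet = executor.processQuery(queryPlan, context);
    while (dataSet.hasNext()) {
      RowRecord record = dataSet.next(); // one row per timestamp across the selected paths
      // consume record ...
    }
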
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
index c8c2a9b..1420370 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
@@ -34,17 +34,17 @@ import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.qp.ClusterQPExecutor;
-import org.apache.iotdb.cluster.qp.callback.BatchQPTask;
-import org.apache.iotdb.cluster.qp.callback.QPTask;
-import org.apache.iotdb.cluster.qp.callback.SingleQPTask;
+import org.apache.iotdb.cluster.qp.task.BatchQPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.DataGroupNonQueryRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.DataGroupNonQueryResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.MetaGroupNonQueryResponse;
-import org.apache.iotdb.cluster.rpc.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -61,7 +61,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Handle distributed non-query logic
  */
-public class NonQueryExecutor extends ClusterQPExecutor {
+public class NonQueryExecutor extends AbstractQPExecutor {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(NonQueryExecutor.class);
 
@@ -100,10 +100,13 @@ public class NonQueryExecutor extends ClusterQPExecutor {
    * @param batchResult batch result
    */
   public void processBatch(PhysicalPlan[] physicalPlans, BatchResult batchResult)
-      throws InterruptedException {
+      throws InterruptedException, ProcessorException {
 
     Status nullReadTaskStatus = Status.OK();
     RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
+    if (!nullReadTaskStatus.isOk()) {
+      throw new ProcessorException("Null read to metadata group failed while processing batch.");
+    }
     nullReaderEnable = false;
 
     /** 1. Classify physical plans by group id **/
@@ -112,12 +115,12 @@ public class NonQueryExecutor extends ClusterQPExecutor {
     classifyPhysicalPlanByGroupId(physicalPlans, batchResult, physicalPlansMap, planIndexMap);
 
     /** 2. Construct Multiple Data Group Requests **/
-    Map<String, QPTask> subTaskMap = new HashMap<>();
+    Map<String, SingleQPTask> subTaskMap = new HashMap<>();
     constructMultipleRequests(physicalPlansMap, planIndexMap, subTaskMap, batchResult);
 
     /** 3. Execute Multiple Sub Tasks **/
     BatchQPTask task = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
-    currentTask = task;
+    currentTask.set(task);
     task.execute(this);
     task.await();
     batchResult.setAllSuccessful(task.isAllSuccessful());
@@ -165,7 +168,7 @@ public class NonQueryExecutor extends ClusterQPExecutor {
    * Construct multiple data group requests
    */
   private void constructMultipleRequests(Map<String, List<PhysicalPlan>> physicalPlansMap,
-      Map<String, List<Integer>> planIndexMap, Map<String, QPTask> subTaskMap,
+      Map<String, List<Integer>> planIndexMap, Map<String, SingleQPTask> subTaskMap,
       BatchResult batchResult) {
     int[] result = batchResult.getResult();
     for (Entry<String, List<PhysicalPlan>> entry : physicalPlansMap.entrySet()) {
@@ -197,16 +200,16 @@ public class NonQueryExecutor extends ClusterQPExecutor {
     switch (plan.getOperatorType()) {
       case DELETE:
         storageGroup = getStorageGroupFromDeletePlan((DeletePlan) plan);
-        groupId = getGroupIdBySG(storageGroup);
+        groupId = router.getGroupIdBySG(storageGroup);
         break;
       case UPDATE:
         Path path = ((UpdatePlan) plan).getPath();
-        storageGroup = getStroageGroupByDevice(path.getDevice());
-        groupId = getGroupIdBySG(storageGroup);
+        storageGroup = QPExecutorUtils.getStroageGroupByDevice(path.getDevice());
+        groupId = router.getGroupIdBySG(storageGroup);
         break;
       case INSERT:
-        storageGroup = getStroageGroupByDevice(((InsertPlan) plan).getDeviceId());
-        groupId = getGroupIdBySG(storageGroup);
+        storageGroup = QPExecutorUtils.getStroageGroupByDevice(((InsertPlan) plan).getDeviceId());
+        groupId = router.getGroupIdBySG(storageGroup);
         break;
       case CREATE_ROLE:
       case DELETE_ROLE:
@@ -285,8 +288,8 @@ public class NonQueryExecutor extends ClusterQPExecutor {
       case ADD_PATH:
       case DELETE_PATH:
         String deviceId = path.getDevice();
-        String storageGroup = getStroageGroupByDevice(deviceId);
-        groupId = getGroupIdBySG(storageGroup);
+        String storageGroup = QPExecutorUtils.getStroageGroupByDevice(deviceId);
+        groupId = router.getGroupIdBySG(storageGroup);
         break;
       case SET_FILE_LEVEL:
         boolean fileLevelExist = mManager.checkStorageLevelOfMTree(path.getFullPath());
@@ -316,11 +319,11 @@ public class NonQueryExecutor extends ClusterQPExecutor {
     } else {
       request = new DataGroupNonQueryRequest(groupId, plans);
     }
-    QPTask qpTask = new SingleQPTask(false, request);
-    currentTask = qpTask;
+    SingleQPTask qpTask = new SingleQPTask(false, request);
+    currentTask.set(qpTask);
 
     /** Check if the plan can be executed locally. **/
-    if (canHandleNonQueryByGroupId(groupId)) {
+    if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
       return handleNonQueryRequestLocally(groupId, qpTask);
     } else {
       PeerId leader = RaftUtils.getLeaderPeerID(groupId);
@@ -358,9 +361,9 @@ public class NonQueryExecutor extends ClusterQPExecutor {
    * @param leader leader of the target raft group
    * @return request result
    */
-  public boolean asyncHandleNonQueryTask(QPTask task, PeerId leader)
+  public boolean asyncHandleNonQueryTask(SingleQPTask task, PeerId leader)
       throws RaftConnectionException, InterruptedException {
-    BasicResponse response = asyncHandleNonQueryTaskGetRes(task, leader, 0);
+    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
     return response != null && response.isSuccess();
   }
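
Note: the reworked processBatch has a scatter/gather shape: plans are bucketed by data
group id, each bucket becomes one SingleQPTask wrapping a DataGroupNonQueryRequest, and a
BatchQPTask fans the sub-tasks out and awaits them. A condensed sketch of that flow, with
the group-id lookup (groupIdOf) left hypothetical:

    // Condensed sketch of the classify/construct/execute phases; groupIdOf is illustrative.
    Map<String, List<PhysicalPlan>> plansByGroup = new HashMap<>();
    for (PhysicalPlan plan : physicalPlans) {
      plansByGroup.computeIfAbsent(groupIdOf(plan), k -> new ArrayList<>()).add(plan);
    }

    Map<String, SingleQPTask> subTaskMap = new HashMap<>();
    for (Map.Entry<String, List<PhysicalPlan>> e : plansByGroup.entrySet()) {
      subTaskMap.put(e.getKey(),
          new SingleQPTask(false, new DataGroupNonQueryRequest(e.getKey(), e.getValue())));
    }

    BatchQPTask task = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
    currentTask.set(task);
    task.execute(this); // local groups run on the task pool, remote ones over raft RPC
    task.await();
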
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
index 1dfbc7e..82325e1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@ -26,27 +26,26 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import org.apache.iotdb.cluster.qp.callback.SingleQPTask;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterConstant;
-import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.qp.ClusterQPExecutor;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryMetadataInStringRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryMetadataRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryPathsRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.QuerySeriesTypeRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryStorageGroupRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryTimeSeriesRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QuerySeriesTypeRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryStorageGroupRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryTimeSeriesRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryMetadataInStringResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryMetadataResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryPathsResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QuerySeriesTypeResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryStorageGroupResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataInStringResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryStorageGroupResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -59,7 +58,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Handle < show timeseries <path> > logic
  */
-public class QueryMetadataExecutor extends ClusterQPExecutor {
+public class QueryMetadataExecutor extends AbstractQPExecutor {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(QueryMetadataExecutor.class);
   private static final String DOUB_SEPARATOR = "\\.";
@@ -83,7 +82,7 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
     if (storageGroupList.isEmpty()) {
       return new ArrayList<>();
     } else {
-      Map<String, Set<String>> groupIdSGMap = classifySGByGroupId(storageGroupList);
+      Map<String, Set<String>> groupIdSGMap = QPExecutorUtils.classifySGByGroupId(storageGroupList);
       for (Entry<String, Set<String>> entry : groupIdSGMap.entrySet()) {
         List<String> paths = getSubQueryPaths(entry.getValue(), path);
         String groupId = entry.getKey();
@@ -126,13 +125,13 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
   private void handleTimseriesQuery(String groupId, List<String> pathList, List<List<String>> res)
       throws ProcessorException, InterruptedException {
     QueryTimeSeriesRequest request = new QueryTimeSeriesRequest(groupId,
-        readMetadataConsistencyLevel, pathList);
+        getReadMetadataConsistencyLevel(), pathList);
     SingleQPTask task = new SingleQPTask(false, request);
 
     LOGGER.debug("Execute show timeseries {} statement for group {}.", pathList, groupId);
     PeerId holder;
     /** Check if the plan can be executed locally. **/
-    if (canHandleQueryByGroupId(groupId)) {
+    if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
       LOGGER.debug("Execute show timeseries {} statement locally for group {} by sending request to local node.", pathList, groupId);
       holder = this.server.getServerId();
     } else {
@@ -153,21 +152,21 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
     List<SingleQPTask> taskList = new ArrayList<>();
     for (String groupId : groupIdSet) {
       QueryMetadataInStringRequest request = new QueryMetadataInStringRequest(groupId,
-          readMetadataConsistencyLevel);
+          getReadMetadataConsistencyLevel());
       SingleQPTask task = new SingleQPTask(false, request);
       taskList.add(task);
 
       LOGGER.debug("Execute show metadata in string statement for group {}.", groupId);
       PeerId holder;
       /** Check if the plan can be executed locally. **/
-      if (canHandleQueryByGroupId(groupId)) {
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
         LOGGER.debug("Execute show metadata in string statement locally for group {} by sending request to local node.", groupId);
         holder = this.server.getServerId();
       } else {
         holder = RaftUtils.getRandomPeerID(groupId);
       }
       try {
-        asyncSendNonQueryTask(task, holder, 0);
+        asyncSendNonQuerySingleTask(task, holder, 0);
       } catch (RaftConnectionException e) {
         throw new ProcessorException("Raft connection occurs error.", e);
       }
@@ -192,21 +191,21 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
     List<SingleQPTask> taskList = new ArrayList<>();
     for (String groupId : groupIdSet) {
       QueryMetadataRequest request = new QueryMetadataRequest(groupId,
-          readMetadataConsistencyLevel);
+          getReadMetadataConsistencyLevel());
       SingleQPTask task = new SingleQPTask(false, request);
       taskList.add(task);
 
       LOGGER.debug("Execute query metadata statement for group {}.", groupId);
       PeerId holder;
       /** Check if the plan can be executed locally. **/
-      if (canHandleQueryByGroupId(groupId)) {
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
         LOGGER.debug("Execute query metadata statement locally for group {} by sending request to local node.", groupId);
         holder = this.server.getServerId();
       } else {
         holder = RaftUtils.getRandomPeerID(groupId);
       }
       try {
-        asyncSendNonQueryTask(task, holder, 0);
+        asyncSendNonQuerySingleTask(task, holder, 0);
       } catch (RaftConnectionException e) {
         throw new ProcessorException("Raft connection occurs error.", e);
       }
@@ -229,20 +228,20 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
 
   public TSDataType processSeriesTypeQuery(String path)
       throws InterruptedException, ProcessorException, PathErrorException {
-    TSDataType dataType = null;
+    TSDataType dataType;
     List<String> storageGroupList = mManager.getAllFileNamesByPath(path);
     if (storageGroupList.size() != 1) {
       throw new PathErrorException("path " + path + " is not valid.");
     } else {
-      String groupId = getGroupIdBySG(storageGroupList.get(0));
+      String groupId = router.getGroupIdBySG(storageGroupList.get(0));
       QuerySeriesTypeRequest request = new QuerySeriesTypeRequest(groupId,
-          readMetadataConsistencyLevel, path);
+          getReadMetadataConsistencyLevel(), path);
       SingleQPTask task = new SingleQPTask(false, request);
 
       LOGGER.debug("Execute get series type for {} statement for group {}.", path, groupId);
       PeerId holder;
       /** Check if the plan can be executed locally. **/
-      if (canHandleQueryByGroupId(groupId)) {
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
         LOGGER.debug("Execute get series type for {} statement locally for group {} by sending request to local node.", path, groupId);
         holder = this.server.getServerId();
       } else {
@@ -267,7 +266,7 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
     if (storageGroupList.isEmpty()) {
       return new ArrayList<>();
     } else {
-      Map<String, Set<String>> groupIdSGMap = classifySGByGroupId(storageGroupList);
+      Map<String, Set<String>> groupIdSGMap = QPExecutorUtils.classifySGByGroupId(storageGroupList);
       for (Entry<String, Set<String>> entry : groupIdSGMap.entrySet()) {
         List<String> paths = getSubQueryPaths(entry.getValue(), path);
         String groupId = entry.getKey();
@@ -285,13 +284,13 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
   private void handlePathsQuery(String groupId, List<String> pathList, List<String> res)
       throws ProcessorException, InterruptedException {
     QueryPathsRequest request = new QueryPathsRequest(groupId,
-        readMetadataConsistencyLevel, pathList);
+        getReadMetadataConsistencyLevel(), pathList);
     SingleQPTask task = new SingleQPTask(false, request);
 
     LOGGER.debug("Execute get paths for {} statement for group {}.", pathList, groupId);
     PeerId holder;
     /** Check if the plan can be executed locally. **/
-    if (canHandleQueryByGroupId(groupId)) {
+    if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
       LOGGER.debug("Execute get paths for {} statement locally for group {} by sending request to local node.", pathList, groupId);
       holder = this.server.getServerId();
     } else {
@@ -306,14 +305,14 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
 
   private List<List<String>> queryTimeSeries(SingleQPTask task, PeerId leader)
       throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQueryTaskGetRes(task, leader, 0);
+    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
     return response == null ? new ArrayList<>()
         : ((QueryTimeSeriesResponse) response).getTimeSeries();
   }
 
   private TSDataType querySeriesType(SingleQPTask task, PeerId leader)
       throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQueryTaskGetRes(task, leader, 0);
+    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
     return response == null ? null
         : ((QuerySeriesTypeResponse) response).getDataType();
   }
@@ -326,10 +325,10 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
   private Set<String> queryStorageGroupLocally() throws InterruptedException {
     final byte[] reqContext = RaftUtils.createRaftRequestContext();
     QueryStorageGroupRequest request = new QueryStorageGroupRequest(
-        ClusterConfig.METADATA_GROUP_ID, readMetadataConsistencyLevel);
+        ClusterConfig.METADATA_GROUP_ID, getReadMetadataConsistencyLevel());
     SingleQPTask task = new SingleQPTask(false, request);
     MetadataRaftHolder metadataHolder = (MetadataRaftHolder) server.getMetadataHolder();
-    if (readMetadataConsistencyLevel == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+    if (getReadMetadataConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
       QueryStorageGroupResponse response;
       try {
         response = QueryStorageGroupResponse
@@ -365,7 +364,7 @@ public class QueryMetadataExecutor extends ClusterQPExecutor {
 
   private List<String> queryPaths(SingleQPTask task, PeerId leader)
       throws InterruptedException, RaftConnectionException {
-    BasicResponse response = asyncHandleNonQueryTaskGetRes(task, leader, 0);
+    BasicResponse response = asyncHandleNonQuerySingleTaskGetRes(task, leader, 0);
     return response == null ? new ArrayList<>()
         : ((QueryPathsResponse) response).getPaths();
   }
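
Note: every metadata query in this file now repeats the same holder-selection rule: serve
the read locally when this node belongs to the target group, otherwise pick a random peer
of that group. Extracted as a sketch (the method name is illustrative):

    // Illustrative extraction of the repeated holder-selection rule.
    private PeerId chooseHolder(String groupId) {
      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
        return server.getServerId();             // this node hosts the group: read locally
      }
      return RaftUtils.getRandomPeerID(groupId); // otherwise spread reads across peers
    }
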
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/BatchQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/BatchQPTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
index 2706388..43edd67 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/BatchQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.callback;
+package org.apache.iotdb.cluster.qp.task;
 
 import com.alipay.sofa.jraft.entity.PeerId;
 import java.sql.Statement;
@@ -30,8 +30,9 @@ import org.apache.iotdb.cluster.concurrent.pool.QPTaskManager;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.DataGroupNonQueryResponse;
-import org.apache.iotdb.cluster.rpc.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -73,7 +74,7 @@ public class BatchQPTask extends MultiQPTask {
   private NonQueryExecutor executor;
 
 
-  public BatchQPTask(int taskNum, BatchResult batchResult, Map<String, QPTask> taskMap,
+  public BatchQPTask(int taskNum, BatchResult batchResult, Map<String, SingleQPTask> taskMap,
       Map<String, List<Integer>> planIndexMap) {
     super(false, taskNum, TaskType.BATCH);
     this.batchResult = batchResult.getResult();
@@ -117,11 +118,11 @@ public class BatchQPTask extends MultiQPTask {
   public void execute(NonQueryExecutor executor) {
     this.executor = executor;
 
-    for (Entry<String, QPTask> entry : taskMap.entrySet()) {
+    for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
       String groupId = entry.getKey();
-      QPTask subTask = entry.getValue();
+      SingleQPTask subTask = entry.getValue();
       Future<?> taskThread;
-      if (executor.canHandleNonQueryByGroupId(groupId)) {
+      if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
         taskThread = QPTaskManager.getInstance()
             .submit(() -> executeLocalSubTask(subTask, groupId));
       } else {
@@ -149,7 +150,7 @@ public class BatchQPTask extends MultiQPTask {
   /**
    * Execute RPC sub task
    */
-  private void executeRpcSubTask(QPTask subTask, PeerId leader, String groupId) {
+  private void executeRpcSubTask(SingleQPTask subTask, PeerId leader, String groupId) {
     try {
       executor.asyncHandleNonQueryTask(subTask, leader);
       this.run(subTask.getResponse());
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/MultiQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/MultiQPTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java
index f400eaf..e451f3e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/MultiQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.callback;
+package org.apache.iotdb.cluster.qp.task;
 
 import java.util.Map;
 import java.util.concurrent.Future;
@@ -26,7 +26,7 @@ public abstract class MultiQPTask extends QPTask {
   /**
    * Each request is corresponding to a group id. String: group id
    */
-  Map<String, QPTask> taskMap;
+  Map<String, SingleQPTask> taskMap;
 
   /**
    * Each future task handle a request in taskMap, which is corresponding to a group id. String:
@@ -45,6 +45,8 @@ public abstract class MultiQPTask extends QPTask {
         task.cancel(true);
       }
     }
-    this.taskCountDownLatch.countDown();
+    while (taskCountDownLatch.getCount() != 0) {
+      this.taskCountDownLatch.countDown();
+    }
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/QPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
similarity index 98%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/QPTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
index fd21f3f..96a517a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/QPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.callback;
+package org.apache.iotdb.cluster.qp.task;
 
 import java.util.concurrent.CountDownLatch;
 import org.apache.iotdb.cluster.entity.Server;
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/basic/BinaryFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
old mode 100755
new mode 100644
similarity index 55%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/basic/BinaryFilter.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
index aeba875..f4cb4b5
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/filter/basic/BinaryFilter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QueryTask.java
@@ -16,35 +16,34 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.filter.basic;
+package org.apache.iotdb.cluster.qp.task;
 
-import java.io.Serializable;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
-/**
- * Definition for binary filter operations.
- */
-public abstract class BinaryFilter implements Filter, Serializable {
+public class QueryTask {
+  private BasicResponse basicResponse;
+  private TaskState state;
 
-  private static final long serialVersionUID = 1039585564327602465L;
-
-  protected final Filter left;
-  protected final Filter right;
+  public QueryTask(BasicResponse basicResponse,
+      TaskState state) {
+    this.basicResponse = basicResponse;
+    this.state = state;
+  }
 
-  protected BinaryFilter(Filter left, Filter right) {
-    this.left = left;
-    this.right = right;
+  public BasicResponse getBasicResponse() {
+    return basicResponse;
   }
 
-  public Filter getLeft() {
-    return left;
+  public void setBasicResponse(BasicResponse basicResponse) {
+    this.basicResponse = basicResponse;
   }
 
-  public Filter getRight() {
-    return right;
+  public TaskState getState() {
+    return state;
   }
 
-  @Override
-  public String toString() {
-    return "( " + left + "," + right + " )";
+  public void setState(TaskState state) {
+    this.state = state;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/SingleQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
similarity index 93%
rename from cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/SingleQPTask.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
index 7fc7ba2..805834e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/callback/SingleQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.qp.callback;
+package org.apache.iotdb.cluster.qp.task;
 
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
@@ -57,6 +57,8 @@ public class SingleQPTask extends QPTask {
 
   @Override
   public void shutdown() {
-    this.taskCountDownLatch.countDown();
+    if (taskCountDownLatch.getCount() != 0) {
+      this.taskCountDownLatch.countDown();
+    }
   }
 }
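
Note: both shutdown() overrides now release their CountDownLatch defensively. A
SingleQPTask latch holds a single count, while a MultiQPTask latch was created with the
sub-task count, so it must be drained in a loop before await() can return. The idiom in
isolation:

    // The latch-draining idiom behind both shutdown() overrides.
    CountDownLatch latch = new CountDownLatch(taskNum);
    // ... sub-task futures are cancelled ...
    while (latch.getCount() != 0) {
      latch.countDown(); // await() unblocks once the count reaches zero
    }
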
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/PathType.java
similarity index 81%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/PathType.java
index b97310a..78c54f7 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/PathType.java
@@ -16,15 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.expression;
+package org.apache.iotdb.cluster.query;
 
 /**
- * @author Jinrui Zhang
+ * Type of path
  */
-public interface IBinaryExpression extends IExpression {
-
-  IExpression getLeft();
-
-  IExpression getRight();
-
+public enum PathType {
+  /**
+   * Select paths in a query
+   */
+  SELECT_PATH,
+  /**
+   * Filter paths in a query
+   */
+  FILTER_PATH
 }
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/QueryType.java
similarity index 77%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/QueryType.java
index b97310a..5bf8c53 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/IBinaryExpression.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/QueryType.java
@@ -16,15 +16,22 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.expression;
+package org.apache.iotdb.cluster.query;
 
 /**
- * @author Jinrui Zhang
+ * Type of query
  */
-public interface IBinaryExpression extends IExpression {
-
-  IExpression getLeft();
-
-  IExpression getRight();
-
+public enum QueryType {
+  /**
+   * Query with no filter
+   */
+  NO_FILTER,
+  /**
+   * Query with only a global time filter
+   */
+  GLOBAL_TIME,
+  /**
+   * Query with value filter
+   */
+  FILTER
 }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
similarity index 55%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithTimeGenerator.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
index 6e76e66..f3e4eaf 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/dataset/ClusterDataSetWithTimeGenerator.java
@@ -16,25 +16,44 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.db.query.dataset;
+package org.apache.iotdb.cluster.query.dataset;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
-import org.apache.iotdb.db.query.timegenerator.EngineTimeGenerator;
-import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Field;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.common.RowRecord;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
-import org.apache.iotdb.tsfile.utils.Binary;
 
-public class EngineDataSetWithTimeGenerator extends QueryDataSet {
+/**
+ * Dataset with time generator for cluster
+ */
+public class ClusterDataSetWithTimeGenerator extends QueryDataSet {
+
+  private ClusterRpcSingleQueryManager queryManager;
+
+  private ClusterTimeGenerator timeGenerator;
+
+  private EngineReaderByTimeStamp[] readers;
+
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
+  /**
+   * Cached batch of timestamps fetched from the time generator
+   */
+  private Iterator<Long> cachedBatchTimestamp;
 
-  private EngineTimeGenerator timeGenerator;
-  private List<EngineReaderByTimeStamp> readers;
   private boolean hasCachedRowRecord;
+
   private RowRecord cachedRowRecord;
 
   /**
@@ -45,11 +64,13 @@ public class EngineDataSetWithTimeGenerator extends QueryDataSet {
    * @param timeGenerator EngineTimeGenerator object
    * @param readers readers in List(EngineReaderByTimeStamp) structure
    */
-  public EngineDataSetWithTimeGenerator(List<Path> paths, List<TSDataType> dataTypes,
-      EngineTimeGenerator timeGenerator, List<EngineReaderByTimeStamp> readers) {
+  public ClusterDataSetWithTimeGenerator(List<Path> paths, List<TSDataType> dataTypes,
+      ClusterTimeGenerator timeGenerator, EngineReaderByTimeStamp[] readers,
+      ClusterRpcSingleQueryManager queryManager) {
     super(paths, dataTypes);
     this.timeGenerator = timeGenerator;
     this.readers = readers;
+    this.queryManager = queryManager;
   }
 
   @Override
@@ -75,12 +96,12 @@ public class EngineDataSetWithTimeGenerator extends QueryDataSet {
    * @return if there has next row record.
    */
   private boolean cacheRowRecord() throws IOException {
-    while (timeGenerator.hasNext()) {
+    while (hasNextTimestamp()) {
       boolean hasField = false;
-      long timestamp = timeGenerator.next();
+      long timestamp = cachedBatchTimestamp.next();
       RowRecord rowRecord = new RowRecord(timestamp);
-      for (int i = 0; i < readers.size(); i++) {
-        EngineReaderByTimeStamp reader = readers.get(i);
+      for (int i = 0; i < readers.length; i++) {
+        EngineReaderByTimeStamp reader = readers[i];
         Object value = reader.getValueInTimestamp(timestamp);
         if (value == null) {
           rowRecord.addField(new Field(null));
@@ -98,36 +119,31 @@ public class EngineDataSetWithTimeGenerator extends QueryDataSet {
     return hasCachedRowRecord;
   }
 
-  private Field getField(Object value, TSDataType dataType) {
-    Field field = new Field(dataType);
-
-    if (value == null) {
-      field.setNull();
-      return field;
+  /**
+   * Check whether a next valid timestamp exists, fetching a new batch when the cache is exhausted
+   */
+  private boolean hasNextTimestamp() throws IOException {
+    if (cachedBatchTimestamp == null || !cachedBatchTimestamp.hasNext()) {
+      List<Long> batchTimestamp = new ArrayList<>();
+      for (int i = 0; i < CLUSTER_CONF.getBatchReadSize(); i++) {
+        if (timeGenerator.hasNext()) {
+          batchTimestamp.add(timeGenerator.next());
+        } else {
+          break;
+        }
+      }
+      if (!batchTimestamp.isEmpty()) {
+        cachedBatchTimestamp = batchTimestamp.iterator();
+        try {
+          queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+        } catch (RaftConnectionException e) {
+          throw new IOException(e);
+        }
+      }
     }
-
-    switch (dataType) {
-      case DOUBLE:
-        field.setDoubleV((double) value);
-        break;
-      case FLOAT:
-        field.setFloatV((float) value);
-        break;
-      case INT64:
-        field.setLongV((long) value);
-        break;
-      case INT32:
-        field.setIntV((int) value);
-        break;
-      case BOOLEAN:
-        field.setBoolV((boolean) value);
-        break;
-      case TEXT:
-        field.setBinaryV((Binary) value);
-        break;
-      default:
-        throw new UnSupportedDataTypeException("UnSupported: " + dataType);
+    if (cachedBatchTimestamp != null && cachedBatchTimestamp.hasNext()) {
+      return true;
     }
-    return field;
+    return false;
   }
 }
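
Note: replacing the per-value getField path with hasNextTimestamp() is the core change
here. Instead of resolving one timestamp at a time, the dataset drains up to
getBatchReadSize() timestamps from the time generator and prefetches the remote values
for the whole batch in one round trip, so a cross-node RPC is paid once per batch rather
than once per row. The refill pattern in generic form (names are illustrative):

    // Generic batch-and-refill sketch of hasNextTimestamp(); the real method also
    // prefetches remote values for the batch through the query manager.
    private Iterator<Long> cached;

    private boolean hasNextCached(Iterator<Long> source, int batchSize) {
      if (cached == null || !cached.hasNext()) {
        List<Long> batch = new ArrayList<>();
        while (batch.size() < batchSize && source.hasNext()) {
          batch.add(source.next());
        }
        cached = batch.iterator();
      }
      return cached.hasNext();
    }
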
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
new file mode 100644
index 0000000..fed8c0d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.query.dataset.ClusterDataSetWithTimeGenerator;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+
+public class ClusterExecutorWithTimeGenerator {
+
+  /**
+   * query expression
+   */
+  private QueryExpression queryExpression;
+
+  /**
+   * Manager for all remote query series reader resources in the query
+   */
+  private ClusterRpcSingleQueryManager queryManager;
+
+  /**
+   * Constructor of ClusterExecutorWithTimeGenerator
+   */
+  public ClusterExecutorWithTimeGenerator(QueryExpression queryExpression,
+      ClusterRpcSingleQueryManager queryManager) {
+    this.queryExpression = queryExpression;
+    this.queryManager = queryManager;
+  }
+
+  /**
+   * Execute query with value filter.
+   *
+   * @return QueryDataSet object
+   */
+  public QueryDataSet execute(QueryContext context) throws FileNodeManagerException {
+
+    /** add query tokens for query series that can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(queryExpression.getSelectedSeries());
+    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    /** add query tokens for filter series that can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), queryExpression.getExpression(),
+            deviceIdSet);
+
+    ClusterTimeGenerator timestampGenerator;
+    List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    try {
+      timestampGenerator = new ClusterTimeGenerator(queryExpression.getExpression(), context,
+          queryManager);
+      readersOfSelectedSeries = ClusterSeriesReaderFactory
+          .createReadersByTimestampOfSelectedPaths(queryExpression.getSelectedSeries(), context,
+              queryManager);
+    } catch (IOException ex) {
+      throw new FileNodeManagerException(ex);
+    }
+
+    /** Get data types of the selected paths **/
+    List<TSDataType> dataTypes = new ArrayList<>();
+    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
+        .getSelectSeriesReaders();
+    for (Path path : queryExpression.getSelectedSeries()) {
+      try {
+        if (selectSeriesReaders.containsKey(path)) {
+          dataTypes.add(selectSeriesReaders.get(path).getDataType());
+        } else {
+          dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
+        }
+      } catch (PathErrorException e) {
+        throw new FileNodeManagerException(e);
+      }
+
+    }
+
+    EngineReaderByTimeStamp[] readersOfSelectedSeriesArray = new EngineReaderByTimeStamp[readersOfSelectedSeries
+        .size()];
+    int index = 0;
+    for (EngineReaderByTimeStamp reader : readersOfSelectedSeries) {
+      readersOfSelectedSeriesArray[index] = reader;
+      index++;
+    }
+
+    return new ClusterDataSetWithTimeGenerator(queryExpression.getSelectedSeries(), dataTypes,
+        timestampGenerator,
+        readersOfSelectedSeriesArray, queryManager);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
new file mode 100644
index 0000000..65bd87b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+
+public class ClusterExecutorWithoutTimeGenerator extends ExecutorWithoutTimeGenerator {
+
+  /**
+   * Query expression
+   */
+  private QueryExpression queryExpression;
+
+  /**
+   * Manager for all remote query series reader resources in the query
+   */
+  private ClusterRpcSingleQueryManager queryManager;
+
+  /**
+   * Constructor of ClusterExecutorWithoutTimeGenerator
+   */
+  public ClusterExecutorWithoutTimeGenerator(QueryExpression queryExpression,
+      ClusterRpcSingleQueryManager queryManager) {
+    this.queryExpression = queryExpression;
+    this.queryManager = queryManager;
+  }
+
+  /**
+   * Execute query without filter or with only global time filter.
+   */
+  public QueryDataSet execute(QueryContext context)
+      throws FileNodeManagerException {
+
+    Filter timeFilter = null;
+    if (queryExpression.getExpression() != null) {
+      timeFilter = ((GlobalTimeExpression) queryExpression.getExpression()).getFilter();
+    }
+
+    List<IPointReader> readersOfSelectedSeries = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+
+    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    List<Path> paths = new ArrayList<>();
+    for (Path path : queryExpression.getSelectedSeries()) {
+
+      if (selectPathReaders.containsKey(path)) {
+        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+        readersOfSelectedSeries.add(reader);
+        dataTypes.add(reader.getDataType());
+
+      } else {
+        IPointReader reader = createSeriesReader(context, path, dataTypes, timeFilter);
+        readersOfSelectedSeries.add(reader);
+        paths.add(path);
+      }
+    }
+
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
+
+    try {
+      return new EngineDataSetWithoutTimeGenerator(queryExpression.getSelectedSeries(), dataTypes,
+          readersOfSelectedSeries);
+    } catch (IOException e) {
+      throw new FileNodeManagerException(e);
+    }
+  }
+
+}
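
Note: the GlobalTimeExpression cast above is safe only because of the router contract:
ClusterQueryRouter hands this executor NO_FILTER queries (expression == null) or
GLOBAL_TIME expressions, never value filters. Made explicit as a sketch (the assert is
illustrative, not part of the patch):

    Filter timeFilter = null;
    IExpression expression = queryExpression.getExpression();
    if (expression != null) {
      assert expression.getType() == ExpressionType.GLOBAL_TIME; // router contract
      timeFilter = ((GlobalTimeExpression) expression).getFilter();
    }
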
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
new file mode 100644
index 0000000..4211528
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.QueryType;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.ExpressionType;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.QueryExpression;
+import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+/**
+ * Query entry point of the cluster query process. Every query clause is transformed into a
+ * physical plan, which is then executed by ClusterQueryRouter.
+ */
+public class ClusterQueryRouter implements IEngineQueryRouter {
+
+  /**
+   * Consistency level of reading data
+   */
+  private ThreadLocal<Integer> readDataConsistencyLevel = new ThreadLocal<>();
+
+  @Override
+  public QueryDataSet query(QueryExpression queryExpression, QueryContext context)
+      throws FileNodeManagerException, PathErrorException {
+
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+    try {
+      if (queryExpression.hasQueryFilter()) {
+
+        IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+            .optimize(queryExpression.getExpression(), queryExpression.getSelectedSeries());
+        queryExpression.setExpression(optimizedExpression);
+        // The origin query plan must also carry the optimized expression for later steps.
+        queryManager.getOriginQueryPlan().setExpression(optimizedExpression);
+
+        if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+          queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+          ClusterExecutorWithoutTimeGenerator engineExecutor =
+              new ClusterExecutorWithoutTimeGenerator(queryExpression, queryManager);
+          return engineExecutor.execute(context);
+        } else {
+          queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+          ClusterExecutorWithTimeGenerator engineExecutor = new ClusterExecutorWithTimeGenerator(
+              queryExpression, queryManager);
+          return engineExecutor.execute(context);
+        }
+
+      } else {
+        queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+        ClusterExecutorWithoutTimeGenerator engineExecutor =
+            new ClusterExecutorWithoutTimeGenerator(queryExpression, queryManager);
+        return engineExecutor.execute(context);
+      }
+    } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
+  }
+
+  @Override
+  public QueryDataSet aggregate(List<Path> selectedSeries, List<String> aggres,
+      IExpression expression, QueryContext context)
+      throws QueryFilterOptimizationException, FileNodeManagerException, IOException, PathErrorException, ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public QueryDataSet groupBy(List<Path> selectedSeries, List<String> aggres,
+      IExpression expression, long unit, long origin, List<Pair<Long, Long>> intervals,
+      QueryContext context)
+      throws ProcessorException, QueryFilterOptimizationException, FileNodeManagerException, PathErrorException, IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillType,
+      QueryContext context) throws FileNodeManagerException, PathErrorException, IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  public int getReadDataConsistencyLevel() {
+    return readDataConsistencyLevel.get();
+  }
+
+  public void setReadDataConsistencyLevel(int readDataConsistencyLevel) {
+    this.readDataConsistencyLevel.set(readDataConsistencyLevel);
+  }
+}
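
The ThreadLocal above keeps the read consistency level per client thread, so concurrent sessions cannot clobber each other's setting. The following minimal, self-contained sketch illustrates that pattern; the level constants and the default value are illustrative assumptions, not values from the IoTDB source.

// Sketch of the per-thread consistency-level pattern (assumed level codes).
public class ConsistencyLevelDemo {

  private static final int STRONG = 1; // hypothetical level codes
  private static final int WEAK = 2;

  // Each thread starts from the assumed default and may override it locally.
  private static final ThreadLocal<Integer> LEVEL = ThreadLocal.withInitial(() -> WEAK);

  public static void main(String[] args) throws InterruptedException {
    Thread strongReader = new Thread(() -> {
      LEVEL.set(STRONG);
      System.out.println("strongReader level = " + LEVEL.get()); // 1
    });
    strongReader.start();
    strongReader.join();
    // The main thread never set a level, so it still sees the default.
    System.out.println("main level = " + LEVEL.get()); // 2
  }
}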
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/expression/TrueExpression.java
similarity index 63%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/expression/TrueExpression.java
index d69a65a..d62c885 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/expression/TrueExpression.java
@@ -16,37 +16,28 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.expression.impl;
+package org.apache.iotdb.cluster.query.expression;
 
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
-import org.apache.iotdb.tsfile.read.expression.IUnaryExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
 
-public class GlobalTimeExpression implements IUnaryExpression {
-
-  private Filter filter;
-
-  public GlobalTimeExpression(Filter filter) {
-    this.filter = filter;
-  }
-
-  @Override
-  public Filter getFilter() {
-    return filter;
-  }
+/**
+ * This type of expression is used to prune the filter tree while handling a query with a value filter.
+ */
+public class TrueExpression implements IExpression {
 
   @Override
-  public void setFilter(Filter filter) {
-    this.filter = filter;
+  public ExpressionType getType() {
+    return ExpressionType.TRUE;
   }
 
   @Override
-  public ExpressionType getType() {
-    return ExpressionType.GLOBAL_TIME;
+  public IExpression clone() {
+    return new TrueExpression();
   }
 
   @Override
   public String toString() {
-    return "[" + this.filter.toString() + "]";
+    return "TrueExpression{}";
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
new file mode 100644
index 0000000..ddfa5eb
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.factory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestamp;
+import org.apache.iotdb.tsfile.read.common.Path;
+
+/**
+ * Reader factory for cluster
+ */
+public class ClusterSeriesReaderFactory {
+
+  /**
+   * Construct ReaderByTimestamp, including sequential and unsequential data.
+   *
+   * @param paths selected series path
+   * @param context query context
+   * @return the list of EngineReaderByTimeStamp
+   */
+  public static List<EngineReaderByTimeStamp> createReadersByTimestampOfSelectedPaths(
+      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager)
+      throws IOException, FileNodeManagerException {
+
+    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager.getSelectSeriesReaders();
+    List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+
+    for (Path path : paths) {
+
+      if (selectSeriesReaders.containsKey(path)) {
+        readersOfSelectedSeries.add(selectSeriesReaders.get(path));
+      } else {
+        // this series can be handled by a local query
+        EngineReaderByTimeStamp readerByTimeStamp = createReaderByTimeStamp(path, context);
+        readersOfSelectedSeries.add(readerByTimeStamp);
+      }
+    }
+    return readersOfSelectedSeries;
+  }
+
+  /**
+   * Create single ReaderByTimestamp
+   *
+   * @param path series path
+   * @param context query context
+   */
+  public static EngineReaderByTimeStamp createReaderByTimeStamp(Path path, QueryContext context)
+      throws IOException, FileNodeManagerException {
+    QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+        .getQueryDataSource(path,
+            context);
+
+    PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
+
+    // reader for sequence data
+    SequenceDataReaderByTimestamp tsFilesReader = new SequenceDataReaderByTimestamp(
+        queryDataSource.getSeqDataSource(), context);
+    mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
+
+    // reader for unSequence data
+    PriorityMergeReaderByTimestamp unSeqMergeReader = SeriesReaderFactory.getInstance()
+        .createUnSeqMergeReaderByTimestamp(queryDataSource.getOverflowSeriesDataSource());
+    mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
+    return mergeReaderByTimestamp;
+  }
+}
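
The factory above registers the sequence reader with priority 1 and the unsequence reader with priority 2, so out-of-order (overflow) data overrides sequence data at the same timestamp. A self-contained sketch of that priority rule, with hypothetical names, follows.

import java.util.ArrayList;
import java.util.List;

public class PriorityMergeDemo {

  interface ReaderByTimestamp { Object valueAt(long ts); } // null = no value here

  static class MergeReader implements ReaderByTimestamp {
    private final List<ReaderByTimestamp> readers = new ArrayList<>();

    // Readers added later get higher priority: they are consulted first.
    void addReaderWithPriority(ReaderByTimestamp reader) { readers.add(0, reader); }

    @Override
    public Object valueAt(long ts) {
      for (ReaderByTimestamp reader : readers) {
        Object value = reader.valueAt(ts);
        if (value != null) {
          return value; // the highest-priority reader with data wins
        }
      }
      return null;
    }
  }

  public static void main(String[] args) {
    ReaderByTimestamp seq = ts -> ts == 1L ? "seq@1" : null;     // priority 1
    ReaderByTimestamp unseq = ts -> ts == 1L ? "unseq@1" : null; // priority 2
    MergeReader merge = new MergeReader();
    merge.addReaderWithPriority(seq);
    merge.addReaderWithPriority(unseq);
    System.out.println(merge.valueAt(1L)); // unseq@1: unsequence data overrides
  }
}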
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
new file mode 100644
index 0000000..faece2b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.coordinatornode;
+
+import com.alipay.sofa.jraft.util.OnlyForTest;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+
+public class ClusterRpcQueryManager implements IClusterRpcQueryManager {
+
+  /**
+   * Key is job id, value is task id.
+   */
+  private static final ConcurrentHashMap<Long, String> JOB_ID_MAP_TASK_ID = new ConcurrentHashMap<>();
+
+  /**
+   * Key is task id, value is manager of a client query.
+   */
+  private static final ConcurrentHashMap<String, ClusterRpcSingleQueryManager> SINGLE_QUERY_MANAGER_MAP = new ConcurrentHashMap<>();
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
+  /**
+   * Local address
+   */
+  private static final String LOCAL_ADDR = String
+      .format("%s:%d", CLUSTER_CONFIG.getIp(), CLUSTER_CONFIG.getPort());
+
+  @Override
+  public void addSingleQuery(long jobId, QueryPlan physicalPlan) {
+    String taskId = createTaskId(jobId);
+    JOB_ID_MAP_TASK_ID.put(jobId, taskId);
+    SINGLE_QUERY_MANAGER_MAP.put(taskId, new ClusterRpcSingleQueryManager(taskId, physicalPlan));
+  }
+
+  @Override
+  public String createTaskId(long jobId) {
+    return String.format("%s:%d", LOCAL_ADDR, jobId);
+  }
+
+  @Override
+  public ClusterRpcSingleQueryManager getSingleQuery(long jobId) {
+    return SINGLE_QUERY_MANAGER_MAP.get(JOB_ID_MAP_TASK_ID.get(jobId));
+  }
+
+  @Override
+  public ClusterRpcSingleQueryManager getSingleQuery(String taskId) {
+    return SINGLE_QUERY_MANAGER_MAP.get(taskId);
+  }
+
+  @Override
+  public void releaseQueryResource(long jobId) throws RaftConnectionException {
+    if (JOB_ID_MAP_TASK_ID.containsKey(jobId)) {
+      SINGLE_QUERY_MANAGER_MAP.remove(JOB_ID_MAP_TASK_ID.remove(jobId)).releaseQueryResource();
+    }
+  }
+
+  @Override
+  public Map<String, Integer> getAllReadUsage() {
+    Map<String, Integer> readerUsageMap = new HashMap<>();
+    SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
+      for (String groupId : singleQueryManager.getDataGroupUsage()) {
+        readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
+      }
+    });
+    return readerUsageMap;
+  }
+
+  @OnlyForTest
+  public static ConcurrentHashMap<Long, String> getJobIdMapTaskId() {
+    return JOB_ID_MAP_TASK_ID;
+  }
+
+  private ClusterRpcQueryManager() {
+  }
+
+  public static ClusterRpcQueryManager getInstance() {
+    return ClusterRpcQueryManagerHolder.INSTANCE;
+  }
+
+  private static class ClusterRpcQueryManagerHolder {
+
+    private static final ClusterRpcQueryManager INSTANCE = new ClusterRpcQueryManager();
+
+    private ClusterRpcQueryManagerHolder() {
+
+    }
+  }
+
+}
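
The task id built by createTaskId is simply the coordinator's "ip:port" prefixed to the locally assigned job id, which keeps task ids unique cluster-wide even though job ids are only unique per node. A minimal sketch with a made-up address:

public class TaskIdDemo {
  public static void main(String[] args) {
    // A made-up coordinator address; real values come from ClusterConfig.
    String localAddr = String.format("%s:%d", "192.168.1.10", 8888);
    long jobId = 42; // locally assigned, unique only on this node
    String taskId = String.format("%s:%d", localAddr, jobId);
    System.out.println(taskId); // 192.168.1.10:8888:42
  }
}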
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
new file mode 100644
index 0000000..d9a5859
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -0,0 +1,415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.coordinatornode;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.query.QueryType;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.utils.ClusterRpcReaderUtils;
+import org.apache.iotdb.cluster.query.utils.QueryPlanPartitionUtils;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+/**
+ * Manages all remote series reader resources of a single query on the coordinator node.
+ */
+public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManager {
+
+  /**
+   * Records all local data groups used by this query.
+   */
+  private Set<String> dataGroupUsage = new HashSet<>();
+
+  /**
+   * Task id assigned by ClusterRpcQueryManager
+   */
+  private String taskId;
+
+  /**
+   * Represents the number of query rounds
+   */
+  private long queryRounds = 0;
+
+  /**
+   * Origin query plan parsed by QueryProcessor
+   */
+  private QueryPlan originQueryPlan;
+
+  /**
+   * Represents the selected reader nodes; key is group id, value is the selected peer id
+   */
+  private Map<String, PeerId> queryNodes = new HashMap<>();
+
+  // select path resource
+  /**
+   * Query plans of select paths, split from the origin query plan and grouped by group id.
+   * It contains all involved group ids, including the local data group if it is involved.
+   */
+  private Map<String, QueryPlan> selectPathPlans = new HashMap<>();
+
+  /**
+   * Key is group id (remote group ids only), value is all select series in that group.
+   */
+  private Map<String, List<Path>> selectSeriesByGroupId = new HashMap<>();
+
+  /**
+   * Series readers of select paths (remote series only); key is series path, value is the
+   * reader
+   */
+  private Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = new HashMap<>();
+
+  // filter path resource
+  /**
+   * Filter group entities grouped by data group; key is group id (remote group ids only)
+   */
+  private Map<String, FilterGroupEntity> filterGroupEntityMap = new HashMap<>();
+
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
+  public ClusterRpcSingleQueryManager(String taskId,
+      QueryPlan queryPlan) {
+    this.taskId = taskId;
+    this.originQueryPlan = queryPlan;
+  }
+
+  @Override
+  public void initQueryResource(QueryType queryType, int readDataConsistencyLevel)
+      throws PathErrorException, IOException, RaftConnectionException {
+    switch (queryType) {
+      case NO_FILTER:
+      case GLOBAL_TIME:
+        QueryPlanPartitionUtils.splitQueryPlanWithoutValueFilter(this);
+        break;
+      case FILTER:
+        QueryPlanPartitionUtils.splitQueryPlanWithValueFilter(this);
+        break;
+      default:
+        throw new UnsupportedOperationException();
+    }
+    initSeriesReader(readDataConsistencyLevel);
+  }
+
+  /**
+   * Init series readers, completing all initialization with the remote query node of each
+   * involved data group.
+   */
+  private void initSeriesReader(int readDataConsistencyLevel)
+      throws IOException, RaftConnectionException {
+    // Init readers for the data groups of select series; if filter series share the same data group, init them together.
+    for (Entry<String, QueryPlan> entry : selectPathPlans.entrySet()) {
+      String groupId = entry.getKey();
+      QueryPlan queryPlan = entry.getValue();
+      if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
+        queryNodes.put(groupId, randomPeer);
+        Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+        allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
+        List<Filter> filterList = null;
+        if (filterGroupEntityMap.containsKey(groupId)) {
+          FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
+          allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
+          filterList = filterGroupEntity.getFilters();
+        }
+        InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
+            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
+                allQueryPlan, taskId, filterList);
+        handleInitReaderResponse(groupId, allQueryPlan, response);
+      } else {
+        dataGroupUsage.add(groupId);
+        selectSeriesByGroupId.remove(groupId);
+        filterGroupEntityMap.remove(groupId);
+      }
+    }
+
+    // Init series readers for data groups that contain only filter series, i.e. groups absent from the select-series group list.
+    for (Entry<String, FilterGroupEntity> entry : filterGroupEntityMap.entrySet()) {
+      String groupId = entry.getKey();
+      if (!selectPathPlans.containsKey(groupId)) {
+        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
+        Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+        FilterGroupEntity filterGroupEntity = entry.getValue();
+        allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
+        List<Filter> filterList = filterGroupEntity.getFilters();
+        InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
+            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
+                allQueryPlan, taskId, filterList);
+        handleInitReaderResponse(groupId, allQueryPlan, response);
+      }
+    }
+  }
+
+  /**
+   * Handle response of initialization with remote query node
+   */
+  private void handleInitReaderResponse(String groupId, Map<PathType, QueryPlan> allQueryPlan,
+      InitSeriesReaderResponse response) {
+    // create cluster series readers
+    if (allQueryPlan.containsKey(PathType.SELECT_PATH)) {
+      QueryPlan plan = allQueryPlan.get(PathType.SELECT_PATH);
+      List<Path> paths = plan.getPaths();
+      List<TSDataType> seriesType = response.getSeriesDataTypes().get(PathType.SELECT_PATH);
+      for (int i = 0; i < paths.size(); i++) {
+        Path seriesPath = paths.get(i);
+        TSDataType dataType = seriesType.get(i);
+        ClusterSelectSeriesReader seriesReader = new ClusterSelectSeriesReader(groupId, seriesPath,
+            dataType, this);
+        selectSeriesReaders.put(seriesPath, seriesReader);
+      }
+    }
+    if (allQueryPlan.containsKey(PathType.FILTER_PATH)) {
+      QueryPlan plan = allQueryPlan.get(PathType.FILTER_PATH);
+      List<Path> paths = plan.getPaths();
+      List<TSDataType> seriesType = response.getSeriesDataTypes().get(PathType.FILTER_PATH);
+      for (int i = 0; i < paths.size(); i++) {
+        Path seriesPath = paths.get(i);
+        TSDataType dataType = seriesType.get(i);
+        ClusterFilterSeriesReader seriesReader = new ClusterFilterSeriesReader(groupId, seriesPath,
+            dataType, this);
+        if (!filterGroupEntityMap.containsKey(groupId)) {
+          filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
+        }
+        filterGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
+      }
+    }
+  }
+
+  @Override
+  public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
+    List<String> fetchDataSeries = new ArrayList<>();
+    Map<String, List<Path>> seriesByGroupId = selectSeriesByGroupId;
+    Map<Path, ClusterSelectSeriesReader> seriesReaders = selectSeriesReaders;
+    if (seriesByGroupId.containsKey(groupId)) {
+      List<Path> allSelectSeries = seriesByGroupId.get(groupId);
+      for (Path series : allSelectSeries) {
+        if (seriesReaders.get(series).enableFetchData()) {
+          fetchDataSeries.add(series.getFullPath());
+        }
+      }
+    }
+    QuerySeriesDataResponse response = ClusterRpcReaderUtils
+        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.SELECT_PATH,
+            fetchDataSeries,
+            queryRounds++);
+    handleFetchDataResponseForSelectPaths(fetchDataSeries, response);
+  }
+
+  @Override
+  public void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException {
+    QuerySeriesDataResponse response = ClusterRpcReaderUtils
+        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.FILTER_PATH, null,
+            queryRounds++);
+    handleFetchDataResponseForFilterPaths(groupId, response);
+  }
+
+
+  @Override
+  public void fetchBatchDataByTimestampForAllSelectPaths(List<Long> batchTimestamp)
+      throws RaftConnectionException {
+    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+      String groupId = entry.getKey();
+      List<String> fetchDataFilterSeries = new ArrayList<>();
+      entry.getValue().forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
+      QuerySeriesDataByTimestampResponse response = ClusterRpcReaderUtils
+          .fetchBatchDataByTimestamp(groupId, queryNodes.get(groupId), taskId, queryRounds++,
+              batchTimestamp, fetchDataFilterSeries);
+      handleFetchDataByTimestampResponseForSelectPaths(fetchDataFilterSeries, response);
+    }
+  }
+
+  /**
+   * Handle response of fetching data, and add batch data to corresponding reader.
+   */
+  private void handleFetchDataByTimestampResponseForSelectPaths(List<String> fetchDataSeries,
+      BasicQueryDataResponse response) {
+    List<BatchData> batchDataList = response.getSeriesBatchData();
+    for (int i = 0; i < fetchDataSeries.size(); i++) {
+      String series = fetchDataSeries.get(i);
+      BatchData batchData = batchDataList.get(i);
+      selectSeriesReaders.get(new Path(series))
+          .addBatchData(batchData, true);
+    }
+  }
+
+  /**
+   * Handle response of fetching data, and add batch data to corresponding reader.
+   */
+  private void handleFetchDataResponseForSelectPaths(List<String> fetchDataSeries,
+      BasicQueryDataResponse response) {
+    List<BatchData> batchDataList = response.getSeriesBatchData();
+    for (int i = 0; i < fetchDataSeries.size(); i++) {
+      String series = fetchDataSeries.get(i);
+      BatchData batchData = batchDataList.get(i);
+      selectSeriesReaders.get(new Path(series))
+          .addBatchData(batchData, batchData.length() < CLUSTER_CONF.getBatchReadSize());
+    }
+  }
+
+  /**
+   * Handle response of fetching data, and add batch data to corresponding reader.
+   */
+  private void handleFetchDataResponseForFilterPaths(String groupId,
+      QuerySeriesDataResponse response) {
+    FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
+    List<Path> fetchDataSeries = filterGroupEntity.getFilterPaths();
+    List<BatchData> batchDataList = response.getSeriesBatchData();
+    List<ClusterFilterSeriesReader> filterReaders = filterGroupEntity.getFilterSeriesReaders();
+    boolean remoteDataFinish = true;
+    for (int i = 0; i < batchDataList.size(); i++) {
+      if (batchDataList.get(i).length() != 0) {
+        remoteDataFinish = false;
+        break;
+      }
+    }
+    for (int i = 0; i < fetchDataSeries.size(); i++) {
+      BatchData batchData = batchDataList.get(i);
+      if (batchData.length() != 0) {
+        filterReaders.get(i).addBatchData(batchData, remoteDataFinish);
+      }
+    }
+  }
+
+  @Override
+  public QueryPlan getSelectPathQueryPlan(String fullPath) {
+    return selectPathPlans.get(fullPath);
+  }
+
+  @Override
+  public void setDataGroupReaderNode(String groupId, PeerId readerNode) {
+    queryNodes.put(groupId, readerNode);
+  }
+
+  @Override
+  public PeerId getDataGroupReaderNode(String groupId) {
+    return queryNodes.get(groupId);
+  }
+
+  @Override
+  public void releaseQueryResource() throws RaftConnectionException {
+    for (Entry<String, PeerId> entry : queryNodes.entrySet()) {
+      String groupId = entry.getKey();
+      PeerId queryNode = entry.getValue();
+      ClusterRpcReaderUtils.releaseRemoteQueryResource(groupId, queryNode, taskId);
+    }
+  }
+
+  public Set<String> getDataGroupUsage() {
+    return dataGroupUsage;
+  }
+
+  public void addDataGroupUsage(String groupId) {
+    this.dataGroupUsage.add(groupId);
+  }
+
+  public String getTaskId() {
+    return taskId;
+  }
+
+  public void setTaskId(String taskId) {
+    this.taskId = taskId;
+  }
+
+  public long getQueryRounds() {
+    return queryRounds;
+  }
+
+  public void setQueryRounds(long queryRounds) {
+    this.queryRounds = queryRounds;
+  }
+
+  public QueryPlan getOriginQueryPlan() {
+    return originQueryPlan;
+  }
+
+  public void setOriginQueryPlan(QueryPlan queryPlan) {
+    this.originQueryPlan = queryPlan;
+  }
+
+  public Map<String, PeerId> getQueryNodes() {
+    return queryNodes;
+  }
+
+  public void setQueryNodes(
+      Map<String, PeerId> queryNodes) {
+    this.queryNodes = queryNodes;
+  }
+
+  public Map<String, QueryPlan> getSelectPathPlans() {
+    return selectPathPlans;
+  }
+
+  public void setSelectPathPlans(
+      Map<String, QueryPlan> selectPathPlans) {
+    this.selectPathPlans = selectPathPlans;
+  }
+
+  public Map<String, List<Path>> getSelectSeriesByGroupId() {
+    return selectSeriesByGroupId;
+  }
+
+  public void setSelectSeriesByGroupId(
+      Map<String, List<Path>> selectSeriesByGroupId) {
+    this.selectSeriesByGroupId = selectSeriesByGroupId;
+  }
+
+  public Map<Path, ClusterSelectSeriesReader> getSelectSeriesReaders() {
+    return selectSeriesReaders;
+  }
+
+  public void setSelectSeriesReaders(
+      Map<Path, ClusterSelectSeriesReader> selectSeriesReaders) {
+    this.selectSeriesReaders = selectSeriesReaders;
+  }
+
+  public Map<String, FilterGroupEntity> getFilterGroupEntityMap() {
+    return filterGroupEntityMap;
+  }
+
+  public void setFilterGroupEntityMap(
+      Map<String, FilterGroupEntity> filterGroupEntityMap) {
+    this.filterGroupEntityMap = filterGroupEntityMap;
+  }
+}
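
initSeriesReader above runs in two passes: first every data group that has select paths (attaching that group's filter plan when one exists), then the remaining groups that carry only filter paths. The following self-contained sketch reproduces just that control flow with placeholder plans:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TwoPassInitDemo {
  public static void main(String[] args) {
    Map<String, String> selectPlans = new HashMap<>();
    Map<String, String> filterPlans = new HashMap<>();
    selectPlans.put("group-1", "select-plan-1");
    filterPlans.put("group-1", "filter-plan-1"); // same group as a select plan
    filterPlans.put("group-2", "filter-plan-2"); // filter-only group

    Set<String> visited = new HashSet<>();
    // Pass 1: groups with select paths; attach the filter plan when present.
    for (String groupId : selectPlans.keySet()) {
      String filterPlan = filterPlans.get(groupId);
      System.out.println("init " + groupId
          + (filterPlan == null ? " (select only)" : " (select + " + filterPlan + ")"));
      visited.add(groupId);
    }
    // Pass 2: groups that only carry filter paths.
    for (String groupId : filterPlans.keySet()) {
      if (!visited.contains(groupId)) {
        System.out.println("init " + groupId + " (filter only)");
      }
    }
  }
}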
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
new file mode 100644
index 0000000..326af11
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.coordinatornode;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+/**
+ * Filter entities of a data group, including its QueryPlan, filters, all filter paths and filter readers
+ */
+public class FilterGroupEntity {
+
+  /**
+   * Group id
+   */
+  private String groupId;
+
+  /**
+   * Query plan of the filter paths, split from the origin query plan
+   */
+  private QueryPlan queryPlan;
+
+  /**
+   * Filters of filter path.
+   */
+  private List<Filter> filters;
+
+  /**
+   * All filter series.
+   * <p>
+   * Note: it may contain multiple series in a complicated filter tree.
+   * For example, for the query: select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or time = 12,
+   * the filter tree is: <code>[[[[root.vehicle.d0.s0:time == 12] || [root.vehicle.d0.s1:time == 12]] || [root.vehicle.d1.s2:time == 12]] || [root.vehicle.d1.s3:time == 12]]</code>
+   * </p>
+   */
+  private List<Path> filterPaths;
+
+
+  /**
+   * Series reader of filter paths (only contains remote series)
+   */
+  private List<ClusterFilterSeriesReader> filterSeriesReaders;
+
+  public FilterGroupEntity(String groupId) {
+    this.groupId = groupId;
+    this.filterPaths = new ArrayList<>();
+    this.filters = new ArrayList<>();
+    this.filterSeriesReaders = new ArrayList<>();
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(String groupId) {
+    this.groupId = groupId;
+  }
+
+  public QueryPlan getQueryPlan() {
+    return queryPlan;
+  }
+
+  public void setQueryPlan(QueryPlan queryPlan) {
+    this.queryPlan = queryPlan;
+  }
+
+  public List<Filter> getFilters() {
+    return filters;
+  }
+
+  public void addFilter(Filter filter) {
+    this.filters.add(filter);
+  }
+
+  public List<Path> getFilterPaths() {
+    return filterPaths;
+  }
+
+  public void addFilterPaths(Path filterPath) {
+    this.filterPaths.add(filterPath);
+  }
+
+  public List<ClusterFilterSeriesReader> getFilterSeriesReaders() {
+    return filterSeriesReaders;
+  }
+
+  public void addFilterSeriesReader(ClusterFilterSeriesReader filterSeriesReader) {
+    this.filterSeriesReaders.add(filterSeriesReader);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
new file mode 100644
index 0000000..b8e4f5d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.coordinatornode;
+
+import java.util.Map;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+
+/**
+ * Manages all query series reader resources on the coordinator node which fetch data from
+ * remote query nodes
+ */
+public interface IClusterRpcQueryManager {
+
+  /**
+   * Add a query
+   *
+   * @param jobId job id assigned by QueryResourceManager
+   * @param physicalPlan physical plan
+   */
+  void addSingleQuery(long jobId, QueryPlan physicalPlan);
+
+  /**
+   * Create the full task id (local address + job id)
+   */
+  String createTaskId(long jobId);
+
+  /**
+   * Get query manager by jobId
+   *
+   * @param jobId job id assigned by QueryResourceManager
+   */
+  ClusterRpcSingleQueryManager getSingleQuery(long jobId);
+
+  /**
+   * Get query manager by taskId
+   *
+   * @param taskId task id created by the createTaskId(long) method
+   */
+  ClusterRpcSingleQueryManager getSingleQuery(String taskId);
+
+  /**
+   * Release query resource
+   *
+   * @param jobId job id
+   */
+  void releaseQueryResource(long jobId) throws RaftConnectionException;
+
+  /**
+   * Get all read usage count group by data group id, key is group id, value is usage count
+   */
+  Map<String, Integer> getAllReadUsage();
+}
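
getAllReadUsage amounts to counting, per data group, how many live queries touch it. A minimal sketch of that aggregation over invented data:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReadUsageDemo {
  public static void main(String[] args) {
    // Each inner list is the set of data groups one live query touches.
    List<List<String>> groupsPerQuery = List.of(
        List.of("group-1", "group-2"),
        List.of("group-1"));
    Map<String, Integer> usage = new HashMap<>();
    for (List<String> groups : groupsPerQuery) {
      for (String groupId : groups) {
        usage.merge(groupId, 1, Integer::sum); // one count per touching query
      }
    }
    System.out.println(usage); // {group-1=2, group-2=1} (order may vary)
  }
}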
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
new file mode 100644
index 0000000..c4aec9c
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.coordinatornode;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.util.List;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.QueryType;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+
+/**
+ * Manage a single query.
+ */
+public interface IClusterRpcSingleQueryManager {
+
+  /**
+   * Divide the physical plan into several sub physical plans according to timeseries full
+   * paths and create series readers.
+   *
+   * @param queryType query type
+   * @param readDataConsistencyLevel consistency level of reading data
+   */
+  void initQueryResource(QueryType queryType, int readDataConsistencyLevel)
+      throws PathErrorException, IOException, RaftConnectionException;
+
+  /**
+   * <p>
+   * Fetch data for select paths. To reduce the number of RPC round trips, a single fetch from
+   * a remote query node covers all series in the same data group. If the cached data of a
+   * specific series already exceeds the limit, that series is skipped in this round.
+   * </p>
+   *
+   * @param groupId data group id
+   */
+  void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException;
+
+  /**
+   * Fetch data for filter path.
+   *
+   * @param groupId data group id
+   */
+  void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException;
+
+  /**
+   * Fetch batch data for all select paths by a batch of timestamps, skipping a group id
+   * whose target data has already been fetched.
+   *
+   * @param batchTimestamp valid batch timestamp
+   */
+  void fetchBatchDataByTimestampForAllSelectPaths(List<Long> batchTimestamp)
+      throws RaftConnectionException;
+
+  /**
+   * Get query plan of select path
+   *
+   * @param fullPath Timeseries full path in select paths
+   */
+  QueryPlan getSelectPathQueryPlan(String fullPath);
+
+  /**
+   * Set reader node of a data group
+   *
+   * @param groupId data group id
+   * @param readerNode reader peer id
+   */
+  void setDataGroupReaderNode(String groupId, PeerId readerNode);
+
+  /**
+   * Get reader node of a data group by group id
+   *
+   * @param groupId data group id
+   * @return peer id of reader node
+   */
+  PeerId getDataGroupReaderNode(String groupId);
+
+  /**
+   * Release query resource in remote query node
+   */
+  void releaseQueryResource() throws RaftConnectionException;
+}
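
The batching rule documented for fetchBatchDataForSelectPaths, sketched in isolation: one RPC per data group, and a series joins the batch only if its local cache still has room. The threshold and cache counts below are invented for the demo.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class BatchedFetchDemo {

  static final int CACHE_LIMIT = 2; // hypothetical per-series cache limit

  public static void main(String[] args) {
    // series name -> number of batches already cached locally (invented data)
    Map<String, Integer> cached = Map.of("s1", 0, "s2", 2, "s3", 1);
    List<String> toFetch = new ArrayList<>();
    for (Map.Entry<String, Integer> e : cached.entrySet()) {
      if (e.getValue() < CACHE_LIMIT) { // room left: include in this round
        toFetch.add(e.getKey());
      }
    }
    // A single RPC would carry all eligible series of the group at once.
    System.out.println("fetch in one RPC: " + toFetch); // s1 and s3, order may vary
  }
}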
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
new file mode 100644
index 0000000..fe3ac52
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.querynode;
+
+import com.alipay.sofa.jraft.util.OnlyForTest;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+
+public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
+
+  /**
+   * Key is task id which is assigned by coordinator node, value is job id which is assigned by
+   * the query node (local).
+   */
+  private static final ConcurrentHashMap<String, Long> TASK_ID_MAP_JOB_ID = new ConcurrentHashMap<>();
+
+  /**
+   * Key is job id, value is manager of a client query.
+   */
+  private static final ConcurrentHashMap<Long, ClusterLocalSingleQueryManager> SINGLE_QUERY_MANAGER_MAP = new ConcurrentHashMap<>();
+
+  private ClusterLocalQueryManager() {
+  }
+
+  @Override
+  public InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException {
+    long jobId = QueryResourceManager.getInstance().assignJobId();
+    String taskId = request.getTaskId();
+    TASK_ID_MAP_JOB_ID.put(taskId, jobId);
+    ClusterLocalSingleQueryManager localQueryManager = new ClusterLocalSingleQueryManager(jobId);
+    SINGLE_QUERY_MANAGER_MAP.put(jobId, localQueryManager);
+    return localQueryManager.createSeriesReader(request);
+  }
+
+  @Override
+  public QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
+      throws IOException {
+    long jobId = TASK_ID_MAP_JOB_ID.get(request.getTaskId());
+    return SINGLE_QUERY_MANAGER_MAP.get(jobId).readBatchData(request);
+  }
+
+  @Override
+  public QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
+      QuerySeriesDataByTimestampRequest request)
+      throws IOException {
+    long jobId = TASK_ID_MAP_JOB_ID.get(request.getTaskId());
+    return SINGLE_QUERY_MANAGER_MAP.get(jobId).readBatchDataByTimestamp(request);
+  }
+
+  @Override
+  public void close(String taskId) throws FileNodeManagerException {
+    if (TASK_ID_MAP_JOB_ID.containsKey(taskId)) {
+      SINGLE_QUERY_MANAGER_MAP.remove(TASK_ID_MAP_JOB_ID.remove(taskId)).close();
+    }
+  }
+
+  @Override
+  public ClusterLocalSingleQueryManager getSingleQuery(String taskId) {
+    long jobId = TASK_ID_MAP_JOB_ID.get(taskId);
+    return SINGLE_QUERY_MANAGER_MAP.get(jobId);
+  }
+
+  public static ClusterLocalQueryManager getInstance() {
+    return ClusterLocalQueryManager.ClusterLocalQueryManagerHolder.INSTANCE;
+  }
+
+  private static class ClusterLocalQueryManagerHolder {
+
+    private static final ClusterLocalQueryManager INSTANCE = new ClusterLocalQueryManager();
+
+    private ClusterLocalQueryManagerHolder() {
+
+    }
+  }
+
+  @Override
+  public Map<String, Integer> getAllReadUsage() {
+    Map<String, Integer> readerUsageMap = new HashMap<>();
+    SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
+      String groupId = singleQueryManager.getGroupId();
+      readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
+    });
+    return readerUsageMap;
+  }
+
+  @OnlyForTest
+  public static ConcurrentHashMap<String, Long> getTaskIdMapJobId() {
+    return TASK_ID_MAP_JOB_ID;
+  }
+
+  @OnlyForTest
+  public static ConcurrentHashMap<Long, ClusterLocalSingleQueryManager> getSingleQueryManagerMap() {
+    return SINGLE_QUERY_MANAGER_MAP;
+  }
+}
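
ClusterLocalQueryManager translates the coordinator's task id into a locally assigned job id and cleans up both entries on close. A self-contained sketch of that registry pattern (assignJobId is stood in for by a simple counter):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class LocalQueryRegistryDemo {

  private static final ConcurrentHashMap<String, Long> TASK_TO_JOB = new ConcurrentHashMap<>();
  private static final ConcurrentHashMap<Long, String> JOB_RESOURCE = new ConcurrentHashMap<>();
  private static final AtomicLong JOB_IDS = new AtomicLong(); // stands in for assignJobId()

  static long open(String taskId) {
    long jobId = JOB_IDS.incrementAndGet();
    TASK_TO_JOB.put(taskId, jobId);
    JOB_RESOURCE.put(jobId, "readers for " + taskId);
    return jobId;
  }

  static void close(String taskId) {
    Long jobId = TASK_TO_JOB.remove(taskId); // drop the translation first
    if (jobId != null) {
      JOB_RESOURCE.remove(jobId); // then release the job-side resources
    }
  }

  public static void main(String[] args) {
    open("10.0.0.1:8888:42"); // a made-up coordinator task id
    close("10.0.0.1:8888:42");
    System.out.println(TASK_TO_JOB.isEmpty() && JOB_RESOURCE.isEmpty()); // true
  }
}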
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
new file mode 100644
index 0000000..559575a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -0,0 +1,335 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.querynode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ScheduledFuture;
+import org.apache.iotdb.cluster.concurrent.pool.QueryTimerManager;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
+import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
+import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
+import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.executor.ExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.ExpressionType;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryManager {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterLocalSingleQueryManager.class);
+
+  private String groupId;
+
+  /**
+   * Query timer; when the timeout expires, the query resource is closed.
+   */
+  private ScheduledFuture<?> queryTimer;
+
+  /**
+   * Job id assigned by local QueryResourceManager
+   */
+  private long jobId;
+
+  /**
+   * Represents the number of query rounds; the initial value is -1.
+   */
+  private long queryRound = -1;
+
+  /**
+   * Key is series full path, value is reader of select series
+   */
+  private Map<String, AbstractClusterBatchReader> selectSeriesReaders = new HashMap<>();
+
+  /**
+   * Filter reader
+   */
+  private IClusterFilterSeriesBatchReader filterReader;
+
+  /**
+   * Key is series full path, value is data type of series
+   */
+  private Map<String, TSDataType> dataTypeMap = new HashMap<>();
+
+  /**
+   * Cached batch data result
+   */
+  private List<BatchData> cachedBatchDataResult = new ArrayList<>();
+
+  private QueryProcessExecutor queryProcessExecutor = new OverflowQPExecutor();
+
+  /**
+   * Constructor of ClusterLocalSingleQueryManager
+   */
+  public ClusterLocalSingleQueryManager(long jobId) {
+    this.jobId = jobId;
+    queryTimer = QueryTimerManager.getInstance()
+        .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
+  }
+
+  @Override
+  public InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException {
+    this.groupId = request.getGroupID();
+    InitSeriesReaderResponse response = new InitSeriesReaderResponse(groupId);
+    QueryContext context = new QueryContext(jobId);
+    Map<PathType, QueryPlan> queryPlanMap = request.getAllQueryPlan();
+    if (queryPlanMap.containsKey(PathType.SELECT_PATH)) {
+      QueryPlan plan = queryPlanMap.get(PathType.SELECT_PATH);
+      if (plan instanceof GroupByPlan) {
+        throw new UnsupportedOperationException();
+      } else if (plan instanceof AggregationPlan) {
+        throw new UnsupportedOperationException();
+      } else {
+        if (plan.getExpression() == null
+            || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+          handleSelectReaderWithoutTimeGenerator(plan, context, response);
+        } else {
+          handleSelectReaderWithTimeGenerator(plan, context, response);
+        }
+      }
+    }
+    if (queryPlanMap.containsKey(PathType.FILTER_PATH)) {
+      QueryPlan queryPlan = queryPlanMap.get(PathType.FILTER_PATH);
+      handleFilterSeriesReader(queryPlan, context, request, response, PathType.FILTER_PATH);
+    }
+    return response;
+  }
+
+  /**
+   * Handle filter series reader
+   *
+   * @param plan filter series query plan
+   */
+  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
+    QueryDataSet queryDataSet = queryProcessExecutor
+        .processQuery(plan, context);
+    List<Path> paths = plan.getPaths();
+    List<TSDataType> dataTypes = queryDataSet.getDataTypes();
+    for (int i = 0; i < paths.size(); i++) {
+      dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
+    }
+    response.getSeriesDataTypes().put(pathType, dataTypes);
+    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+  }
+
+  /**
+   * Handle select series query with no filter or only global time filter
+   *
+   * @param plan query plan
+   * @param context query context
+   * @param response response for coordinator node
+   */
+  private void handleSelectReaderWithoutTimeGenerator(QueryPlan plan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException {
+    List<Path> paths = plan.getPaths();
+    Filter timeFilter = null;
+    if (plan.getExpression() != null) {
+      timeFilter = ((GlobalTimeExpression) plan.getExpression()).getFilter();
+    }
+    List<TSDataType> dataTypes = new ArrayList<>();
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), plan.getPaths());
+    for (int i = 0; i < paths.size(); i++) {
+      String fullPath = paths.get(i).getFullPath();
+      IPointReader reader = ExecutorWithoutTimeGenerator
+          .createSeriesReader(context, paths.get(i), dataTypes, timeFilter);
+      selectSeriesReaders
+          .put(fullPath, new ClusterBatchReaderWithoutTimeGenerator(dataTypes.get(i), reader));
+      dataTypeMap.put(fullPath, dataTypes.get(i));
+    }
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle select series query with value filter
+   *
+   * @param plan query plan
+   * @param context query context
+   * @param response response for coordinator node
+   */
+  private void handleSelectReaderWithTimeGenerator(QueryPlan plan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws PathErrorException, FileNodeManagerException, IOException {
+    List<Path> paths = plan.getPaths();
+    List<TSDataType> dataTypeList = new ArrayList<>();
+    for (int i = 0; i < paths.size(); i++) {
+      Path path = paths.get(i);
+      EngineReaderByTimeStamp readerByTimeStamp = ClusterSeriesReaderFactory
+          .createReaderByTimeStamp(path, context);
+      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+      selectSeriesReaders
+          .put(path.getFullPath(), new ClusterBatchReaderByTimestamp(readerByTimeStamp, dataType));
+      dataTypeMap.put(path.getFullPath(), dataType);
+      dataTypeList.add(dataType);
+    }
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypeList);
+  }
+
+  @Override
+  public QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
+      throws IOException {
+    resetQueryTimer();
+    QuerySeriesDataResponse response = new QuerySeriesDataResponse(request.getGroupID());
+    long targetQueryRounds = request.getQueryRounds();
+    if (targetQueryRounds != this.queryRound) {
+      this.queryRound = targetQueryRounds;
+      PathType pathType = request.getPathType();
+      List<String> paths = request.getSeriesPaths();
+      List<BatchData> batchDataList;
+      if (pathType == PathType.SELECT_PATH) {
+        batchDataList = readSelectSeriesBatchData(paths);
+      } else {
+        batchDataList = readFilterSeriesBatchData();
+      }
+      cachedBatchDataResult = batchDataList;
+    }
+    response.setSeriesBatchData(cachedBatchDataResult);
+    return response;
+  }
+
+  @Override
+  public QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
+      QuerySeriesDataByTimestampRequest request)
+      throws IOException {
+    resetQueryTimer();
+    QuerySeriesDataByTimestampResponse response = new QuerySeriesDataByTimestampResponse(groupId);
+    List<String> fetchDataSeries = request.getFetchDataSeries();
+    long targetQueryRounds = request.getQueryRounds();
+    if (targetQueryRounds != this.queryRound) {
+      this.queryRound = targetQueryRounds;
+      List<BatchData> batchDataList = new ArrayList<>();
+      for (String series : fetchDataSeries) {
+        AbstractClusterBatchReader reader = selectSeriesReaders.get(series);
+        batchDataList.add(reader.nextBatch(request.getBatchTimestamp()));
+      }
+      cachedBatchDataResult = batchDataList;
+    }
+    response.setSeriesBatchData(cachedBatchDataResult);
+    return response;
+  }
+
+  @Override
+  public void resetQueryTimer() {
+    queryTimer.cancel(false);
+    queryTimer = QueryTimerManager.getInstance()
+        .execute(new QueryTimerRunnable(), ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE);
+  }
+
+  /**
+   * Read batch data of select series
+   *
+   * @param paths all series to query
+   */
+  private List<BatchData> readSelectSeriesBatchData(List<String> paths) throws IOException {
+    List<BatchData> batchDataList = new ArrayList<>();
+    for (String fullPath : paths) {
+      batchDataList.add(selectSeriesReaders.get(fullPath).nextBatch());
+    }
+    return batchDataList;
+  }
+
+  /**
+   * Read batch data of filter series
+   *
+   * @return batch data of all filter series
+   */
+  private List<BatchData> readFilterSeriesBatchData() throws IOException {
+    return filterReader.nextBatchList();
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
+
+  @Override
+  public void close() throws FileNodeManagerException {
+    queryTimer.cancel(false);
+    QueryResourceManager.getInstance().endQueryForGivenJob(jobId);
+  }
+
+  public long getJobId() {
+    return jobId;
+  }
+
+  public long getQueryRound() {
+    return queryRound;
+  }
+
+  public Map<String, AbstractClusterBatchReader> getSelectSeriesReaders() {
+    return selectSeriesReaders;
+  }
+
+  public IClusterFilterSeriesBatchReader getFilterReader() {
+    return filterReader;
+  }
+
+  public Map<String, TSDataType> getDataTypeMap() {
+    return dataTypeMap;
+  }
+
+  public class QueryTimerRunnable implements Runnable {
+
+    @Override
+    public void run() {
+      try {
+        close();
+      } catch (FileNodeManagerException e) {
+        LOGGER.error("Failed to close the query resource.", e);
+      }
+    }
+  }
+}
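
The query timer above implements a reset-on-activity timeout: every read cancels the pending close task and schedules a fresh one, so only a genuinely idle query gets closed. A runnable sketch, with a deliberately short 50 ms timeout in place of the real QUERY_TIMEOUT_IN_QUERY_NODE:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class QueryTimerDemo {

  private static final ScheduledExecutorService POOL =
      Executors.newSingleThreadScheduledExecutor();
  private static ScheduledFuture<?> timer;

  private static void schedule() {
    timer = POOL.schedule(
        () -> System.out.println("idle query closed"), 50, TimeUnit.MILLISECONDS);
  }

  private static void resetTimer() {
    timer.cancel(false); // a read arrived: push the deadline back
    schedule();
  }

  public static void main(String[] args) throws InterruptedException {
    schedule();
    Thread.sleep(30);
    resetTimer();      // activity before the deadline keeps the query alive
    Thread.sleep(100); // no further activity: the timer fires and closes it
    POOL.shutdown();
  }
}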
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
new file mode 100644
index 0000000..cc0f103
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.querynode;
+
+import java.io.IOException;
+import java.util.Map;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+
+/**
+ * Manage all local query resources on a query node, which provide data to the coordinator node.
+ */
+public interface IClusterLocalQueryManager {
+
+  /**
+   * Create the initial query data set for the coordinator node.
+   *
+   * @param request request for query data from coordinator node
+   */
+  InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException;
+
+  /**
+   * Read batch data of all queried series in the request and set it in the response.
+   *
+   * @param request request of querying series
+   */
+  QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
+      throws IOException;
+
+  /**
+   * Read batch data of select series by batch timestamps, used in queries with a value filter.
+   *
+   * @param request request of querying select paths
+   */
+  QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
+      QuerySeriesDataByTimestampRequest request) throws IOException;
+
+  /**
+   * Close the query resources of a task.
+   *
+   * @param taskId task id of local single query manager
+   */
+  void close(String taskId) throws FileNodeManagerException;
+
+
+  /**
+   * Get query manager by taskId
+   *
+   * @param taskId task id assigned by ClusterRpcQueryManager
+   */
+  ClusterLocalSingleQueryManager getSingleQuery(String taskId);
+
+  /**
+   * Get all read usage counts grouped by data group id; the key is the group id and the value is
+   * the usage count.
+   */
+  Map<String, Integer> getAllReadUsage();
+}
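A hypothetical handler sketch, shown only to illustrate how a query node could delegate incoming RPC requests to this manager; the class and method names here are illustrative, not part of the IoTDB codebase:

    import java.io.IOException;
    import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
    import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;

    public class QueryRpcHandlerSketch {
      private final IClusterLocalQueryManager manager;

      public QueryRpcHandlerSketch(IClusterLocalQueryManager manager) {
        this.manager = manager;
      }

      // Delegate a fetch request; the manager routes it to the right
      // single-query manager by the task id carried in the request.
      public QuerySeriesDataResponse onReadBatch(QuerySeriesDataRequest request)
          throws IOException {
        return manager.readBatchData(request);
      }
    }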
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
new file mode 100644
index 0000000..318772f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.querynode;
+
+import java.io.IOException;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+
+/**
+ * <p>
+ * Manage all series readers of one query on a query node, cooperating with the coordinator node
+ * to serve a client query
+ * </p>
+ */
+public interface IClusterLocalSingleQueryManager {
+
+  /**
+   * Create the corresponding series readers when the query is initialized.
+   * @param request request of querying series data
+   */
+  InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException;
+
+  /**
+   * <p>
+   * Read batch data. If the query round in the cache equals the target query round, the previous
+   * batch failed to transfer from the query node to the coordinator, so the cached batch data is
+   * returned again.
+   * </p>
+   *
+   * @param request request of querying series data
+   */
+  QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
+      throws IOException;
+
+  /**
+   * Read batch data of select paths by timestamp
+   */
+  QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
+      QuerySeriesDataByTimestampRequest request) throws IOException;
+
+  /**
+   * Reset and restart the query timer.
+   */
+  void resetQueryTimer();
+
+  /**
+   * Release all query resources.
+   */
+  void close() throws FileNodeManagerException;
+}
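resetQueryTimer() implements a keep-alive: every incoming request cancels the pending timeout and schedules a fresh one, so an abandoned query is eventually closed and its resources released. A minimal sketch of that idea with illustrative names:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    class QueryWatchdogSketch {
      private final ScheduledExecutorService pool =
          Executors.newSingleThreadScheduledExecutor();
      private ScheduledFuture<?> timer;

      // Called on every request: drop the old timeout and start a fresh one.
      synchronized void reset(Runnable closeTask, long timeoutMs) {
        if (timer != null) {
          timer.cancel(false);
        }
        timer = pool.schedule(closeTask, timeoutMs, TimeUnit.MILLISECONDS);
      }
    }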
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
new file mode 100644
index 0000000..72c7c70
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.coordinatornode;
+
+import java.io.IOException;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.db.utils.TimeValuePairUtils;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * Cluster point reader
+ */
+public abstract class AbstractClusterPointReader implements IPointReader {
+
+  /**
+   * Current time value pair
+   */
+  protected TimeValuePair currentTimeValuePair;
+
+  /**
+   * Current batch data
+   */
+  protected BatchData currentBatchData;
+
+  @Override
+  public boolean hasNext() throws IOException {
+    if (currentBatchData == null || !currentBatchData.hasNext()) {
+      try {
+        updateCurrentBatchData();
+      } catch (RaftConnectionException e) {
+        throw new IOException(e);
+      }
+      if (currentBatchData == null || !currentBatchData.hasNext()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Update current batch data, fetching a new batch from the remote query node if necessary.
+   */
+  protected abstract void updateCurrentBatchData() throws RaftConnectionException;
+
+  @Override
+  public TimeValuePair next() throws IOException {
+    if (hasNext()) {
+      TimeValuePair timeValuePair = TimeValuePairUtils.getCurrentTimeValuePair(currentBatchData);
+      currentTimeValuePair = timeValuePair;
+      currentBatchData.next();
+      return timeValuePair;
+    }
+    return null;
+  }
+}
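Because hasNext() transparently pulls the next batch from the remote query node, callers can treat any subclass as a plain local point reader. A small usage sketch; the helper class and method are illustrative only:

    import java.io.IOException;
    import org.apache.iotdb.db.query.reader.IPointReader;

    final class PointReaderUtilSketch {
      // Count all remaining points; remote batch fetches happen inside hasNext().
      static long countPoints(IPointReader reader) throws IOException {
        long n = 0;
        while (reader.hasNext()) {
          reader.next();
          n++;
        }
        return n;
      }
    }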
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
new file mode 100644
index 0000000..805d3af
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.coordinatornode;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Path;
+
+/**
+ * Filter series reader used in the coordinator node.
+ */
+public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
+
+  /**
+   * Data group id
+   */
+  private String groupId;
+
+  /**
+   * Manager of the whole query
+   */
+  private ClusterRpcSingleQueryManager queryManager;
+
+  /**
+   * Series name
+   */
+  private Path seriesPath;
+
+  /**
+   * Data type
+   */
+  private TSDataType dataType;
+
+  /**
+   * Batch data
+   */
+  private LinkedList<BatchData> batchDataList;
+
+  /**
+   * Mark whether the remote query node has no more data
+   */
+  private boolean remoteDataFinish;
+
+  public ClusterFilterSeriesReader(String groupId, Path seriesPath, TSDataType dataType,
+      ClusterRpcSingleQueryManager queryManager) {
+    this.groupId = groupId;
+    this.seriesPath = seriesPath;
+    this.dataType = dataType;
+    this.queryManager = queryManager;
+    this.batchDataList = new LinkedList<>();
+    remoteDataFinish = false;
+  }
+
+  @Override
+  public TimeValuePair current() throws IOException {
+    return currentTimeValuePair;
+  }
+
+  /**
+   * Update current batch data, fetching a new batch from the remote query node if necessary.
+   */
+  @Override
+  protected void updateCurrentBatchData() throws RaftConnectionException {
+    if (batchDataList.isEmpty() && !remoteDataFinish) {
+      queryManager.fetchBatchDataForFilterPaths(groupId);
+    }
+    if (!batchDataList.isEmpty()) {
+      currentBatchData = batchDataList.removeFirst();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    //Do nothing
+  }
+
+  public Path getSeriesPath() {
+    return seriesPath;
+  }
+
+  public void setSeriesPath(Path seriesPath) {
+    this.seriesPath = seriesPath;
+  }
+
+  public TSDataType getDataType() {
+    return dataType;
+  }
+
+  public void setDataType(TSDataType dataType) {
+    this.dataType = dataType;
+  }
+
+  public BatchData getCurrentBatchData() {
+    return currentBatchData;
+  }
+
+  public void setCurrentBatchData(BatchData currentBatchData) {
+    this.currentBatchData = currentBatchData;
+  }
+
+  public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
+    batchDataList.addLast(batchData);
+    this.remoteDataFinish = remoteDataFinish;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
new file mode 100644
index 0000000..0a507d5
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.coordinatornode;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Path;
+
+/**
+ * Select series reader used in the coordinator node.
+ */
+public class ClusterSelectSeriesReader extends AbstractClusterPointReader implements
+    EngineReaderByTimeStamp {
+
+  /**
+   * Data group id
+   */
+  private String groupId;
+
+  /**
+   * Manager of the whole query
+   */
+  private ClusterRpcSingleQueryManager queryManager;
+
+  /**
+   * Series name
+   */
+  private Path seriesPath;
+
+  /**
+   * Data type
+   */
+  private TSDataType dataType;
+
+  /**
+   * Batch data
+   */
+  private LinkedList<BatchData> batchDataList;
+
+  /**
+   * Mark whether the remote query node has no more data
+   */
+  private boolean remoteDataFinish;
+
+  public ClusterSelectSeriesReader(String groupId, Path seriesPath, TSDataType dataType,
+      ClusterRpcSingleQueryManager queryManager) {
+    this.groupId = groupId;
+    this.seriesPath = seriesPath;
+    this.dataType = dataType;
+    this.queryManager = queryManager;
+    this.batchDataList = new LinkedList<>();
+    this.remoteDataFinish = false;
+  }
+
+  @Override
+  public TimeValuePair current() throws IOException {
+    return currentTimeValuePair;
+  }
+
+  @Override
+  public Object getValueInTimestamp(long timestamp) throws IOException {
+    if (currentTimeValuePair != null && currentTimeValuePair.getTimestamp() == timestamp) {
+      return currentTimeValuePair.getValue().getValue();
+    } else if (currentTimeValuePair != null && currentTimeValuePair.getTimestamp() > timestamp) {
+      return null;
+    }
+    while (true) {
+      if (hasNext()) {
+        next();
+        if (currentTimeValuePair.getTimestamp() == timestamp) {
+          return currentTimeValuePair.getValue().getValue();
+        } else if (currentTimeValuePair.getTimestamp() > timestamp) {
+          return null;
+        }
+      } else {
+        return null;
+      }
+    }
+  }
+
+  /**
+   * Update current batch data, fetching a new batch from the remote query node if necessary.
+   */
+  @Override
+  protected void updateCurrentBatchData() throws RaftConnectionException {
+    if (batchDataList.isEmpty() && !remoteDataFinish) {
+      queryManager.fetchBatchDataForSelectPaths(groupId);
+    }
+    if (!batchDataList.isEmpty()) {
+      currentBatchData = batchDataList.removeFirst();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    batchDataList = null;
+  }
+
+  public Path getSeriesPath() {
+    return seriesPath;
+  }
+
+  public void setSeriesPath(Path seriesPath) {
+    this.seriesPath = seriesPath;
+  }
+
+  public TSDataType getDataType() {
+    return dataType;
+  }
+
+  public void setDataType(TSDataType dataType) {
+    this.dataType = dataType;
+  }
+
+  public BatchData getCurrentBatchData() {
+    return currentBatchData;
+  }
+
+  public void setCurrentBatchData(BatchData currentBatchData) {
+    this.currentBatchData = currentBatchData;
+  }
+
+  public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
+    batchDataList.addLast(batchData);
+    this.remoteDataFinish = remoteDataFinish;
+  }
+
+  public boolean isRemoteDataFinish() {
+    return remoteDataFinish;
+  }
+
+  public void setRemoteDataFinish(boolean remoteDataFinish) {
+    this.remoteDataFinish = remoteDataFinish;
+  }
+
+  /**
+   * Check whether this series still needs to fetch data from the remote query node.
+   */
+  public boolean enableFetchData() {
+    return !remoteDataFinish
+        && batchDataList.size() <= ClusterDescriptor.getInstance().getConfig()
+        .getMaxCachedBatchDataListSize();
+  }
+}
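Note that getValueInTimestamp(...) only moves forward: once the reader has passed a timestamp, an earlier probe returns null. Callers are therefore expected to probe timestamps in non-decreasing order, as a time generator does. An illustrative sketch under that assumption:

    static void probe(ClusterSelectSeriesReader reader) throws java.io.IOException {
      Object v1 = reader.getValueInTimestamp(100); // advances the reader to t == 100
      Object v2 = reader.getValueInTimestamp(250); // fine: 250 >= 100
      Object v3 = reader.getValueInTimestamp(50);  // null: the reader is already past t == 50
    }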
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
similarity index 61%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
index c1bcf5f..b0a86bd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
@@ -16,23 +16,24 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.List;
-import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.query.reader.IBatchReader;
+import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Handle request to data group
+ * Cluster batch reader, which additionally provides a method to get batch data by a list of timestamps.
  */
-public class DataGroupNonQueryRequest extends BasicRequest implements Serializable {
+public abstract class AbstractClusterBatchReader implements IBatchReader {
 
-
-  public DataGroupNonQueryRequest(String groupID, List<PhysicalPlan> physicalPlanBytes)
-      throws IOException {
-    super(groupID);
-    init(physicalPlanBytes);
-  }
+  /**
+   * Get batch data by a batch of timestamps.
+   *
+   * @param batchTime list of valid timestamps
+   * @return corresponding batch data
+   */
+  public abstract BatchData nextBatch(List<Long> batchTime) throws IOException;
 
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
new file mode 100644
index 0000000..b8c36eb
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.querynode;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * Batch reader by timestamp for cluster, used in the query node.
+ */
+public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
+
+  /**
+   * Reader
+   */
+  private EngineReaderByTimeStamp readerByTimeStamp;
+
+  /**
+   * Data type
+   */
+  private TSDataType dataType;
+
+  public ClusterBatchReaderByTimestamp(
+      EngineReaderByTimeStamp readerByTimeStamp,
+      TSDataType dataType) {
+    this.readerByTimeStamp = readerByTimeStamp;
+    this.dataType = dataType;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return readerByTimeStamp.hasNext();
+  }
+
+  @Override
+  public BatchData nextBatch() throws IOException {
+    throw new UnsupportedOperationException(
+        "nextBatch() is not supported in ClusterBatchReaderByTimestamp; use nextBatch(List<Long>) instead.");
+  }
+
+
+  @Override
+  public void close() throws IOException {
+    // do nothing
+  }
+
+  @Override
+  public BatchData nextBatch(List<Long> batchTime) throws IOException {
+    BatchData batchData = new BatchData(dataType, true);
+    for (long time : batchTime) {
+      Object value = readerByTimeStamp.getValueInTimestamp(time);
+      if (value != null) {
+        batchData.putTime(time);
+        batchData.putAnObject(value);
+      }
+    }
+    return batchData;
+  }
+
+  public EngineReaderByTimeStamp getReaderByTimeStamp() {
+    return readerByTimeStamp;
+  }
+
+  public TSDataType getDataType() {
+    return dataType;
+  }
+}
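A usage sketch for nextBatch(List<Long>): only the timestamps that actually have a value end up in the returned BatchData, which is then iterated like any other batch. The helper name is illustrative; imports match those used in AbstractClusterPointReader above:

    import java.io.IOException;
    import java.util.List;
    import org.apache.iotdb.db.utils.TimeValuePair;
    import org.apache.iotdb.db.utils.TimeValuePairUtils;
    import org.apache.iotdb.tsfile.read.common.BatchData;

    final class BatchByTimestampSketch {
      static void printBatch(ClusterBatchReaderByTimestamp reader, List<Long> batchTime)
          throws IOException {
        // batchTime comes from the coordinator's time generator, in ascending order
        BatchData data = reader.nextBatch(batchTime);
        while (data.hasNext()) {
          TimeValuePair pair = TimeValuePairUtils.getCurrentTimeValuePair(data);
          System.out.println(pair.getTimestamp() + " -> " + pair.getValue().getValue());
          data.next();
        }
      }
    }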
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
new file mode 100644
index 0000000..f3d443f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.querynode;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * Batch reader without time generator for cluster, used in the query node.
+ */
+public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatchReader {
+
+  /**
+   * Data type
+   */
+  private TSDataType dataType;
+
+  /**
+   * Point reader
+   */
+  private IPointReader reader;
+
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
+  public ClusterBatchReaderWithoutTimeGenerator(
+      TSDataType dataType, IPointReader reader) {
+    this.dataType = dataType;
+    this.reader = reader;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return reader.hasNext();
+  }
+
+  @Override
+  public BatchData nextBatch() throws IOException {
+    BatchData batchData = new BatchData(dataType, true);
+    for (int i = 0; i < CLUSTER_CONF.getBatchReadSize(); i++) {
+      if (hasNext()) {
+        TimeValuePair pair = reader.next();
+        batchData.putTime(pair.getTimestamp());
+        batchData.putAnObject(pair.getValue().getValue());
+      } else {
+        break;
+      }
+    }
+    return batchData;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (reader != null) {
+      reader.close();
+    }
+  }
+
+  @Override
+  public BatchData nextBatch(List<Long> batchTime) throws IOException {
+    throw new UnsupportedOperationException(
+        "nextBatch(List<Long> batchTime) is not supported in ClusterBatchReaderWithoutTimeGenerator.");
+  }
+
+  public TSDataType getDataType() {
+    return dataType;
+  }
+
+  public IPointReader getReader() {
+    return reader;
+  }
+}
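Each call to nextBatch() packs at most getBatchReadSize() points from the underlying point reader, so a consumer drains the series batch by batch. An illustrative sketch (helper name is hypothetical):

    import java.io.IOException;
    import org.apache.iotdb.tsfile.read.common.BatchData;

    final class DrainSketch {
      static long drain(ClusterBatchReaderWithoutTimeGenerator reader) throws IOException {
        long count = 0;
        while (reader.hasNext()) {
          BatchData batch = reader.nextBatch(); // at most getBatchReadSize() points
          while (batch.hasNext()) {
            count++;
            batch.next();
          }
        }
        return count;
      }
    }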
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
new file mode 100644
index 0000000..6690999
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.reader.querynode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.tsfile.read.common.Field;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+
+/**
+ * Batch reader for all filter paths.
+ */
+public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatchReader {
+
+  private List<Path> allFilterPath;
+
+  private List<Filter> filters;
+
+  private QueryDataSet queryDataSet;
+
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
+  public ClusterFilterSeriesBatchReader(QueryDataSet queryDataSet, List<Path> allFilterPath,
+      List<Filter> filters) {
+    this.queryDataSet = queryDataSet;
+    this.allFilterPath = allFilterPath;
+    this.filters = filters;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return queryDataSet.hasNext();
+  }
+
+  /**
+   * Get batch data of all filter series for the next batch of timestamps, which is determined by
+   * <code>queryDataSet</code>.
+   */
+  @Override
+  public List<BatchData> nextBatchList() throws IOException {
+    List<BatchData> batchDataList = new ArrayList<>(allFilterPath.size());
+    List<TSDataType> dataTypeList = queryDataSet.getDataTypes();
+    for (int i = 0; i < allFilterPath.size(); i++) {
+      batchDataList.add(new BatchData(dataTypeList.get(i), true));
+    }
+    int dataPointCount = 0;
+    while (hasNext() && dataPointCount < CLUSTER_CONF.getBatchReadSize()) {
+      if (addTimeValuePair(batchDataList, dataTypeList)) {
+        dataPointCount++;
+      }
+    }
+    return batchDataList;
+  }
+
+  /**
+   * Add the time-value pairs of one row to the batch data lists.
+   *
+   * @return true if at least one field is non-null and satisfies its filter
+   */
+  private boolean addTimeValuePair(List<BatchData> batchDataList, List<TSDataType> dataTypeList)
+      throws IOException {
+    boolean hasField = false;
+    RowRecord rowRecord = queryDataSet.next();
+    long time = rowRecord.getTimestamp();
+    List<Field> fieldList = rowRecord.getFields();
+    for (int j = 0; j < allFilterPath.size(); j++) {
+      if (!fieldList.get(j).isNull()) {
+        BatchData batchData = batchDataList.get(j);
+        Object value = fieldList.get(j).getObjectValue(dataTypeList.get(j));
+        if (filters.get(j).satisfy(time, value)) {
+          hasField = true;
+          batchData.putTime(time);
+          batchData.putAnObject(value);
+        }
+      }
+    }
+    return hasField;
+  }
+
+  public List<Path> getAllFilterPath() {
+    return allFilterPath;
+  }
+
+  public void setAllFilterPath(List<Path> allFilterPath) {
+    this.allFilterPath = allFilterPath;
+  }
+
+  public QueryDataSet getQueryDataSet() {
+    return queryDataSet;
+  }
+
+  public void setQueryDataSet(QueryDataSet queryDataSet) {
+    this.queryDataSet = queryDataSet;
+  }
+}
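The i-th BatchData returned by nextBatchList() lines up with allFilterPath.get(i): a row contributes a point to list i only when its i-th field is non-null and satisfies filters.get(i). A consuming sketch with an illustrative helper:

    import java.io.IOException;
    import java.util.List;
    import org.apache.iotdb.tsfile.read.common.BatchData;
    import org.apache.iotdb.tsfile.read.common.Path;

    final class FilterBatchConsumerSketch {
      static void consume(IClusterFilterSeriesBatchReader reader, List<Path> allFilterPath)
          throws IOException {
        while (reader.hasNext()) {
          List<BatchData> batchList = reader.nextBatchList();
          for (int i = 0; i < batchList.size(); i++) {
            // batchList.get(i) holds the satisfying points of allFilterPath.get(i)
            BatchData series = batchList.get(i);
          }
        }
      }
    }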
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
similarity index 68%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
index 69625ff..218d68b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
@@ -16,22 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.List;
-import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Handle request to metadata group leader
+ * Batch reader for filter series, used in the query node.
  */
-public class MetaGroupNonQueryRequest extends BasicRequest implements Serializable {
+public interface IClusterFilterSeriesBatchReader {
 
-  public MetaGroupNonQueryRequest(String groupID, List<PhysicalPlan> plans)
-      throws IOException {
-    super(groupID);
-    this.init(plans);
-  }
+  boolean hasNext() throws IOException;
 
+  /**
+   * Get next batch data of all filter series.
+   */
+  List<BatchData> nextBatchList() throws IOException;
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterLeafNode.java
similarity index 56%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterLeafNode.java
index c1bcf5f..39d4be4 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterLeafNode.java
@@ -16,23 +16,34 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.query.timegenerator;
 
 import java.io.IOException;
-import java.io.Serializable;
-import java.util.List;
-import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.query.reader.IReader;
+import org.apache.iotdb.tsfile.read.query.timegenerator.node.Node;
+import org.apache.iotdb.tsfile.read.query.timegenerator.node.NodeType;
 
-/**
- * Handle request to data group
- */
-public class DataGroupNonQueryRequest extends BasicRequest implements Serializable {
+public class ClusterLeafNode implements Node {
+
+  private IReader reader;
+
+  public ClusterLeafNode(IReader reader) {
+    this.reader = reader;
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return reader.hasNext();
+  }
 
+  @Override
+  public long next() throws IOException {
+    return reader.next().getTimestamp();
+  }
 
-  public DataGroupNonQueryRequest(String groupID, List<PhysicalPlan> physicalPlanBytes)
-      throws IOException {
-    super(groupID);
-    init(physicalPlanBytes);
+  @Override
+  public NodeType getType() {
+    return NodeType.LEAF;
   }
 
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
new file mode 100644
index 0000000..639dce8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.timegenerator;
+
+import static org.apache.iotdb.tsfile.read.expression.ExpressionType.SERIES;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.timegenerator.AbstractNodeConstructor;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
+import org.apache.iotdb.tsfile.read.query.timegenerator.node.Node;
+
+public class ClusterNodeConstructor extends AbstractNodeConstructor {
+
+  /**
+   * Single query manager
+   */
+  private ClusterRpcSingleQueryManager queryManager;
+
+  /**
+   * Filter series readers grouped by group id
+   */
+  private Map<String, List<ClusterFilterSeriesReader>> filterSeriesReadersByGroupId;
+
+  /**
+   * Next filter series reader index for each group id
+   */
+  private Map<String, Integer> filterSeriesReaderIndex;
+
+  public ClusterNodeConstructor(ClusterRpcSingleQueryManager queryManager) {
+    this.queryManager = queryManager;
+    this.filterSeriesReadersByGroupId = new HashMap<>();
+    this.filterSeriesReaderIndex = new HashMap<>();
+    this.init(queryManager);
+  }
+
+  /**
+   * Initialize the filter series readers for each group id.
+   */
+  private void init(ClusterRpcSingleQueryManager queryManager) {
+    Map<String, FilterGroupEntity> filterGroupEntityMap = queryManager.getFilterGroupEntityMap();
+    filterGroupEntityMap.forEach(
+        (key, value) -> filterSeriesReadersByGroupId.put(key, value.getFilterSeriesReaders()));
+    filterSeriesReadersByGroupId.forEach((key, value) -> filterSeriesReaderIndex.put(key, 0));
+  }
+
+  /**
+   * Construct expression node.
+   *
+   * @param expression expression
+   * @return Node object
+   * @throws IOException IOException
+   * @throws FileNodeManagerException FileNodeManagerException
+   */
+  @Override
+  public Node construct(IExpression expression, QueryContext context)
+      throws FileNodeManagerException {
+    if (expression.getType() == SERIES) {
+      try {
+        Path seriesPath = ((SingleSeriesExpression) expression).getSeriesPath();
+        String groupId = QPExecutorUtils.getGroupIdByDevice(seriesPath.getDevice());
+        if (filterSeriesReadersByGroupId.containsKey(groupId)) {
+          List<ClusterFilterSeriesReader> seriesReaders = filterSeriesReadersByGroupId.get(groupId);
+          int readerIndex = filterSeriesReaderIndex.get(groupId);
+          filterSeriesReaderIndex.put(groupId, readerIndex + 1);
+          return new ClusterLeafNode(seriesReaders.get(readerIndex));
+        } else {
+          queryManager.addDataGroupUsage(groupId);
+          return new ClusterLeafNode(generateSeriesReader((SingleSeriesExpression) expression,
+              context));
+        }
+      } catch (IOException | PathErrorException e) {
+        throw new FileNodeManagerException(e);
+      }
+    } else {
+      return constructNotSeriesNode(expression, context);
+    }
+  }
+}
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/EngineTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterTimeGenerator.java
similarity index 67%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/EngineTimeGenerator.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterTimeGenerator.java
index 350ea6f..f2b72d1 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/EngineTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterTimeGenerator.java
@@ -16,9 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.db.query.timegenerator;
+package org.apache.iotdb.cluster.query.timegenerator;
 
 import java.io.IOException;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.tsfile.read.common.Path;
@@ -26,27 +27,24 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 import org.apache.iotdb.tsfile.read.query.timegenerator.node.Node;
 
-/**
- * A timestamp generator for query with filter. e.g. For query clause "select s1, s2 form root where
- * s3 < 0 and time > 100", this class can iterate back to every timestamp of the query.
- */
-public class EngineTimeGenerator implements TimeGenerator {
-
+public class ClusterTimeGenerator implements TimeGenerator {
   private IExpression expression;
   private Node operatorNode;
 
   /**
-   * Constructor of EngineTimeGenerator.
+   * Constructor of Cluster TimeGenerator.
    */
-  public EngineTimeGenerator(IExpression expression, QueryContext context)
+  public ClusterTimeGenerator(IExpression expression, QueryContext context,
+      ClusterRpcSingleQueryManager queryManager)
       throws FileNodeManagerException {
     this.expression = expression;
-    initNode(context);
+    initNode(context, queryManager);
   }
 
-  private void initNode(QueryContext context) throws FileNodeManagerException {
-    EngineNodeConstructor engineNodeConstructor = new EngineNodeConstructor();
-    this.operatorNode = engineNodeConstructor.construct(expression, context);
+  private void initNode(QueryContext context, ClusterRpcSingleQueryManager queryManager)
+      throws FileNodeManagerException {
+    ClusterNodeConstructor nodeConstructor = new ClusterNodeConstructor(queryManager);
+    this.operatorNode = nodeConstructor.construct(expression, context);
   }
 
   @Override
@@ -60,9 +58,7 @@ public class EngineTimeGenerator implements TimeGenerator {
   }
 
   @Override
-  public Object getValue(Path path, long time) {
-    // TODO implement the optimization
+  public Object getValue(Path path, long time) throws IOException {
     return null;
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
new file mode 100644
index 0000000..c3df421
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.QueryTask;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+/**
+ * Utils for cluster readers that need to acquire data from a remote query node.
+ */
+public class ClusterRpcReaderUtils {
+
+  /**
+   * Maximum number of times to retry a task
+   */
+  private static final int TASK_MAX_RETRY = ClusterDescriptor.getInstance().getConfig()
+      .getQpTaskRedoCount();
+
+  /**
+   * Create cluster series reader
+   *
+   * @param peerId query node to fetch data
+   * @param readDataConsistencyLevel consistency level of read data
+   * @param taskId task id assigned by coordinator node
+   */
+  public static BasicResponse createClusterSeriesReader(String groupId, PeerId peerId,
+      int readDataConsistencyLevel, Map<PathType, QueryPlan> allQueryPlan, String taskId,
+      List<Filter> filterList)
+      throws IOException, RaftConnectionException {
+
+    // handle request
+    BasicRequest request = InitSeriesReaderRequest
+        .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+            allQueryPlan, filterList);
+    return handleQueryRequest(request, peerId, 0);
+  }
+
+  public static QuerySeriesDataResponse fetchBatchData(String groupID, PeerId peerId, String taskId,
+      PathType pathType, List<String> fetchDataSeries, long queryRounds)
+      throws RaftConnectionException {
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupID, taskId, pathType, fetchDataSeries, queryRounds);
+    return (QuerySeriesDataResponse) handleQueryRequest(request, peerId, 0);
+  }
+
+  public static QuerySeriesDataByTimestampResponse fetchBatchDataByTimestamp(String groupId,
+      PeerId peerId, String taskId, long queryRounds, List<Long> batchTimestamp,
+      List<String> fetchDataSeries)
+      throws RaftConnectionException {
+    BasicRequest request = QuerySeriesDataByTimestampRequest
+        .createRequest(groupId, queryRounds, taskId, batchTimestamp, fetchDataSeries);
+    return (QuerySeriesDataByTimestampResponse) handleQueryRequest(request, peerId, 0);
+  }
+
+  /**
+   * Release remote query resources
+   *
+   * @param groupId data group id
+   * @param peerId target query node
+   * @param taskId unique task id
+   */
+  public static void releaseRemoteQueryResource(String groupId, PeerId peerId, String taskId)
+      throws RaftConnectionException {
+
+    BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
+    handleQueryRequest(request, peerId, 0);
+  }
+
+  /**
+   * Send query request to remote node and return response
+   *
+   * @param request query request
+   * @param peerId target remote query node
+   * @param taskRetryNum current retry count of the request
+   * @return Response from remote query node
+   */
+  private static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
+      int taskRetryNum)
+      throws RaftConnectionException {
+    if (taskRetryNum > TASK_MAX_RETRY) {
+      throw new RaftConnectionException(
+          String.format("Query request retries reach the upper bound %s",
+              TASK_MAX_RETRY));
+    }
+    NodeAsClient nodeAsClient = RaftUtils.getRaftNodeAsClient();
+    QueryTask queryTask = nodeAsClient.syncHandleRequest(request, peerId);
+    if (queryTask.getState() == TaskState.FINISH) {
+      return queryTask.getBasicResponse();
+    } else {
+      return handleQueryRequest(request, peerId, taskRetryNum + 1);
+    }
+  }
+}
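handleQueryRequest expresses its bounded retry recursively. The loop below is a behavior-equivalent sketch of the same policy (TASK_MAX_RETRY + 1 attempts in total), shown only for clarity; it is not the shipped implementation:

    private static BasicResponse handleQueryRequestIteratively(BasicRequest request, PeerId peerId)
        throws RaftConnectionException {
      for (int attempt = 0; attempt <= TASK_MAX_RETRY; attempt++) {
        QueryTask queryTask = RaftUtils.getRaftNodeAsClient().syncHandleRequest(request, peerId);
        if (queryTask.getState() == TaskState.FINISH) {
          return queryTask.getBasicResponse();
        }
      }
      throw new RaftConnectionException(
          String.format("Query request retries reach the upper bound %s", TASK_MAX_RETRY));
    }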
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
new file mode 100644
index 0000000..0024138
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import static org.apache.iotdb.tsfile.read.expression.ExpressionType.AND;
+import static org.apache.iotdb.tsfile.read.expression.ExpressionType.OR;
+import static org.apache.iotdb.tsfile.read.expression.ExpressionType.SERIES;
+import static org.apache.iotdb.tsfile.read.expression.ExpressionType.TRUE;
+
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.expression.TrueExpression;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.ExpressionType;
+import org.apache.iotdb.tsfile.read.expression.IBinaryExpression;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
+
+public class ExpressionUtils {
+
+  private ExpressionUtils() {
+  }
+
+  /**
+   * Collect all series paths of the expression, grouped by group id.
+   */
+  public static void getAllExpressionSeries(IExpression expression,
+      Map<String, FilterGroupEntity> filterGroupEntityMap)
+      throws PathErrorException {
+    if (expression.getType() == ExpressionType.SERIES) {
+      Path path = ((SingleSeriesExpression) expression).getSeriesPath();
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!filterGroupEntityMap.containsKey(groupId)) {
+        filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
+      }
+      FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
+      filterGroupEntity.addFilterPaths(path);
+      filterGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
+    } else if (expression.getType() == OR || expression.getType() == AND) {
+      getAllExpressionSeries(((IBinaryExpression) expression).getLeft(), filterGroupEntityMap);
+      getAllExpressionSeries(((IBinaryExpression) expression).getRight(), filterGroupEntityMap);
+    } else {
+      throw new UnSupportedDataTypeException(
+          "Unsupported expression type when collecting filter series: " + expression.getType());
+    }
+  }
+
+  /**
+   * Prune the filter tree for a single data group: series that do not belong to the group are
+   * replaced by TRUE nodes, which are then folded away.
+   *
+   * @param pathList all paths of a data group
+   */
+  public static IExpression pruneFilterTree(IExpression expression, List<Path> pathList) {
+    if (expression.getType() == SERIES) {
+      if (pathList.contains(((SingleSeriesExpression) expression).getSeriesPath())) {
+        return expression;
+      } else {
+        return new TrueExpression();
+      }
+    } else if (expression.getType() == OR) {
+      return pruneOrFilterTree(expression, pathList);
+    } else if (expression.getType() == AND) {
+      return pruneAndFilterTree(expression, pathList);
+    } else {
+      throw new UnSupportedDataTypeException(
+          "Unsupported ExpressionType when pruning filter tree: " + expression.getType());
+    }
+  }
+
+  /**
+   * Prune an OR filter tree.
+   *
+   * @param expression origin expression
+   * @param pathList all series path of the same data group
+   */
+  private static IExpression pruneOrFilterTree(IExpression expression, List<Path> pathList) {
+    IExpression left = pruneFilterTree(((BinaryExpression) expression).getLeft(), pathList);
+    IExpression right = pruneFilterTree(((BinaryExpression) expression).getRight(), pathList);
+    if (left.getType() == TRUE || right.getType() == TRUE) {
+      return new TrueExpression();
+    } else {
+      ((BinaryExpression) expression).setLeft(left);
+      ((BinaryExpression) expression).setRight(right);
+      return expression;
+    }
+  }
+
+  /**
+   * Prune an AND filter tree.
+   *
+   * @param expression origin expression
+   * @param pathList all series path of the same data group
+   */
+  private static IExpression pruneAndFilterTree(IExpression expression, List<Path> pathList) {
+    IExpression left = pruneFilterTree(((BinaryExpression) expression).getLeft(), pathList);
+    IExpression right = pruneFilterTree(((BinaryExpression) expression).getRight(), pathList);
+    if (left.getType() == TRUE && right.getType() == TRUE) {
+      return new TrueExpression();
+    } else if (left.getType() == TRUE) {
+      return right;
+    } else if (right.getType() == TRUE) {
+      return left;
+    } else {
+      ((BinaryExpression) expression).setLeft(left);
+      ((BinaryExpression) expression).setRight(right);
+      return expression;
+    }
+  }
+
+}
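A worked example of the pruning rules: suppose data group G owns only series s1, and the filter tree is (s1 < 0) AND ((s3 > 10) OR (s1 > 100)). Pruning for G proceeds bottom-up:

    // (s3 > 10)                -> TRUE        s3 is not owned by G
    // (s3 > 10) OR (s1 > 100)  -> TRUE        an OR with a TRUE child cannot be
    //                                         evaluated locally, so the whole
    //                                         subtree becomes TRUE
    // (s1 < 0) AND TRUE        -> (s1 < 0)    AND simply drops TRUE children

So group G locally evaluates only s1 < 0, and the coordinator recombines the partial results across groups.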
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
new file mode 100644
index 0000000..4f7a5fe
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.ExpressionType;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+/**
+ * Utils for splitting a query plan into several sub query plans by group id.
+ */
+public class QueryPlanPartitionUtils {
+
+  private QueryPlanPartitionUtils() {
+
+  }
+
+  /**
+   * Split query plan with no filter or with only global time filter by group id
+   */
+  public static void splitQueryPlanWithoutValueFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitQueryPlanBySelectPath(singleQueryManager);
+  }
+
+  /**
+   * Split query plan by select paths
+   */
+  private static void splitQueryPlanBySelectPath(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
+    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    List<Path> selectPaths = queryPlan.getPaths();
+    for (Path path : selectPaths) {
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectSeriesByGroupId.containsKey(groupId)) {
+        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectSeriesByGroupId.get(groupId).add(path);
+    }
+    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+      String groupId = entry.getKey();
+      List<Path> paths = entry.getValue();
+      QueryPlan subQueryPlan = new QueryPlan();
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      selectPathPlans.put(groupId, subQueryPlan);
+    }
+  }
+
+  /**
+   * Split query plan with value filter.
+   */
+  public static void splitQueryPlanWithValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlan((GroupByPlan) queryPlan, singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlan((AggregationPlan) queryPlan, singleQueryManager);
+    } else {
+      splitQueryPlan(queryPlan, singleQueryManager);
+    }
+  }
+
+  private static void splitGroupByPlan(GroupByPlan queryPlan,
+      ClusterRpcSingleQueryManager singleQueryManager) {
+    throw new UnsupportedOperationException("Splitting a group-by plan is not supported yet");
+  }
+
+  private static void splitAggregationPlan(AggregationPlan aggregationPlan,
+      ClusterRpcSingleQueryManager singleQueryManager) {
+    throw new UnsupportedOperationException("Splitting an aggregation plan is not supported yet");
+  }
+
+  private static void splitQueryPlan(QueryPlan queryPlan,
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    splitQueryPlanBySelectPath(singleQueryManager);
+    // split query plan by filter path
+    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager.getFilterGroupEntityMap();
+    IExpression expression = queryPlan.getExpression();
+    ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
+    for (FilterGroupEntity filterGroupEntity : filterGroupEntityMap.values()) {
+      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
+      // create filter sub query plan
+      QueryPlan subQueryPlan = new QueryPlan();
+      subQueryPlan.setPaths(filterSeriesList);
+      IExpression subExpression = ExpressionUtils
+          .pruneFilterTree(expression.clone(), filterSeriesList);
+      if (subExpression.getType() != ExpressionType.TRUE) {
+        subQueryPlan.setExpression(subExpression);
+      }
+      filterGroupEntity.setQueryPlan(subQueryPlan);
+    }
+  }
+}
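
splitQueryPlanBySelectPath above is essentially a group-by on the owning data group followed by
one sub plan per bucket. A self-contained sketch of that bucketing step (groupIdOf is a toy
stand-in for QPExecutorUtils.getGroupIdByDevice, which really consults the hash router):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class SelectPathSplitDemo {

      // Stand-in for the router lookup: here the group id is simply the
      // first two path levels, e.g. "root.g1.d1.s1" -> "root.g1".
      static String groupIdOf(String path) {
        String[] levels = path.split("\\.");
        return levels[0] + "." + levels[1];
      }

      public static void main(String[] args) {
        List<String> selectPaths =
            List.of("root.g1.d1.s1", "root.g2.d1.s1", "root.g1.d2.s1");
        Map<String, List<String>> byGroup = new HashMap<>();
        for (String p : selectPaths) {
          // computeIfAbsent is a compact form of the containsKey/put dance.
          byGroup.computeIfAbsent(groupIdOf(p), g -> new ArrayList<>()).add(p);
        }
        // One sub query plan would then be built per entry.
        byGroup.forEach((g, paths) -> System.out.println(g + " -> " + paths));
      }
    }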
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
index ca5d238..bab1536 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@ -19,9 +19,10 @@
 package org.apache.iotdb.cluster.rpc.raft;
 
 import com.alipay.sofa.jraft.entity.PeerId;
-import org.apache.iotdb.cluster.qp.callback.QPTask;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.qp.task.QueryTask;
 
 /**
  * Handle the request and process the result as a client with the current node
@@ -30,21 +31,18 @@ public interface NodeAsClient {
 
   /**
    * Asynchronous processing requests
-   *
-   * @param leader leader node of the target group
-   * @param qpTask the QPTask to be executed
+   *
+   * @param leader leader node of the target group
+   * @param qpTask single QPTask to be executed
    */
   void asyncHandleRequest(BasicRequest request, PeerId leader,
-      QPTask qpTask) throws RaftConnectionException;
+      SingleQPTask qpTask) throws RaftConnectionException;
 
   /**
    * Synchronous processing requests
   *
+   * @param peerId leader node of the target group
+   * @return a QueryTask carrying the response and the final task state
-   * @param clientService client rpc service handle
-   * @param leader leader node of the target group
-   * @param qpTask the QPTask to be executed
    */
-  void syncHandleRequest(BasicRequest request, PeerId leader, QPTask qpTask)
+  QueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
       throws RaftConnectionException;
 
   /**
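
The synchronous path now reports its outcome through the returned task instead of mutating a
caller-supplied QPTask. A toy model of the new contract (stand-in types; the accessors on
QueryTask are assumptions, this patch does not show them):

    // Toy stand-ins mirroring the new synchronous contract.
    enum TaskState { FINISH, EXCEPTION }

    record QueryTask(Object response, TaskState state) {}

    interface SyncClient {
      QueryTask syncHandleRequest(Object request, String peerId);
    }

    public class SyncCallDemo {
      public static void main(String[] args) {
        SyncClient client = (request, peerId) -> new QueryTask("ok", TaskState.FINISH);
        QueryTask task = client.syncHandleRequest(new Object(), "127.0.0.1:8888");
        if (task.state() == TaskState.FINISH) {
          System.out.println("response = " + task.response());
        } else {
          // No exception escapes; the caller decides whether to retry another peer.
          System.out.println("remote call failed");
        }
      }
    }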
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
index fc5df44..19f1343 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
@@ -25,14 +25,16 @@ import com.alipay.sofa.jraft.option.CliOptions;
 import com.alipay.sofa.jraft.rpc.impl.cli.BoltCliClientService;
 import java.util.LinkedList;
 import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
-import org.apache.iotdb.cluster.qp.callback.QPTask;
-import org.apache.iotdb.cluster.qp.callback.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.QueryTask;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
@@ -53,8 +55,8 @@ public class RaftNodeAsClientManager {
   private static final int TASK_TIMEOUT_MS = CLUSTER_CONFIG.getQpTaskTimeout();
 
   /**
-   * Max valid number of @NodeAsClient usage, represent the number can run simultaneously at the
-   * same time
+   * Max valid number of @NodeAsClient instances, i.e. the number of clients that may run
+   * simultaneously
    */
   private static final int MAX_VALID_CLIENT_NUM = CLUSTER_CONFIG.getMaxNumOfInnerRpcClient();
 
@@ -81,17 +83,17 @@ public class RaftNodeAsClientManager {
   /**
    * Lock to update clientNumInUse
    */
-  private ReentrantLock resourceLock = new ReentrantLock();
+  private Lock resourceLock = new ReentrantLock();
 
   /**
-   * Mark whether system is shutting down
+   * Condition used to wait for an available client
    */
-  private volatile boolean isShuttingDown;
+  private Condition resourceCondition = resourceLock.newCondition();
 
   /**
-   * Interval of thread sleep, unit is millisecond.
+   * Mark whether system is shutting down
    */
-  private static final int THREAD_SLEEP_INTERVAL = 10;
+  private volatile boolean isShuttingDown;
 
   private RaftNodeAsClientManager() {
 
@@ -105,59 +107,38 @@ public class RaftNodeAsClientManager {
    * Try to get clientList, return null if num of queue clientList exceeds threshold.
    */
   public RaftNodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
+    resourceLock.lock();
     try {
-      resourceLock.lock();
       if (queueClientNum >= MAX_QUEUE_CLIENT_NUM) {
         throw new RaftConnectionException(String
             .format("Raft inner rpc clients have reached the max numbers %s",
                 CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
                     .getMaxQueueNumOfInnerRpcClient()));
       }
-      checkShuttingDown();
-      if (clientNumInUse.get() < MAX_VALID_CLIENT_NUM) {
-        clientNumInUse.incrementAndGet();
-        return getClient();
-      }
       queueClientNum++;
-    } finally {
-      resourceLock.unlock();
-    }
-    return tryToGetClient();
-  }
-
-  private void checkShuttingDown() throws RaftConnectionException {
-    if (isShuttingDown) {
-      throw new RaftConnectionException(
-          "Reject to provide RaftNodeAsClient client because cluster system is shutting down");
-    }
-  }
-
-  /**
-   * Check whether it can get the clientList
-   */
-  private RaftNodeAsClient tryToGetClient() throws RaftConnectionException {
-    for (; ; ) {
-      if (clientNumInUse.get() < MAX_VALID_CLIENT_NUM) {
-        resourceLock.lock();
-        try {
+      try {
+        while (true) {
           checkShuttingDown();
           if (clientNumInUse.get() < MAX_VALID_CLIENT_NUM) {
             clientNumInUse.incrementAndGet();
-            queueClientNum--;
             return getClient();
           }
-        } catch (RaftConnectionException e) {
-          queueClientNum--;
-          throw new RaftConnectionException(e);
-        } finally {
-          resourceLock.unlock();
+          resourceCondition.await();
         }
-      }
-      try {
-        Thread.sleep(THREAD_SLEEP_INTERVAL);
       } catch (InterruptedException e) {
+        // Restore the interrupt flag before translating the exception.
+        Thread.currentThread().interrupt();
         throw new RaftConnectionException("An error occurred when trying to get NodeAsClient", e);
+      } finally {
+        queueClientNum--;
       }
+    } finally {
+      resourceLock.unlock();
+    }
+  }
+
+  private void checkShuttingDown() throws RaftConnectionException {
+    if (isShuttingDown) {
+      throw new RaftConnectionException(
+          "Reject to provide RaftNodeAsClient client because cluster system is shutting down");
     }
   }
 
@@ -179,6 +160,7 @@ public class RaftNodeAsClientManager {
     resourceLock.lock();
     try {
       clientNumInUse.decrementAndGet();
+      resourceCondition.signalAll();
       clientList.addLast(client);
     } finally {
       resourceLock.unlock();
@@ -189,7 +171,7 @@ public class RaftNodeAsClientManager {
     isShuttingDown = true;
     while (clientNumInUse.get() != 0 && queueClientNum != 0) {
       // wait until releasing all usage of clients.
-      Thread.sleep(THREAD_SLEEP_INTERVAL);
+      // await() requires holding the lock the condition was created from.
+      resourceLock.lock();
+      try {
+        resourceCondition.await();
+      } finally {
+        resourceLock.unlock();
+      }
     }
     while (!clientList.isEmpty()) {
       clientList.removeFirst().shutdown();
@@ -246,7 +228,7 @@ public class RaftNodeAsClientManager {
 
     @Override
     public void asyncHandleRequest(BasicRequest request, PeerId leader,
-        QPTask qpTask)
+        SingleQPTask qpTask)
         throws RaftConnectionException {
       LOGGER.debug("Node as client to send request to leader: {}", leader);
       try {
@@ -284,17 +266,13 @@ public class RaftNodeAsClientManager {
     }
 
     @Override
-    public void syncHandleRequest(BasicRequest request, PeerId leader,
-        QPTask qpTask)
-        throws RaftConnectionException {
+    public QueryTask syncHandleRequest(BasicRequest request, PeerId peerId) {
       try {
         BasicResponse response = (BasicResponse) boltClientService.getRpcClient()
-            .invokeSync(leader.getEndpoint().toString(), request, TASK_TIMEOUT_MS);
-        qpTask.run(response);
+            .invokeSync(peerId.getEndpoint().toString(), request, TASK_TIMEOUT_MS);
+        return new QueryTask(response, TaskState.FINISH);
       } catch (RemotingException | InterruptedException e) {
-        qpTask.setTaskState(TaskState.EXCEPTION);
-        qpTask.run(null);
-        throw new RaftConnectionException(e);
+        return new QueryTask(null, TaskState.EXCEPTION);
       } finally {
         releaseClient(RaftNodeAsClient.this);
       }
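
The manager now parks waiters on a Condition that releaseClient() signals, instead of
sleep-polling every 10 ms. A minimal, self-contained sketch of that acquire/release discipline
(toy pool, not the real client manager):

    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class BoundedPoolDemo {

      private static final int MAX_IN_USE = 2;
      private final Lock lock = new ReentrantLock();
      private final Condition available = lock.newCondition();
      private int inUse = 0;

      Object acquire() throws InterruptedException {
        lock.lock();
        try {
          // Park instead of sleep-polling; await() releases the lock while waiting.
          while (inUse >= MAX_IN_USE) {
            available.await();
          }
          inUse++;
          return new Object(); // stand-in for a RaftNodeAsClient
        } finally {
          lock.unlock();
        }
      }

      void release() {
        lock.lock();
        try {
          inUse--;
          available.signalAll(); // wake waiters, as releaseClient() now does
        } finally {
          lock.unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        BoundedPoolDemo pool = new BoundedPoolDemo();
        pool.acquire();
        pool.acquire();
        new Thread(pool::release).start(); // frees a slot for the blocked acquire
        pool.acquire();
        System.out.println("third acquire returned after a release");
      }
    }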
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/DataGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/DataGroupNonQueryAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
index fb00c0d..de2d2ab 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/DataGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
@@ -16,16 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.nonquery;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/MetaGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/MetaGroupNonQueryAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
index d6f6270..9f09bbb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/MetaGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
@@ -16,16 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.nonquery;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
 import com.alipay.sofa.jraft.entity.PeerId;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/CloseSeriesReaderSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/CloseSeriesReaderSyncProcessor.java
new file mode 100644
index 0000000..f6ec67a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/CloseSeriesReaderSyncProcessor.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+
+public class CloseSeriesReaderSyncProcessor extends
+    BasicSyncUserProcessor<CloseSeriesReaderRequest> {
+
+  @Override
+  public Object handleRequest(BizContext bizContext, CloseSeriesReaderRequest request)
+      throws Exception {
+    String groupId = request.getGroupID();
+    QuerySeriesDataResponse response = new QuerySeriesDataResponse(groupId);
+    ClusterLocalQueryManager.getInstance().close(request.getTaskId());
+    return response;
+  }
+
+  @Override
+  public String interest() {
+    return CloseSeriesReaderRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
new file mode 100644
index 0000000..894d9eb
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
+
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.ProcessorException;
+
+public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSeriesReaderRequest> {
+
+  @Override
+  public Object handleRequest(BizContext bizContext, InitSeriesReaderRequest request)
+      throws Exception {
+    String groupId = request.getGroupID();
+    handleNullRead(request.getReadConsistencyLevel(), groupId);
+    return ClusterLocalQueryManager.getInstance().createQueryDataSet(request);
+  }
+
+  /**
+   * A null read is necessary while creating a query data set if the read consistency level is
+   * strong and the local node is not the leader of the data group.
+   *
+   * @param readConsistencyLevel read consistency level
+   * @param groupId group id
+   */
+  private void handleNullRead(int readConsistencyLevel, String groupId) throws ProcessorException {
+    if (readConsistencyLevel == ClusterConstant.STRONG_CONSISTENCY_LEVEL && !QPExecutorUtils
+        .checkDataGroupLeader(groupId)) {
+      Status nullReadTaskStatus = Status.OK();
+      RaftUtils.handleNullReadToDataGroup(nullReadTaskStatus, groupId);
+      if (!nullReadTaskStatus.isOk()) {
+        throw new ProcessorException("Null read to data group failed");
+      }
+    }
+  }
+
+  @Override
+  public String interest() {
+    return InitSeriesReaderRequest.class.getName();
+  }
+}
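
The null read above makes a follower catch up with the leader's committed state before building
series readers, so a strong-consistency query never serves stale data. A schematic sketch of that
gate (toy types; the real check lives in QPExecutorUtils and RaftUtils):

    public class NullReadGateDemo {

      static final int STRONG_CONSISTENCY_LEVEL = 1; // stand-in for ClusterConstant

      interface RaftGroup {
        boolean isLeader(String groupId);
        boolean nullRead(String groupId); // stand-in for the read-index round
      }

      static void beforeCreatingReaders(RaftGroup raft, int level, String groupId) {
        if (level == STRONG_CONSISTENCY_LEVEL && !raft.isLeader(groupId)) {
          // A follower serving a strong read must first catch up with the leader.
          if (!raft.nullRead(groupId)) {
            throw new IllegalStateException("Null read to data group failed");
          }
        }
      }

      public static void main(String[] args) {
        RaftGroup raft = new RaftGroup() {
          public boolean isLeader(String groupId) { return false; }
          public boolean nullRead(String groupId) { return true; }
        };
        beforeCreatingReaders(raft, STRONG_CONSISTENCY_LEVEL, "data-group-1");
        System.out.println("readers may be created now");
      }
    }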
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataByTimestampSyncProcessor.java
similarity index 52%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataByTimestampSyncProcessor.java
index d69a65a..ae3f057 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataByTimestampSyncProcessor.java
@@ -16,37 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.expression.impl;
+package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
 
-import org.apache.iotdb.tsfile.read.expression.ExpressionType;
-import org.apache.iotdb.tsfile.read.expression.IUnaryExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 
-public class GlobalTimeExpression implements IUnaryExpression {
-
-  private Filter filter;
-
-  public GlobalTimeExpression(Filter filter) {
-    this.filter = filter;
-  }
-
-  @Override
-  public Filter getFilter() {
-    return filter;
-  }
-
-  @Override
-  public void setFilter(Filter filter) {
-    this.filter = filter;
-  }
+public class QuerySeriesDataByTimestampSyncProcessor extends BasicSyncUserProcessor<QuerySeriesDataByTimestampRequest> {
 
   @Override
-  public ExpressionType getType() {
-    return ExpressionType.GLOBAL_TIME;
+  public Object handleRequest(BizContext bizContext,
+      QuerySeriesDataByTimestampRequest request) throws Exception {
+    return ClusterLocalQueryManager.getInstance().readBatchDataByTimestamp(request);
   }
 
   @Override
-  public String toString() {
-    return "[" + this.filter.toString() + "]";
+  public String interest() {
+    return QuerySeriesDataByTimestampRequest.class.getName();
   }
 }
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
similarity index 51%
copy from tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
index d69a65a..90dc24a 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/expression/impl/GlobalTimeExpression.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/QuerySeriesDataSyncProcessor.java
@@ -16,37 +16,25 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.tsfile.read.expression.impl;
+package org.apache.iotdb.cluster.rpc.raft.processor.querydata;
 
-import org.apache.iotdb.tsfile.read.expression.ExpressionType;
-import org.apache.iotdb.tsfile.read.expression.IUnaryExpression;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicSyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 
-public class GlobalTimeExpression implements IUnaryExpression {
-
-  private Filter filter;
-
-  public GlobalTimeExpression(Filter filter) {
-    this.filter = filter;
-  }
-
-  @Override
-  public Filter getFilter() {
-    return filter;
-  }
-
-  @Override
-  public void setFilter(Filter filter) {
-    this.filter = filter;
-  }
+public class QuerySeriesDataSyncProcessor extends
+    BasicSyncUserProcessor<QuerySeriesDataRequest> {
 
   @Override
-  public ExpressionType getType() {
-    return ExpressionType.GLOBAL_TIME;
+  public Object handleRequest(BizContext bizContext, QuerySeriesDataRequest request)
+      throws Exception {
+    return ClusterLocalQueryManager.getInstance().readBatchData(request);
   }
 
   @Override
-  public String toString() {
-    return "[" + this.filter.toString() + "]";
+  public String interest() {
+    return QuerySeriesDataRequest.class.getName();
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
similarity index 91%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
index 176fa33..36e657c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
@@ -25,8 +25,9 @@ import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryMetadataRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryMetadataResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.metadata.MManager;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataInStringAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataInStringAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
index b80f4ae..8771eea 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetadataInStringAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
@@ -25,8 +25,9 @@ import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryMetadataInStringRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryMetadataInStringResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataInStringResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.metadata.MManager;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
similarity index 92%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryPathsAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
index f54aba0..8e1e47b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryPathsAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
@@ -25,8 +25,9 @@ import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryPathsRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryPathsResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.metadata.MManager;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QuerySeriesTypeAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
similarity index 91%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QuerySeriesTypeAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
index f0a4fc6..9e4b1c7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QuerySeriesTypeAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
@@ -25,8 +25,9 @@ import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.QuerySeriesTypeRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.QuerySeriesTypeResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QuerySeriesTypeRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.metadata.MManager;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryTimeSeriesAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
similarity index 92%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryTimeSeriesAsyncProcessor.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
index c41fdcf..593f99d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryTimeSeriesAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.processor;
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
 
 import com.alipay.remoting.AsyncContext;
 import com.alipay.remoting.BizContext;
@@ -25,8 +25,9 @@ import com.alipay.sofa.jraft.closure.ReadIndexClosure;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
-import org.apache.iotdb.cluster.rpc.raft.request.QueryTimeSeriesRequest;
-import org.apache.iotdb.cluster.rpc.raft.response.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryTimeSeriesRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.metadata.MManager;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
similarity index 76%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
index ee4d840..dc15158 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
@@ -23,22 +23,20 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
-import org.apache.iotdb.db.writelog.transfer.PhysicalPlanLogTransfer;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
 
-public abstract class BasicRequest implements Serializable {
-
-  private static final long serialVersionUID = 8434915845259380829L;
-
-  /**
-   * Group ID
-   */
-  private String groupID;
+public abstract class BasicNonQueryRequest extends BasicRequest{
 
+  private static final long serialVersionUID = -3082772186451384202L;
   /**
    * Serialized physical plans
    */
   private List<byte[]> physicalPlanBytes;
 
+  public BasicNonQueryRequest(String groupID) {
+    super(groupID);
+  }
+
   protected void init(List<PhysicalPlan> physicalPlanBytes) throws IOException {
     this.physicalPlanBytes = new ArrayList<>(physicalPlanBytes.size());
     for (PhysicalPlan plan : physicalPlanBytes) {
@@ -50,15 +48,4 @@ public abstract class BasicRequest implements Serializable {
     return physicalPlanBytes;
   }
 
-  public BasicRequest(String groupID) {
-    this.groupID = groupID;
-  }
-
-  public String getGroupID() {
-    return groupID;
-  }
-
-  public void setGroupID(String groupID) {
-    this.groupID = groupID;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
index 2cf613f..3ceddaf 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
@@ -20,6 +20,7 @@ package org.apache.iotdb.cluster.rpc.raft.request;
 
 public abstract class BasicQueryRequest extends BasicRequest {
 
+  private static final long serialVersionUID = 2993000692822502110L;
   /**
    * Read Consistency Level
    */
@@ -30,6 +31,10 @@ public abstract class BasicQueryRequest extends BasicRequest {
     this.readConsistencyLevel = readConsistencyLevel;
   }
 
+  public BasicQueryRequest(String groupID) {
+    super(groupID);
+  }
+
   public int getReadConsistencyLevel() {
     return readConsistencyLevel;
   }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
index ee4d840..dd4758a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
@@ -18,12 +18,7 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.request;
 
-import java.io.IOException;
 import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-import org.apache.iotdb.db.qp.physical.PhysicalPlan;
-import org.apache.iotdb.db.writelog.transfer.PhysicalPlanLogTransfer;
 
 public abstract class BasicRequest implements Serializable {
 
@@ -34,22 +29,6 @@ public abstract class BasicRequest implements Serializable {
    */
   private String groupID;
 
-  /**
-   * Serialized physical plans
-   */
-  private List<byte[]> physicalPlanBytes;
-
-  protected void init(List<PhysicalPlan> physicalPlanBytes) throws IOException {
-    this.physicalPlanBytes = new ArrayList<>(physicalPlanBytes.size());
-    for (PhysicalPlan plan : physicalPlanBytes) {
-      this.physicalPlanBytes.add(PhysicalPlanLogTransfer.operatorToLog(plan));
-    }
-  }
-
-  public List<byte[]> getPhysicalPlanBytes() {
-    return physicalPlanBytes;
-  }
-
   public BasicRequest(String groupID) {
     this.groupID = groupID;
   }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java
similarity index 80%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java
index c1bcf5f..8413373 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/DataGroupNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java
@@ -16,18 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.nonquery;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 
 /**
  * Handle request to data group
  */
-public class DataGroupNonQueryRequest extends BasicRequest implements Serializable {
+public class DataGroupNonQueryRequest extends BasicNonQueryRequest {
 
+  private static final long serialVersionUID = -2442407985738324604L;
 
   public DataGroupNonQueryRequest(String groupID, List<PhysicalPlan> physicalPlanBytes)
       throws IOException {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java
similarity index 80%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java
index 69625ff..b29609a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/MetaGroupNonQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java
@@ -16,17 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.nonquery;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 
 /**
  * Handle request to metadata group leader
  */
-public class MetaGroupNonQueryRequest extends BasicRequest implements Serializable {
+public class MetaGroupNonQueryRequest extends BasicNonQueryRequest {
+
+  private static final long serialVersionUID = 312899249719243646L;
 
   public MetaGroupNonQueryRequest(String groupID, List<PhysicalPlan> plans)
       throws IOException {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/CloseSeriesReaderRequest.java
similarity index 54%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/CloseSeriesReaderRequest.java
index 2cf613f..1a3cf3c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/CloseSeriesReaderRequest.java
@@ -16,25 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querydata;
 
-public abstract class BasicQueryRequest extends BasicRequest {
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+/**
+ * Release series reader resources in the remote query node
+ */
+public class CloseSeriesReaderRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = 1369515842480836991L;
 
   /**
-   * Read Consistency Level
+   * Unique task id, assigned by the coordinator node
    */
-  private int readConsistencyLevel;
+  private String taskId;
 
-  public BasicQueryRequest(String groupID, int readConsistencyLevel) {
+  private CloseSeriesReaderRequest(String groupID, String taskId) {
     super(groupID);
-    this.readConsistencyLevel = readConsistencyLevel;
+    this.taskId = taskId;
   }
 
-  public int getReadConsistencyLevel() {
-    return readConsistencyLevel;
+  public static CloseSeriesReaderRequest createReleaseResourceRequest(String groupId, String taskId) {
+    return new CloseSeriesReaderRequest(groupId, taskId);
   }
 
-  public void setReadConsistencyLevel(int readConsistencyLevel) {
-    this.readConsistencyLevel = readConsistencyLevel;
+  public String getTaskId() {
+    return taskId;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
new file mode 100644
index 0000000..c974e2f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querydata;
+
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+
+/**
+ * Initial request that creates the corresponding series readers in the remote query node
+ */
+public class InitSeriesReaderRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = 8374330837710097285L;
+
+  /**
+   * Unique task id, assigned by the coordinator node
+   */
+  private String taskId;
+
+  /**
+   * Key is series type, value is query plan
+   */
+  private Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+
+  /**
+   * Represents the filters of all leaf nodes in the filter tree while executing a query with a
+   * value filter.
+   */
+  private List<Filter> filterList = new ArrayList<>();
+
+
+  private InitSeriesReaderRequest(String groupID, String taskId) {
+    super(groupID);
+    this.taskId = taskId;
+  }
+
+  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId, int readConsistencyLevel,
+      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList){
+    InitSeriesReaderRequest request = new InitSeriesReaderRequest(groupId, taskId);
+    request.setReadConsistencyLevel(readConsistencyLevel);
+    request.allQueryPlan = allQueryPlan;
+    request.filterList = filterList;
+    return request;
+  }
+
+  public String getTaskId() {
+    return taskId;
+  }
+
+  public void setTaskId(String taskId) {
+    this.taskId = taskId;
+  }
+
+  public Map<PathType, QueryPlan> getAllQueryPlan() {
+    return allQueryPlan;
+  }
+
+  public void setAllQueryPlan(
+      Map<PathType, QueryPlan> allQueryPlan) {
+    this.allQueryPlan = allQueryPlan;
+  }
+
+  public List<Filter> getFilterList() {
+    return filterList;
+  }
+
+  public void setFilterList(List<Filter> filterList) {
+    this.filterList = filterList;
+  }
+}
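
These request classes share one shape: a private constructor plus a static factory, so every
field is fixed before the request is handed to the RPC layer. A self-contained sketch of that
pattern with an EnumMap keyed by a PathType-like enum (toy types, not the cluster module):

    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;

    public class RequestFactoryDemo {

      // Toy stand-ins for PathType and QueryPlan.
      enum PathType { SELECT_PATH, FILTER_PATH }
      record Plan(List<String> paths) {}

      static final class InitRequest {
        final String groupId;
        final String taskId;
        final Map<PathType, Plan> allQueryPlan;

        // Private constructor: instances only come from the factory below.
        private InitRequest(String groupId, String taskId, Map<PathType, Plan> plans) {
          this.groupId = groupId;
          this.taskId = taskId;
          this.allQueryPlan = plans;
        }

        static InitRequest create(String groupId, String taskId, Map<PathType, Plan> plans) {
          return new InitRequest(groupId, taskId, plans);
        }
      }

      public static void main(String[] args) {
        Map<PathType, Plan> plans = new EnumMap<>(PathType.class);
        plans.put(PathType.SELECT_PATH, new Plan(List.of("root.g1.d1.s1")));
        plans.put(PathType.FILTER_PATH, new Plan(List.of("root.g1.d1.s2")));
        InitRequest r = InitRequest.create("data-group-1", "task-42", plans);
        System.out.println(r.groupId + "/" + r.taskId + " -> " + r.allQueryPlan);
      }
    }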
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
new file mode 100644
index 0000000..351e6eb
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querydata;
+
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = 4942493162179531133L;
+  /**
+   * Rounds number of query
+   */
+  private long queryRounds;
+
+  /**
+   * Unique task id, assigned by the coordinator node
+   */
+  private String taskId;
+
+  /**
+   * Batch of valid timestamps
+   */
+  private List<Long> batchTimestamp;
+
+  /**
+   * Series whose data is fetched from the remote query node
+   */
+  private List<String> fetchDataSeries;
+
+  private QuerySeriesDataByTimestampRequest(String groupID) {
+    super(groupID);
+  }
+
+  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds, String taskId, List<Long> batchTimestamp, List<String> fetchDataSeries){
+    QuerySeriesDataByTimestampRequest request = new QuerySeriesDataByTimestampRequest(groupId);
+    request.queryRounds = queryRounds;
+    request.taskId = taskId;
+    request.batchTimestamp = batchTimestamp;
+    request.fetchDataSeries = fetchDataSeries;
+    return request;
+  }
+
+  public long getQueryRounds() {
+    return queryRounds;
+  }
+
+  public void setQueryRounds(long queryRounds) {
+    this.queryRounds = queryRounds;
+  }
+
+  public String getTaskId() {
+    return taskId;
+  }
+
+  public void setTaskId(String taskId) {
+    this.taskId = taskId;
+  }
+
+  public List<Long> getBatchTimestamp() {
+    return batchTimestamp;
+  }
+
+  public void setBatchTimestamp(List<Long> batchTimestamp) {
+    this.batchTimestamp = batchTimestamp;
+  }
+
+  public List<String> getFetchDataSeries() {
+    return fetchDataSeries;
+  }
+
+  public void setFetchDataSeries(List<String> fetchDataSeries) {
+    this.fetchDataSeries = fetchDataSeries;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
new file mode 100644
index 0000000..554b8c1
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querydata;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+/**
+ * Read batch data from series readers in the remote query node.
+ */
+public class QuerySeriesDataRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = 7132891920951977625L;
+
+  /**
+   * Rounds number of query
+   */
+  private long queryRounds;
+
+  /**
+   * Unique task id, assigned by the coordinator node
+   */
+  private String taskId;
+
+  /**
+   * Series type
+   */
+  private PathType pathType;
+
+  /**
+   * Series paths to fetch in this round
+   */
+  private List<String> seriesPaths = new ArrayList<>();
+
+  private QuerySeriesDataRequest(String groupID, String taskId) {
+    super(groupID);
+    this.taskId = taskId;
+  }
+
+  public static QuerySeriesDataRequest createFetchDataRequest(String groupId, String taskId,
+      PathType pathType, List<String> seriesPaths, long queryRounds) {
+    QuerySeriesDataRequest request = new QuerySeriesDataRequest(groupId, taskId);
+    request.pathType = pathType;
+    request.seriesPaths = seriesPaths;
+    request.queryRounds = queryRounds;
+    return request;
+  }
+
+  public long getQueryRounds() {
+    return queryRounds;
+  }
+
+  public void setQueryRounds(long queryRounds) {
+    this.queryRounds = queryRounds;
+  }
+
+  public String getTaskId() {
+    return taskId;
+  }
+
+  public void setTaskId(String taskId) {
+    this.taskId = taskId;
+  }
+
+  public PathType getPathType() {
+    return pathType;
+  }
+
+  public void setPathType(PathType pathType) {
+    this.pathType = pathType;
+  }
+
+  public List<String> getSeriesPaths() {
+    return seriesPaths;
+  }
+
+  public void setSeriesPaths(List<String> seriesPaths) {
+    this.seriesPaths = seriesPaths;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataInStringRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java
similarity index 82%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataInStringRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java
index 18471a6..c90cf80 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataInStringRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java
@@ -16,11 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QueryMetadataInStringRequest extends BasicQueryRequest implements Serializable {
+public class QueryMetadataInStringRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -7037884610669129082L;
 
   public QueryMetadataInStringRequest(String groupID, int readConsistencyLevel) {
     super(groupID, readConsistencyLevel);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java
similarity index 77%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java
index 2628fb6..75ae438 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetadataRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java
@@ -16,11 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QueryMetadataRequest extends BasicQueryRequest implements Serializable {
+public class QueryMetadataRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -1976805423799324348L;
 
   public QueryMetadataRequest(String groupID, int readConsistencyLevel) {
     super(groupID, readConsistencyLevel);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryPathsRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java
similarity index 80%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryPathsRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java
index 2c600f4..b92a0e6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryPathsRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java
@@ -16,13 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QueryPathsRequest extends BasicQueryRequest implements Serializable {
+public class QueryPathsRequest extends BasicQueryRequest {
 
+  private static final long serialVersionUID = -4334131357874435256L;
   private List<String> path;
 
   public QueryPathsRequest(String groupID, int readConsistencyLevel, List<String> path) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QuerySeriesTypeRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java
similarity index 79%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QuerySeriesTypeRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java
index c486576..e46fe66 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QuerySeriesTypeRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java
@@ -16,12 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QuerySeriesTypeRequest extends BasicQueryRequest implements Serializable {
+public class QuerySeriesTypeRequest extends BasicQueryRequest {
 
+  private static final long serialVersionUID = -7917403708996214075L;
   private String path;
 
   public QuerySeriesTypeRequest(String groupID, int readConsistencyLevel, String path) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryStorageGroupRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java
similarity index 82%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryStorageGroupRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java
index 037924f..bb3d847 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryStorageGroupRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java
@@ -16,11 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QueryStorageGroupRequest extends BasicQueryRequest implements Serializable {
+public class QueryStorageGroupRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -1260362721166408556L;
 
   public QueryStorageGroupRequest(String groupID, int readConsistencyLevel) {
     super(groupID, readConsistencyLevel);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryTimeSeriesRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java
similarity index 80%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryTimeSeriesRequest.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java
index 0106f18..92d2f8a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryTimeSeriesRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java
@@ -16,13 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
 
-import java.io.Serializable;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 
-public class QueryTimeSeriesRequest extends BasicQueryRequest implements Serializable {
+public class QueryTimeSeriesRequest extends BasicQueryRequest {
 
+  private static final long serialVersionUID = -1902657459558399385L;
   private List<String> path;
 
   public QueryTimeSeriesRequest(String groupID, int readConsistencyLevel, List<String> path) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java
similarity index 64%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java
index f662e35..53e7923 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java
@@ -18,22 +18,25 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.response;
 
-/**
- * Handle response from metadata group leader
- */
-public class MetaGroupNonQueryResponse extends BasicResponse {
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+public abstract class BasicQueryDataResponse extends BasicResponse {
 
-  private MetaGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+  private List<BatchData> seriesBatchData = new ArrayList<>();
+
+  public BasicQueryDataResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
   }
 
-  public static MetaGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
-    return new MetaGroupNonQueryResponse(groupId, true, leaderStr, null);
+  public List<BatchData> getSeriesBatchData() {
+    return seriesBatchData;
   }
 
-  public static MetaGroupNonQueryResponse createEmptyResponse(String groupId) {
-    return new MetaGroupNonQueryResponse(groupId, false, null, null);
+  public void setSeriesBatchData(
+      List<BatchData> seriesBatchData) {
+    this.seriesBatchData = seriesBatchData;
   }
-
 }
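A sketch of how a coordinator might consume the payload once a concrete subclass (such as QuerySeriesDataResponse, added below) comes back from a remote query node. Here 'response' is a placeholder variable, and the one-BatchData-per-requested-series alignment is our assumption, not something this patch states:

    import java.util.List;
    import org.apache.iotdb.tsfile.read.common.BatchData;

    List<BatchData> batches = response.getSeriesBatchData();
    for (int i = 0; i < batches.size(); i++) {
      BatchData batch = batches.get(i);
      // presumably aligned with the i-th series path of the originating request
      if (batch != null) {
        // feed the batch into the local reader for series i
      }
    }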
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/DataGroupNonQueryResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
index 074f452..9d86398 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/DataGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
@@ -16,13 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 /**
  * Handle response from data group leader
  */
 public class DataGroupNonQueryResponse extends BasicResponse {
 
+  private static final long serialVersionUID = -8288044965888956717L;
+
   private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java
similarity index 87%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java
index f662e35..653958a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/MetaGroupNonQueryResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java
@@ -16,13 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 /**
  * Handle response from metadata group leader
  */
 public class MetaGroupNonQueryResponse extends BasicResponse {
 
+  private static final long serialVersionUID = -7444143717755803056L;
+
   private MetaGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
       String errorMsg) {
     super(groupId, redirected, leaderStr, errorMsg);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/InitSeriesReaderResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/InitSeriesReaderResponse.java
new file mode 100644
index 0000000..e30a288
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/InitSeriesReaderResponse.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querydata;
+
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+
+public class InitSeriesReaderResponse extends BasicQueryDataResponse {
+
+  private static final long serialVersionUID = 6298440729566956283L;
+
+  private Map<PathType, List<TSDataType>> seriesDataTypes = new EnumMap<>(PathType.class);
+
+  /**
+   * Series type
+   */
+  private PathType pathType;
+
+  public InitSeriesReaderResponse(String groupId) {
+    super(groupId, false, null, null);
+  }
+
+  public Map<PathType, List<TSDataType>> getSeriesDataTypes() {
+    return seriesDataTypes;
+  }
+
+  public void setSeriesDataTypes(
+      Map<PathType, List<TSDataType>> seriesDataTypes) {
+    this.seriesDataTypes = seriesDataTypes;
+  }
+
+  public PathType getPathType() {
+    return pathType;
+  }
+
+  public void setPathType(PathType pathType) {
+    this.pathType = pathType;
+  }
+}
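For illustration, reading the reported data types back out of the response above; 'initResponse' and the PathType constant are placeholders:

    import java.util.List;
    import org.apache.iotdb.cluster.query.PathType;
    import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;

    // Data types of the selected series, as reported by the remote query node.
    List<TSDataType> selectTypes =
        initResponse.getSeriesDataTypes().get(PathType.SELECT_PATH);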
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/exception/FileNodeManagerException.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataByTimestampResponse.java
similarity index 67%
copy from iotdb/src/main/java/org/apache/iotdb/db/exception/FileNodeManagerException.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataByTimestampResponse.java
index 1e5e11d..f3417b7 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/exception/FileNodeManagerException.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataByTimestampResponse.java
@@ -16,22 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.db.exception;
+package org.apache.iotdb.cluster.rpc.raft.response.querydata;
 
-public class FileNodeManagerException extends Exception {
+import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
 
-  private static final long serialVersionUID = 9001649171768311032L;
+public class QuerySeriesDataByTimestampResponse extends BasicQueryDataResponse {
 
-  public FileNodeManagerException() {
-    super();
-  }
-
-  public FileNodeManagerException(String message) {
-    super(message);
-  }
+  private static final long serialVersionUID = -9015865924302417289L;
 
-  public FileNodeManagerException(Throwable cause) {
-    super(cause);
+  public QuerySeriesDataByTimestampResponse(String groupId) {
+    super(groupId, false, null, null);
   }
 
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataResponse.java
similarity index 58%
copy from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataResponse.java
index 2cf613f..b4586bd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querydata/QuerySeriesDataResponse.java
@@ -16,25 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.request;
+package org.apache.iotdb.cluster.rpc.raft.response.querydata;
 
-public abstract class BasicQueryRequest extends BasicRequest {
+import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
+
+public class QuerySeriesDataResponse extends BasicQueryDataResponse {
+
+  private static final long serialVersionUID = -4783032133961145205L;
 
   /**
-   * Read Consistency Level
+   * Series type
    */
-  private int readConsistencyLevel;
+  private PathType pathType;
 
-  public BasicQueryRequest(String groupID, int readConsistencyLevel) {
-    super(groupID);
-    this.readConsistencyLevel = readConsistencyLevel;
+  public QuerySeriesDataResponse(String groupId) {
+    super(groupId, false, null, null);
   }
 
-  public int getReadConsistencyLevel() {
-    return readConsistencyLevel;
+  public PathType getPathType() {
+    return pathType;
   }
 
-  public void setReadConsistencyLevel(int readConsistencyLevel) {
-    this.readConsistencyLevel = readConsistencyLevel;
+  public void setPathType(PathType pathType) {
+    this.pathType = pathType;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataInStringResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java
similarity index 88%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataInStringResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java
index a3a963a..98b8201 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataInStringResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java
@@ -16,10 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 public class QueryMetadataInStringResponse extends BasicResponse {
 
+  private static final long serialVersionUID = 5704333006127833921L;
   private String metadata;
 
   private QueryMetadataInStringResponse(String groupId, boolean redirected, String leaderStr,
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java
similarity index 88%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java
index 6c21798..20e09f2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetadataResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java
@@ -16,12 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
 
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.db.metadata.Metadata;
 
 public class QueryMetadataResponse extends BasicResponse {
 
+  private static final long serialVersionUID = -3969749781116510054L;
   private Metadata metadata;
 
   private QueryMetadataResponse(String groupId, boolean redirected, String leaderStr,
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryPathsResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryPathsResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java
index 29d659a..171563a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryPathsResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java
@@ -16,13 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
 
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 public class QueryPathsResponse extends BasicResponse {
 
+  private static final long serialVersionUID = -8255822509893237195L;
   private List<String> paths;
 
   private QueryPathsResponse(String groupId, boolean redirected, boolean success, String leaderStr, String errorMsg) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QuerySeriesTypeResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QuerySeriesTypeResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java
index e86e108..eee45c6 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QuerySeriesTypeResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java
@@ -16,12 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
 
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 
 public class QuerySeriesTypeResponse extends BasicResponse {
 
+  private static final long serialVersionUID = 7977583965911799165L;
   private TSDataType dataType;
 
   private QuerySeriesTypeResponse(String groupId, boolean redirected, String leaderStr,
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryStorageGroupResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryStorageGroupResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java
index 6abff89..8a3bb11 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryStorageGroupResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java
@@ -16,12 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
 
 import java.util.Set;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 public class QueryStorageGroupResponse extends BasicResponse {
 
+  private static final long serialVersionUID = 248840631619860233L;
   private Set<String> storageGroups;
 
   private QueryStorageGroupResponse(boolean success, String leaderStr, String errorMsg) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryTimeSeriesResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java
similarity index 89%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryTimeSeriesResponse.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java
index edeb4c4..1e029e8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryTimeSeriesResponse.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java
@@ -16,13 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.raft.response;
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
 
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 
 public class QueryTimeSeriesResponse extends BasicResponse {
 
+  private static final long serialVersionUID = 8313150788331085964L;
   private List<List<String>> timeSeries;
 
   private QueryTimeSeriesResponse(String groupId, boolean redirected, boolean success, String leaderStr, String errorMsg) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
similarity index 67%
rename from cluster/src/main/java/org/apache/iotdb/cluster/rpc/service/TSServiceClusterImpl.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
index 33e3e81..bfc74c1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/service/TSServiceClusterImpl.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -16,32 +16,45 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.rpc.service;
+package org.apache.iotdb.cluster.service;
 
-import com.alipay.sofa.jraft.util.OnlyForTest;
 import java.io.IOException;
 import java.sql.Statement;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
+import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
 import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
 import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.IClusterRpcQueryManager;
 import org.apache.iotdb.db.auth.AuthException;
 import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.metadata.Metadata;
+import org.apache.iotdb.db.qp.QueryProcessor;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.service.TSServiceImpl;
+import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
 import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementReq;
 import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementResp;
+import org.apache.iotdb.service.rpc.thrift.TSFetchResultsReq;
 import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,17 +66,49 @@ public class TSServiceClusterImpl extends TSServiceImpl {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceClusterImpl.class);
 
-  private ThreadLocal<NonQueryExecutor> nonQueryExecutor = new ThreadLocal<>();
-  private ThreadLocal<QueryMetadataExecutor> queryMetadataExecutor = new ThreadLocal<>();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
+  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
+  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+
+  private IClusterRpcQueryManager queryManager = ClusterRpcQueryManager.getInstance();
 
   public TSServiceClusterImpl() throws IOException {
     super();
+    processor = new QueryProcessor(queryDataExecutor);
+  }
+
+  @Override
+  protected Set<String> getAllStorageGroups() throws InterruptedException {
+    return queryMetadataExecutor.processStorageGroupQuery();
+  }
+
+  @Override
+  protected List<List<String>> getTimeSeriesForPath(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processTimeSeriesQuery(path);
+  }
+
+  @Override
+  protected String getMetadataInString()
+      throws InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processMetadataInStringQuery();
   }
 
   @Override
-  public void initClusterService() {
-    nonQueryExecutor.set(new NonQueryExecutor());
-    queryMetadataExecutor.set(new QueryMetadataExecutor());
+  protected Metadata getMetadata()
+      throws InterruptedException, ProcessorException, PathErrorException {
+    return queryMetadataExecutor.processMetadataQuery();
+  }
+
+  @Override
+  protected TSDataType getSeriesType(String path) throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processSeriesTypeQuery(path);
+  }
+
+  @Override
+  protected List<String> getPaths(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processPathsQuery(path);
   }
 
   @Override
@@ -96,7 +142,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
             result = resultTemp;
             physicalPlans = physicalPlansTemp;
             BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
-            nonQueryExecutor.get().processBatch(physicalPlans, batchResult);
+            nonQueryExecutor.processBatch(physicalPlans, batchResult);
             return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
                 "statement is query :" + statements.get(i), Arrays.stream(result).boxed().collect(
                     Collectors.toList()));
@@ -130,7 +176,7 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       }
 
       BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
-      nonQueryExecutor.get().processBatch(physicalPlans, batchResult);
+      nonQueryExecutor.processBatch(physicalPlans, batchResult);
       batchErrorMessage = batchResult.batchErrorMessage;
       isAllSuccessful = batchResult.isAllSuccessful;
 
@@ -199,13 +245,13 @@ public class TSServiceClusterImpl extends TSServiceImpl {
       if (Pattern.matches(ClusterConstant.SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
         String[] splits = statement.split("\\s+");
         int level = Integer.parseInt(splits[splits.length - 1]);
-        nonQueryExecutor.get().setReadMetadataConsistencyLevel(level);
+        queryMetadataExecutor.setReadMetadataConsistencyLevel(level);
         return true;
       } else if (Pattern
           .matches(ClusterConstant.SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
         String[] splits = statement.split("\\s+");
         int level = Integer.parseInt(splits[splits.length - 1]);
-        nonQueryExecutor.get().setReadDataConsistencyLevel(level);
+        queryDataExecutor.setReadDataConsistencyLevel(level);
         return true;
       } else {
         return false;
@@ -217,54 +263,78 @@
 
   @Override
   protected boolean executeNonQuery(PhysicalPlan plan) throws ProcessorException {
-    return nonQueryExecutor.get().processNonQuery(plan);
+    return nonQueryExecutor.processNonQuery(plan);
   }
 
   /**
-   * Close cluster service
+   * This check is unnecessary here: it has already been done while transforming the query physical plan.
    */
   @Override
-  public void closeClusterService() {
-    nonQueryExecutor.get().shutdown();
-    queryMetadataExecutor.get().shutdown();
+  public void checkFileLevelSet(List<Path> paths) throws PathErrorException {
   }
 
   @Override
-  protected Set<String> getAllStorageGroups() throws InterruptedException {
-    return queryMetadataExecutor.get().processStorageGroupQuery();
+  public void releaseQueryResource(TSCloseOperationReq req) throws Exception {
+    Map<Long, QueryContext> contextMap = contextMapLocal.get();
+    if (contextMap == null) {
+      return;
+    }
+    if (req == null || req.queryId == -1) {
+      // end query for all the query tokens created by current thread
+      for (QueryContext context : contextMap.values()) {
+        QueryResourceManager.getInstance().endQueryForGivenJob(context.getJobId());
+        queryManager.releaseQueryResource(context.getJobId());
+      }
+      contextMapLocal.set(new HashMap<>());
+    } else {
+      long jobId = contextMap.remove(req.queryId).getJobId();
+      QueryResourceManager.getInstance().endQueryForGivenJob(jobId);
+      queryManager.releaseQueryResource(jobId);
+    }
   }
 
   @Override
-  protected List<List<String>> getTimeSeriesForPath(String path)
-      throws PathErrorException, InterruptedException, ProcessorException {
-    return queryMetadataExecutor.get().processTimeSeriesQuery(path);
-  }
+  public QueryDataSet createNewDataSet(String statement, int fetchSize, TSFetchResultsReq req)
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException,
+      ProcessorException, IOException {
+    PhysicalPlan physicalPlan = queryStatus.get().get(statement);
+    processor.getExecutor().setFetchSize(fetchSize);
 
+    long jobId = QueryResourceManager.getInstance().assignJobId();
+    QueryContext context = new QueryContext(jobId);
+    initContextMap();
+    contextMapLocal.get().put(req.queryId, context);
+
+    queryManager.addSingleQuery(jobId, (QueryPlan) physicalPlan);
+    QueryDataSet queryDataSet = processor.getExecutor().processQuery((QueryPlan) physicalPlan,
+        context);
+    queryRet.get().put(statement, queryDataSet);
+    return queryDataSet;
+  }
+  /**
+   * Close cluster service
+   */
   @Override
-  protected String getMetadataInString()
-      throws InterruptedException, ProcessorException {
-    return queryMetadataExecutor.get().processMetadataInStringQuery();
+  public void closeClusterService() {
+    nonQueryExecutor.shutdown();
+    queryMetadataExecutor.shutdown();
   }
 
-  @Override
-  protected Metadata getMetadata()
-      throws InterruptedException, ProcessorException, PathErrorException {
-    return queryMetadataExecutor.get().processMetadataQuery();
+  public ClusterQueryProcessExecutor getQueryDataExecutor() {
+    return queryDataExecutor;
   }
 
-  @Override
-  protected TSDataType getSeriesType(String path) throws PathErrorException, InterruptedException, ProcessorException {
-    return queryMetadataExecutor.get().processSeriesTypeQuery(path);
+  public void setQueryDataExecutor(
+      ClusterQueryProcessExecutor queryDataExecutor) {
+    this.queryDataExecutor = queryDataExecutor;
   }
 
-  @Override
-  protected List<String> getPaths(String path)
-      throws PathErrorException, InterruptedException, ProcessorException {
-    return queryMetadataExecutor.get().processPathsQuery(path);
+  public QueryMetadataExecutor getQueryMetadataExecutor() {
+    return queryMetadataExecutor;
   }
 
-  @OnlyForTest
-  public NonQueryExecutor getNonQueryExecutor() {
-    return nonQueryExecutor.get();
+  public void setNonQueryExecutor(NonQueryExecutor nonQueryExecutor) {
+    this.nonQueryExecutor = nonQueryExecutor;
   }
 }
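The consistency-level statements matched above can be issued like any ordinary statement. A hedged sketch using the IoTDB JDBC driver; the address, port, and credentials are the usual defaults and not fixed by this patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    try (Connection conn = DriverManager
        .getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
        Statement stmt = conn.createStatement()) {
      stmt.execute("set read metadata level to 1");
      stmt.execute("set read data level to 2");
    } catch (SQLException e) {
      // unsupported levels are rejected, cf. "Consistency level ... not support"
      // in the tests below
    }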
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
new file mode 100644
index 0000000..809a01c
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.utils;
+
+import com.alipay.sofa.jraft.util.OnlyForTest;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+/**
+ * Utils for QP executor
+ */
+public class QPExecutorUtils {
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
+  private static final Router router = Router.getInstance();
+
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final MManager mManager = MManager.getInstance();
+
+  private static final Server server = Server.getInstance();
+
+  /**
+   * Get Storage Group Name by device name
+   */
+  public static String getStroageGroupByDevice(String device) throws PathErrorException {
+    String storageGroup;
+    try {
+      storageGroup = MManager.getInstance().getFileNameByPath(device);
+    } catch (PathErrorException e) {
+      throw new PathErrorException(String.format("File level of %s doesn't exist.", device));
+    }
+    return storageGroup;
+  }
+
+  /**
+   * Get all Storage Group Names by path
+   */
+  public static List<String> getAllStroageGroupsByPath(String path) throws PathErrorException {
+    List<String> storageGroupList;
+    try {
+      storageGroupList = mManager.getAllFileNamesByPath(path);
+    } catch (PathErrorException e) {
+      throw new PathErrorException(String.format("File level of %s doesn't exist.", path));
+    }
+    return storageGroupList;
+  }
+
+  /**
+   * Classify the input storage groups by the data group each belongs to.
+   *
+   * @return key is the group id, value is the set of SGs belonging to that data group
+   */
+  public static Map<String, Set<String>> classifySGByGroupId(List<String> sgList) {
+    Map<String, Set<String>> map = new HashMap<>();
+    for (int i = 0; i < sgList.size(); i++) {
+      String sg = sgList.get(i);
+      String groupId = router.getGroupIdBySG(sg);
+      if (map.containsKey(groupId)) {
+        map.get(groupId).add(sg);
+      } else {
+        Set<String> set = new HashSet<>();
+        set.add(sg);
+        map.put(groupId, set);
+      }
+    }
+    return map;
+  }
+
+  /**
+   * Check whether the non-query command can be executed locally: this node must
+   * belong to the target group and be its leader.
+   */
+  public static boolean canHandleNonQueryByGroupId(String groupId) {
+    boolean canHandle = false;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      canHandle = ((MetadataRaftHolder) (server.getMetadataHolder())).getFsm().isLeader();
+    } else {
+      if (checkDataGroupLeader(groupId)) {
+        canHandle = true;
+      }
+    }
+    return canHandle;
+  }
+
+  /**
+   * Check whether local node is leader of data group.
+   *
+   * @param groupId data group
+   * @return true if local node is leader of data group, else return false
+   */
+  public static boolean checkDataGroupLeader(String groupId) {
+    boolean isLeader = false;
+    if (router.containPhysicalNodeByGroupId(groupId, localNode) && RaftUtils
+        .getPhysicalNodeFrom(RaftUtils.getLeaderPeerID(groupId)).equals(localNode)) {
+      isLeader = true;
+    }
+    return isLeader;
+  }
+
+  /**
+   * Check whether the query command can be executed locally, i.e. whether this node belongs to the group.
+   */
+  public static boolean canHandleQueryByGroupId(String groupId) {
+    return router.containPhysicalNodeByGroupId(groupId, localNode);
+  }
+
+  /**
+   * Get group id by device
+   *
+   * @param device device
+   */
+  public static String getGroupIdByDevice(String device) throws PathErrorException {
+    String storageGroup = QPExecutorUtils.getStroageGroupByDevice(device);
+    String groupId = Router.getInstance().getGroupIdBySG(storageGroup);
+    return groupId;
+  }
+
+  /**
+   * Change address of local node for test
+   */
+  @OnlyForTest
+  public static void setLocalNodeAddr(String ip, int port) {
+    localNode.setIp(ip);
+    localNode.setPort(port);
+  }
+}
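A small usage sketch for classifySGByGroupId; the storage group names are made up, and the resulting group ids depend on the router's hash placement:

    import java.util.Arrays;
    import java.util.Map;
    import java.util.Set;
    import org.apache.iotdb.cluster.utils.QPExecutorUtils;

    Map<String, Set<String>> byGroup = QPExecutorUtils
        .classifySGByGroupId(Arrays.asList("root.vehicle", "root.weather"));
    byGroup.forEach((groupId, sgs) -> System.out.println(groupId + " -> " + sgs));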
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index 744730f..be6eea0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -27,23 +27,28 @@ import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.entity.Task;
 import com.alipay.sofa.jraft.util.Bits;
 import com.alipay.sofa.jraft.util.OnlyForTest;
-
 import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.iotdb.cluster.qp.callback.QPTask;
-import org.apache.iotdb.cluster.qp.callback.SingleQPTask;
 import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
 import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
 import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
-import org.apache.iotdb.cluster.rpc.raft.response.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.cluster.utils.hash.Router;
 import org.slf4j.Logger;
@@ -51,10 +56,17 @@ import org.slf4j.LoggerFactory;
 
 public class RaftUtils {
 
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
   private static final Logger LOGGER = LoggerFactory.getLogger(RaftUtils.class);
   private static final Server server = Server.getInstance();
   private static final Router router = Router.getInstance();
   private static final AtomicInteger requestId = new AtomicInteger(0);
+  /**
+   * Raft as client manager.
+   */
+  private static final RaftNodeAsClientManager CLIENT_MANAGER = RaftNodeAsClientManager
+      .getInstance();
 
   /**
    * The cache will be update in two case: 1. When @onLeaderStart() method of state machine is
@@ -110,7 +122,7 @@ public class RaftUtils {
   }
 
   public static PeerId getPeerIDFrom(PhysicalNode node) {
-    return new PeerId(node.ip, node.port);
+    return new PeerId(node.getIp(), node.getPort());
   }
 
   public static PhysicalNode getPhysicalNodeFrom(PeerId peer) {
@@ -165,6 +177,15 @@ public class RaftUtils {
     LOGGER.info("group leader cache:{}", groupLeaderCache);
   }
 
+  /**
+   * Remove the cached raft group leader when an exception occurs while executing a QP task.
+   *
+   * @param groupId data group id
+   */
+  public static void removeCachedRaftGroupLeader(String groupId) {
+    groupLeaderCache.remove(groupId);
+  }
+
   @OnlyForTest
   public static void clearRaftGroupLeader() {
 	  groupLeaderCache.clear();
@@ -206,7 +227,7 @@ public class RaftUtils {
    * @param service raft service
    */
   public static void executeRaftTaskForRpcProcessor(RaftService service, AsyncContext asyncContext,
-      BasicRequest request, BasicResponse response) {
+      BasicNonQueryRequest request, BasicResponse response) {
     final Task task = new Task();
     ResponseClosure closure = new ResponseClosure(response, status -> {
       response.addResult(status.isOk());
@@ -262,8 +283,6 @@ public class RaftUtils {
 
   /**
    * Handle null-read process in metadata group if the request is to set path.
-   *
-   * @param status status to return result if this node is leader of the data group
    */
   public static void handleNullReadToMetaGroup(Status status) {
     SingleQPTask nullReadTask = new SingleQPTask(false, null);
@@ -273,7 +292,7 @@ public class RaftUtils {
   public static void handleNullReadToMetaGroup(Status status, Server server,
       SingleQPTask nullReadTask) {
     try {
-      LOGGER.debug("Handle null-read in meta group for adding path request.");
+      LOGGER.debug("Handle null-read in meta group for metadata request.");
       final byte[] reqContext = RaftUtils.createRaftRequestContext();
       MetadataRaftHolder metadataRaftHolder = (MetadataRaftHolder) server.getMetadataHolder();
       ((RaftService) metadataRaftHolder.getService()).getNode()
@@ -296,10 +315,58 @@ public class RaftUtils {
     }
   }
 
+  /**
+   * Handle the null-read process in a data group during a read.
+   */
+  public static void handleNullReadToDataGroup(Status status, String groupId) {
+    SingleQPTask nullReadTask = new SingleQPTask(false, null);
+    handleNullReadToDataGroup(status, server, nullReadTask, groupId);
+  }
+
+  private static void handleNullReadToDataGroup(Status status, Server server,
+      SingleQPTask nullReadTask, String groupId) {
+    try {
+      LOGGER.debug("Handle null-read in data group for reading.");
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server.getDataPartitionHolder(groupId);
+      ((RaftService) dataPartitionRaftHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+            @Override
+            public void run(Status indexStatus, long index, byte[] reqCtx) {
+              BasicResponse response = DataGroupNonQueryResponse
+                  .createEmptyResponse(groupId);
+              if (!indexStatus.isOk()) {
+                status.setCode(-1);
+                status.setErrorMsg(indexStatus.getErrorMsg());
+              }
+              nullReadTask.run(response);
+            }
+          });
+      nullReadTask.await();
+    } catch (InterruptedException e) {
+      status.setCode(-1);
+      status.setErrorMsg(e.getMessage());
+    }
+  }
+
   public static Status createErrorStatus(String errorMsg){
     Status status = new Status();
     status.setErrorMsg(errorMsg);
     status.setCode(-1);
     return status;
   }
+
+  /**
+   * try to get raft rpc client
+   */
+  public static NodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
+    NodeAsClient client = CLIENT_MANAGER.getRaftNodeAsClient();
+    if (client == null) {
+      throw new RaftConnectionException(String
+          .format("Raft inner rpc clients have reached the max numbers %s",
+              CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
+                  .getMaxQueueNumOfInnerRpcClient()));
+    }
+    return client;
+  }
 }
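Callers are expected to handle pool exhaustion when acquiring a raft client; a minimal sketch (what is sent through the client is elided, since NodeAsClient's request API lives elsewhere):

    import org.apache.iotdb.cluster.exception.RaftConnectionException;
    import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
    import org.apache.iotdb.cluster.utils.RaftUtils;

    try {
      NodeAsClient client = RaftUtils.getRaftNodeAsClient();
      // ... send the rpc request through the client ...
    } catch (RaftConnectionException e) {
      // client pool and wait queue are exhausted; back off or fail the QP task
    }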
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
index b8c30c9..66544a8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/PhysicalNode.java
@@ -18,10 +18,12 @@
  */
 package org.apache.iotdb.cluster.utils.hash;
 
+import com.alipay.sofa.jraft.util.OnlyForTest;
+
 public class PhysicalNode {
 
-  public final String ip;
-  public final int port;
+  private String ip;
+  private int port;
 
   public PhysicalNode(String ip, int port) {
     this.ip = ip;
@@ -74,4 +76,14 @@ public class PhysicalNode {
   public int getPort() {
     return port;
   }
+
+  @OnlyForTest
+  public void setIp(String ip) {
+    this.ip = ip;
+  }
+
+  @OnlyForTest
+  public void setPort(int port) {
+    this.port = port;
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
index 7c7b2be..544c0fc 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/hash/Router.java
@@ -189,6 +189,7 @@ public class Router {
     sgRouter.clear();
     dataPartitionCache.clear();
     nodeMapGroupIdCache.clear();
+    groupIdMapNodeCache.clear();
   }
 
   @OnlyForTest
@@ -236,4 +237,11 @@ public class Router {
   public Set<String> getAllGroupId() {
     return groupIdMapNodeCache.keySet();
   }
+
+  /**
+   * Get raft group id by storage group name
+   */
+  public String getGroupIdBySG(String storageGroup) {
+    return getGroupID(routeGroup(storageGroup));
+  }
 }
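The new helper simply chains routeGroup and getGroupID; for example ("root.vehicle" is again a made-up storage group name):

    import org.apache.iotdb.cluster.utils.hash.Router;

    String groupId = Router.getInstance().getGroupIdBySG("root.vehicle");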
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
index 1e8650c..a03ee99 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/config/ClusterDescriptorTest.java
@@ -78,7 +78,6 @@ public class ClusterDescriptorTest {
   private int testMetadataConsistencyOld;
   private int testDataConsistencyOld;
   private int testConcurrentQPTaskThreadOld;
-  private int testConcurrentRaftTaskThreadOld;
   private Map<String, String> testConfigMap = new HashMap<String, String>() {
     private static final long serialVersionUID = 7832408957178621132L;
 
@@ -121,6 +120,7 @@ public class ClusterDescriptorTest {
 
   @Test
   public void test() throws IOException {
+    String oldConfig = System.getProperty(IoTDBConstant.IOTDB_CONF);
     System.setProperty(IoTDBConstant.IOTDB_CONF, absoultePath);
     ClusterDescriptor.getInstance().loadProps();
     ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
@@ -149,8 +149,11 @@ public class ClusterDescriptorTest {
     assertEquals(testDataConsistencyNew, config.getReadDataConsistencyLevel() + "");
     assertEquals(testConcurrentQPTaskThreadNew, config.getConcurrentQPSubTaskThread() + "");
 
-
-    System.setProperty(IoTDBConstant.IOTDB_CONF, "");
+    if (oldConfig == null) {
+      System.clearProperty(IoTDBConstant.IOTDB_CONF);
+    } else {
+      System.setProperty(IoTDBConstant.IOTDB_CONF, oldConfig);
+    }
     config.deleteAllPath();
   }
 
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchRemoteIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchRemoteIT.java
index f601f14..76eaca0 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchRemoteIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchRemoteIT.java
@@ -29,7 +29,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-
 public class IoTDBMetadataFetchRemoteIT extends IoTDBMetadataFetchAbstract{
   @BeforeClass
   public static void  setUp() throws Exception {
@@ -44,11 +43,11 @@ public class IoTDBMetadataFetchRemoteIT extends IoTDBMetadataFetchAbstract{
 
   @Test
   public void test() throws IOException {
-    String dir = Utils.getCurrentPath("pwd");
-    String node = "3";
-    String replicator = "3";
-    startScript("sh", dir + File.separator + "script" + File.separator + "deploy.sh", node, replicator, dir);
-    startScript("sh", dir + File.separator + "script" + File.separator + "stop.sh", node, replicator, dir);
+//    String dir = Utils.getCurrentPath("pwd");
+//    String node = "3";
+//    String replicator = "3";
+//    startScript("sh", dir + File.separator + "script" + File.separator + "deploy.sh", node, replicator, dir);
+//    startScript("sh", dir + File.separator + "script" + File.separator + "stop.sh", node, replicator, dir);
   }
 
   private void startScript(String... command) throws IOException {
@@ -68,4 +67,4 @@ public class IoTDBMetadataFetchRemoteIT extends IoTDBMetadataFetchAbstract{
     r.close();
     p.destroy();
   }
-}
\ No newline at end of file
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/qp/ClusterQPExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/qp/AbstractQPExecutorTest.java
similarity index 69%
rename from cluster/src/test/java/org/apache/iotdb/cluster/qp/ClusterQPExecutorTest.java
rename to cluster/src/test/java/org/apache/iotdb/cluster/qp/AbstractQPExecutorTest.java
index 298dd4d..02b0311 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/qp/ClusterQPExecutorTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/qp/AbstractQPExecutorTest.java
@@ -24,28 +24,30 @@ import static org.junit.Assert.assertTrue;
 
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
-import org.apache.iotdb.cluster.rpc.service.TSServiceClusterImpl;
+import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class ClusterQPExecutorTest {
+public class AbstractQPExecutorTest {
 
   private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
 
   private TSServiceClusterImpl impl;
 
-  private NonQueryExecutor executor;
+  private ClusterQueryProcessExecutor queryExecutor;
+
+  private QueryMetadataExecutor queryMetadataExecutor;
 
   @Before
   public void setUp() throws Exception {
     EnvironmentUtils.envSetUp();
     impl = new TSServiceClusterImpl();
-    impl.initClusterService();
-
-    executor = impl.getNonQueryExecutor();
+    queryMetadataExecutor = impl.getQueryMetadataExecutor();
+    queryExecutor = impl.getQueryDataExecutor();
   }
 
   @After
@@ -57,22 +59,22 @@ public class ClusterQPExecutorTest {
   @Test
   public void setReadMetadataConsistencyLevel() throws Exception {
     assertEquals(CLUSTER_CONFIG.getReadMetadataConsistencyLevel(),
-        executor.getReadMetadataConsistencyLevel());
+        queryMetadataExecutor.getReadMetadataConsistencyLevel());
     boolean exec;
     exec= impl.execSetConsistencyLevel("set read metadata level to 1");
     assertTrue(exec);
-    assertEquals(1, executor.getReadMetadataConsistencyLevel());
+    assertEquals(1, queryMetadataExecutor.getReadMetadataConsistencyLevel());
 
     exec= impl.execSetConsistencyLevel("show timeseries");
-    assertEquals(1, executor.getReadMetadataConsistencyLevel());
+    assertEquals(1, queryMetadataExecutor.getReadMetadataConsistencyLevel());
     assertFalse(exec);
 
     exec= impl.execSetConsistencyLevel("set read metadata level to 2");
     assertTrue(exec);
-    assertEquals(2, executor.getReadMetadataConsistencyLevel());
+    assertEquals(2, queryMetadataExecutor.getReadMetadataConsistencyLevel());
 
     exec = impl.execSetConsistencyLevel("set read metadata level to -2");
-    assertEquals(2, executor.getReadMetadataConsistencyLevel());
+    assertEquals(2, queryMetadataExecutor.getReadMetadataConsistencyLevel());
     assertFalse(exec);
 
     try {
@@ -80,28 +82,28 @@ public class ClusterQPExecutorTest {
     } catch (Exception e) {
       assertEquals("Consistency level 90 not support", e.getMessage());
     }
-    assertEquals(2, executor.getReadMetadataConsistencyLevel());
+    assertEquals(2, queryMetadataExecutor.getReadMetadataConsistencyLevel());
   }
 
   @Test
   public void setReadDataConsistencyLevel() throws Exception {
     assertEquals(CLUSTER_CONFIG.getReadDataConsistencyLevel(),
-        executor.getReadDataConsistencyLevel());
+        queryMetadataExecutor.getReadDataConsistencyLevel());
     boolean exec;
     exec= impl.execSetConsistencyLevel("set read data level to 1");
     assertTrue(exec);
-    assertEquals(1, executor.getReadDataConsistencyLevel());
+    assertEquals(1, queryExecutor.getReadDataConsistencyLevel());
 
     exec= impl.execSetConsistencyLevel("show timeseries");
-    assertEquals(1, executor.getReadDataConsistencyLevel());
+    assertEquals(1, queryExecutor.getReadDataConsistencyLevel());
     assertFalse(exec);
 
     exec= impl.execSetConsistencyLevel("set read data level  to 2");
     assertTrue(exec);
-    assertEquals(2, executor.getReadDataConsistencyLevel());
+    assertEquals(2, queryExecutor.getReadDataConsistencyLevel());
 
     exec = impl.execSetConsistencyLevel("set read data level  to -2");
-    assertEquals(2, executor.getReadDataConsistencyLevel());
+    assertEquals(2, queryExecutor.getReadDataConsistencyLevel());
     assertFalse(exec);
 
     try {
@@ -109,6 +111,6 @@ public class ClusterQPExecutorTest {
     } catch (Exception e) {
       assertEquals("Consistency level 90 not support", e.getMessage());
     }
-    assertEquals(2, executor.getReadDataConsistencyLevel());
+    assertEquals(2, queryExecutor.getReadDataConsistencyLevel());
   }
 }
\ No newline at end of file
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java
new file mode 100644
index 0000000..223f0dc
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java
@@ -0,0 +1,507 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.apache.iotdb.jdbc.IoTDBResultMetadata;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
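+/**
+ * End-to-end query test over a larger data set: each round inserts data through JDBC,
+ * runs that round's queries, and compares every returned row with the expected rows
+ * built by the matching initCorrectResultsN() method.
+ */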
+public class ClusterQueryLargeDataTest {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final String URL = "127.0.0.1:6667/";
+
+  private static final String[] createSQLs1 = {
+      "SET STORAGE GROUP TO root.vehicle",
+      "SET STORAGE GROUP TO root.test",
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.test.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.test.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.test.d1.g0.s0 WITH DATATYPE=INT32, ENCODING=RLE"
+  };
+  private static final String[] insertSQLs1 = {
+      "insert into root.vehicle.d0(timestamp,s0) values(10,100)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(12,101,'102')",
+      "insert into root.vehicle.d0(timestamp,s1) values(19,'103')",
+      "insert into root.vehicle.d1(timestamp,s2) values(11,104.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(15,105.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(17,false)",
+      "insert into root.vehicle.d0(timestamp,s0) values(20,1000)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(22,1001,'1002')",
+      "insert into root.vehicle.d0(timestamp,s1) values(29,'1003')",
+      "insert into root.vehicle.d1(timestamp,s2) values(21,1004.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(25,1005.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(27,true)",
+      "insert into root.test.d0(timestamp,s0) values(10,106)",
+      "insert into root.test.d0(timestamp,s0,s1) values(14,107,'108')",
+      "insert into root.test.d0(timestamp,s1) values(16,'109')",
+      "insert into root.test.d1.g0(timestamp,s0) values(1,110)",
+      "insert into root.test.d0(timestamp,s0) values(30,1006)",
+      "insert into root.test.d0(timestamp,s0,s1) values(34,1007,'1008')",
+      "insert into root.test.d0(timestamp,s1) values(36,'1090')",
+      "insert into root.test.d1.g0(timestamp,s0) values(10,1100)"};
+  private static final String[] insertSqls2 = {
+      "insert into root.vehicle.d0(timestamp,s0) values(6,120)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(38,121,'122')",
+      "insert into root.vehicle.d0(timestamp,s1) values(9,'123')",
+      "insert into root.vehicle.d0(timestamp,s0) values(16,128)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(18,189,'198')",
+      "insert into root.vehicle.d0(timestamp,s1) values(99,'1234')",
+      "insert into root.vehicle.d1(timestamp,s2) values(14,1024.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(29,1205.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(33,true)",
+      "insert into root.test.d0(timestamp,s0) values(15,126)",
+      "insert into root.test.d0(timestamp,s0,s1) values(8,127,'128')",
+      "insert into root.test.d0(timestamp,s1) values(20,'129')",
+      "insert into root.test.d1.g0(timestamp,s0) values(14,430)",
+      "insert into root.test.d0(timestamp,s0) values(150,426)",
+      "insert into root.test.d0(timestamp,s0,s1) values(80,427,'528')",
+      "insert into root.test.d0(timestamp,s1) values(2,'1209')",
+      "insert into root.test.d1.g0(timestamp,s0) values(4,330)"};
+  private static final String[] createSQLs3 = {
+      "SET STORAGE GROUP TO root.iotdb",
+      "SET STORAGE GROUP TO root.cluster",
+      "CREATE TIMESERIES root.iotdb.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.iotdb.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.iotdb.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.iotdb.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.cluster.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.cluster.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.cluster.d1.g0.s0 WITH DATATYPE=INT32, ENCODING=RLE"
+  };
+  private static final String[] insertSQLs3 = {
+      "DELETE FROM root.vehicle WHERE time < 20",
+      "DELETE FROM root.test WHERE time < 20",
+      "insert into root.iotdb.d0(timestamp,s0) values(3,100)",
+      "insert into root.iotdb.d0(timestamp,s0,s1) values(22,101,'102')",
+      "insert into root.iotdb.d0(timestamp,s1) values(24,'103')",
+      "insert into root.iotdb.d1(timestamp,s2) values(21,104.0)",
+      "insert into root.iotdb.d1(timestamp,s2,s3) values(25,105.0,true)",
+      "insert into root.iotdb.d1(timestamp,s3) values(27,false)",
+      "insert into root.iotdb.d0(timestamp,s0) values(30,1000)",
+      "insert into root.iotdb.d0(timestamp,s0,s1) values(202,101,'102')",
+      "insert into root.iotdb.d0(timestamp,s1) values(44,'103')",
+      "insert into root.iotdb.d1(timestamp,s2) values(1,404.0)",
+      "insert into root.iotdb.d1(timestamp,s2,s3) values(250,10.0,true)",
+      "insert into root.iotdb.d1(timestamp,s3) values(207,false)",
+      "insert into root.cluster.d0(timestamp,s0) values(20,106)",
+      "insert into root.cluster.d0(timestamp,s0,s1) values(14,107,'108')",
+      "insert into root.cluster.d1.g0(timestamp,s0) values(1,110)",
+      "insert into root.cluster.d0(timestamp,s0) values(200,1006)",
+      "insert into root.cluster.d0(timestamp,s0,s1) values(1004,1007,'1080')",
+      "insert into root.cluster.d1.g0(timestamp,s0) values(1000,910)",
+      "insert into root.vehicle.d0(timestamp,s0) values(209,130)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(206,131,'132')",
+      "insert into root.vehicle.d0(timestamp,s1) values(70,'33')",
+      "insert into root.vehicle.d1(timestamp,s2) values(204,14.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(29,135.0,false)",
+      "insert into root.vehicle.d1(timestamp,s3) values(14,false)",
+      "insert into root.test.d0(timestamp,s0) values(19,136)",
+      "insert into root.test.d0(timestamp,s0,s1) values(7,137,'138')",
+      "insert into root.test.d0(timestamp,s1) values(30,'139')",
+      "insert into root.test.d1.g0(timestamp,s0) values(4,150)",
+      "insert into root.test.d0(timestamp,s0) values(1900,1316)",
+      "insert into root.test.d0(timestamp,s0,s1) values(700,1307,'1038')",
+      "insert into root.test.d0(timestamp,s1) values(3000,'1309')",
+      "insert into root.test.d1.g0(timestamp,s0) values(400,1050)"
+  };
+
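+  // Queries run after each insert round; query i in querysN is checked against list i in queryCorrentResults.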
+  private static final String[] querys1 = {
+      "select * from root.vehicle",
+      "select * from root.test",
+      "select * from root.vehicle,root.test where time = 11 or time = 12",
+      "select * from root.vehicle,root.test where d0.s0 > 10 and d0.s1 < 301 or time = 12",
+      "select * from root"
+  };
+  private static final String[] querys2 = {
+      "select * from root.vehicle",
+      "select * from root.test",
+      "select * from root.vehicle,root.test where time = 11 or time = 16",
+      "select * from root.vehicle,root.test where d0.s0 > 10 and d0.s1 < 301 or time = 20",
+      "select * from root"
+  };
+  private static final String[] querys3 = {
+      "select * from root.vehicle",
+      "select * from root.test",
+      "select * from root.cluster",
+      "select * from root.vehicle,root.test where time = 11 or time = 14",
+      "select * from root.vehicle,root.test where d0.s0 > 0 and d0.s1 < 1001 or time = 14",
+      "select * from root"
+  };
+
+  private Map<Integer, List<String>> queryCorrentResults = new HashMap<>();
+
+  @Before
+  public void setUp() throws Exception {
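+    // Reset the environment, start a single cluster server instance, and register the JDBC driver.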
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    EnvironmentUtils.cleanEnv();
+  }
+
+  private void initCorrectResults1(){
+    queryCorrentResults.clear();
+    queryCorrentResults.put(0, new ArrayList<>());
+    queryCorrentResults.put(1, new ArrayList<>());
+    queryCorrentResults.put(2, new ArrayList<>());
+    queryCorrentResults.put(3, new ArrayList<>());
+    queryCorrentResults.put(4, new ArrayList<>());
+    List<String> firstQueryRes = queryCorrentResults.get(0);
+    firstQueryRes.add("10,100,null,null,null");
+    firstQueryRes.add("11,null,null,104.0,null");
+    firstQueryRes.add("12,101,102,null,null");
+    firstQueryRes.add("15,null,null,105.0,true");
+    firstQueryRes.add("17,null,null,null,false");
+    firstQueryRes.add("19,null,103,null,null");
+    firstQueryRes.add("20,1000,null,null,null");
+    firstQueryRes.add("21,null,null,1004.0,null");
+    firstQueryRes.add("22,1001,1002,null,null");
+    firstQueryRes.add("25,null,null,1005.0,true");
+    firstQueryRes.add("27,null,null,null,true");
+    firstQueryRes.add("29,null,1003,null,null");
+
+    List<String> secondQueryRes = queryCorrentResults.get(1);
+    secondQueryRes.add("1,null,null,110");
+    secondQueryRes.add("10,106,null,1100");
+    secondQueryRes.add("14,107,108,null");
+    secondQueryRes.add("16,null,109,null");
+    secondQueryRes.add("30,1006,null,null");
+    secondQueryRes.add("34,1007,1008,null");
+    secondQueryRes.add("36,null,1090,null");
+
+    List<String> thirdQueryRes = queryCorrentResults.get(2);
+    thirdQueryRes.add("11,null,null,104.0,null,null,null,null");
+    thirdQueryRes.add("12,101,102,null,null,null,null,null");
+
+    List<String> forthQueryRes = queryCorrentResults.get(3);
+    forthQueryRes.add("12,101,102,null,null,null,null,null");
+
+    List<String> fifthQueryRes = queryCorrentResults.get(4);
+    fifthQueryRes.add("1,null,null,null,null,null,null,110");
+    fifthQueryRes.add("10,100,null,null,null,106,null,1100");
+    fifthQueryRes.add("11,null,null,104.0,null,null,null,null");
+    fifthQueryRes.add("12,101,102,null,null,null,null,null");
+    fifthQueryRes.add("14,null,null,null,null,107,108,null");
+    fifthQueryRes.add("15,null,null,105.0,true,null,null,null");
+    fifthQueryRes.add("16,null,null,null,null,null,109,null");
+    fifthQueryRes.add("17,null,null,null,false,null,null,null");
+    fifthQueryRes.add("19,null,103,null,null,null,null,null");
+    fifthQueryRes.add("20,1000,null,null,null,null,null,null");
+    fifthQueryRes.add("21,null,null,1004.0,null,null,null,null");
+    fifthQueryRes.add("22,1001,1002,null,null,null,null,null");
+    fifthQueryRes.add("25,null,null,1005.0,true,null,null,null");
+    fifthQueryRes.add("27,null,null,null,true,null,null,null");
+    fifthQueryRes.add("29,null,1003,null,null,null,null,null");
+    fifthQueryRes.add("30,null,null,null,null,1006,null,null");
+    fifthQueryRes.add("34,null,null,null,null,1007,1008,null");
+    fifthQueryRes.add("36,null,null,null,null,null,1090,null");
+  }
+
+  private void initCorrectResults2(){
+    queryCorrentResults.clear();
+    queryCorrentResults.put(0, new ArrayList<>());
+    queryCorrentResults.put(1, new ArrayList<>());
+    queryCorrentResults.put(2, new ArrayList<>());
+    queryCorrentResults.put(3, new ArrayList<>());
+    queryCorrentResults.put(4, new ArrayList<>());
+    List<String> firstQueryRes = queryCorrentResults.get(0);
+    firstQueryRes.add("6,120,null,null,null");
+    firstQueryRes.add("9,null,123,null,null");
+    firstQueryRes.add("10,100,null,null,null");
+    firstQueryRes.add("11,null,null,104.0,null");
+    firstQueryRes.add("12,101,102,null,null");
+    firstQueryRes.add("14,null,null,1024.0,null");
+    firstQueryRes.add("15,null,null,105.0,true");
+    firstQueryRes.add("16,128,null,null,null");
+    firstQueryRes.add("17,null,null,null,false");
+    firstQueryRes.add("18,189,198,null,null");
+    firstQueryRes.add("19,null,103,null,null");
+    firstQueryRes.add("20,1000,null,null,null");
+    firstQueryRes.add("21,null,null,1004.0,null");
+    firstQueryRes.add("22,1001,1002,null,null");
+    firstQueryRes.add("25,null,null,1005.0,true");
+    firstQueryRes.add("27,null,null,null,true");
+    firstQueryRes.add("29,null,1003,1205.0,true");
+    firstQueryRes.add("33,null,null,null,true");
+    firstQueryRes.add("38,121,122,null,null");
+    firstQueryRes.add("99,null,1234,null,null");
+
+    List<String> secondQueryRes = queryCorrentResults.get(1);
+    secondQueryRes.add("1,null,null,110");
+    secondQueryRes.add("2,null,1209,null");
+    secondQueryRes.add("4,null,null,330");
+    secondQueryRes.add("8,127,128,null");
+    secondQueryRes.add("10,106,null,1100");
+    secondQueryRes.add("14,107,108,430");
+    secondQueryRes.add("15,126,null,null");
+    secondQueryRes.add("16,null,109,null");
+    secondQueryRes.add("20,null,129,null");
+    secondQueryRes.add("30,1006,null,null");
+    secondQueryRes.add("34,1007,1008,null");
+    secondQueryRes.add("36,null,1090,null");
+    secondQueryRes.add("80,427,528,null");
+    secondQueryRes.add("150,426,null,null");
+
+    List<String> thirdQueryRes = queryCorrentResults.get(2);
+    thirdQueryRes.add("11,null,null,104.0,null,null,null,null");
+    thirdQueryRes.add("16,128,null,null,null,null,109,null");
+
+    List<String> forthQueryRes = queryCorrentResults.get(3);
+    forthQueryRes.add("20,1000,null,null,null,null,129,null");
+
+    List<String> fifthQueryRes = queryCorrentResults.get(4);
+    fifthQueryRes.add("1,null,null,null,null,null,null,110");
+    fifthQueryRes.add("2,null,null,null,null,null,1209,null");
+    fifthQueryRes.add("4,null,null,null,null,null,null,330");
+    fifthQueryRes.add("6,120,null,null,null,null,null,null");
+    fifthQueryRes.add("8,null,null,null,null,127,128,null");
+    fifthQueryRes.add("9,null,123,null,null,null,null,null");
+    fifthQueryRes.add("10,100,null,null,null,106,null,1100");
+    fifthQueryRes.add("11,null,null,104.0,null,null,null,null");
+    fifthQueryRes.add("12,101,102,null,null,null,null,null");
+    fifthQueryRes.add("14,null,null,1024.0,null,107,108,430");
+    fifthQueryRes.add("15,null,null,105.0,true,126,null,null");
+    fifthQueryRes.add("16,128,null,null,null,null,109,null");
+    fifthQueryRes.add("17,null,null,null,false,null,null,null");
+    fifthQueryRes.add("18,189,198,null,null,null,null,null");
+    fifthQueryRes.add("19,null,103,null,null,null,null,null");
+    fifthQueryRes.add("20,1000,null,null,null,null,129,null");
+    fifthQueryRes.add("21,null,null,1004.0,null,null,null,null");
+    fifthQueryRes.add("22,1001,1002,null,null,null,null,null");
+    fifthQueryRes.add("25,null,null,1005.0,true,null,null,null");
+    fifthQueryRes.add("27,null,null,null,true,null,null,null");
+    fifthQueryRes.add("29,null,1003,1205.0,true,null,null,null");
+    fifthQueryRes.add("30,null,null,null,null,1006,null,null");
+    fifthQueryRes.add("33,null,null,null,true,null,null,null");
+    fifthQueryRes.add("34,null,null,null,null,1007,1008,null");
+    fifthQueryRes.add("36,null,null,null,null,null,1090,null");
+    fifthQueryRes.add("38,121,122,null,null,null,null,null");
+    fifthQueryRes.add("80,null,null,null,null,427,528,null");
+    fifthQueryRes.add("99,null,1234,null,null,null,null,null");
+    fifthQueryRes.add("150,null,null,null,null,426,null,null");
+  }
+
+  private void initCorrectResults3(){
+    queryCorrentResults.clear();
+    queryCorrentResults.put(0, new ArrayList<>());
+    queryCorrentResults.put(1, new ArrayList<>());
+    queryCorrentResults.put(2, new ArrayList<>());
+    queryCorrentResults.put(3, new ArrayList<>());
+    queryCorrentResults.put(4, new ArrayList<>());
+    queryCorrentResults.put(5, new ArrayList<>());
+    List<String> zeroQueryRes = queryCorrentResults.get(0);
+    zeroQueryRes.add("14,null,null,null,false");
+    zeroQueryRes.add("20,1000,null,null,null");
+    zeroQueryRes.add("21,null,null,1004.0,null");
+    zeroQueryRes.add("22,1001,1002,null,null");
+    zeroQueryRes.add("25,null,null,1005.0,true");
+    zeroQueryRes.add("27,null,null,null,true");
+    zeroQueryRes.add("29,null,1003,135.0,false");
+    zeroQueryRes.add("33,null,null,null,true");
+    zeroQueryRes.add("38,121,122,null,null");
+    zeroQueryRes.add("70,null,33,null,null");
+    zeroQueryRes.add("99,null,1234,null,null");
+    zeroQueryRes.add("204,null,null,14.0,null");
+    zeroQueryRes.add("206,131,132,null,null");
+    zeroQueryRes.add("209,130,null,null,null");
+
+    List<String> firstQueryRes = queryCorrentResults.get(1);
+    firstQueryRes.add("4,null,null,150");
+    firstQueryRes.add("7,137,138,null");
+    firstQueryRes.add("19,136,null,null");
+    firstQueryRes.add("20,null,129,null");
+    firstQueryRes.add("30,1006,139,null");
+    firstQueryRes.add("34,1007,1008,null");
+    firstQueryRes.add("36,null,1090,null");
+    firstQueryRes.add("80,427,528,null");
+    firstQueryRes.add("150,426,null,null");
+    firstQueryRes.add("400,null,null,1050");
+    firstQueryRes.add("700,1307,1038,null");
+    firstQueryRes.add("1900,1316,null,null");
+    firstQueryRes.add("3000,null,1309,null");
+
+    List<String> secondQueryRes = queryCorrentResults.get(2);
+    secondQueryRes.add("1,null,null,110");
+    secondQueryRes.add("14,107,108,null");
+    secondQueryRes.add("20,106,null,null");
+    secondQueryRes.add("200,1006,null,null");
+    secondQueryRes.add("1000,null,null,910");
+    secondQueryRes.add("1004,1007,1080,null");
+
+    List<String> thirdQueryRes = queryCorrentResults.get(3);
+    thirdQueryRes.add("14,null,null,null,false,null,null,null");
+
+    List<String> forthQueryRes = queryCorrentResults.get(4);
+    forthQueryRes.add("14,null,null,null,false,null,null,null");
+
+    List<String> fifthQueryRes = queryCorrentResults.get(5);
+    fifthQueryRes.add("1,null,null,404.0,null,null,null,null,null,null,null,null,null,null,110");
+    fifthQueryRes.add("3,100,null,null,null,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("4,null,null,null,null,null,null,null,null,null,null,150,null,null,null");
+    fifthQueryRes.add("7,null,null,null,null,null,null,null,null,137,138,null,null,null,null");
+    fifthQueryRes.add("14,null,null,null,null,null,null,null,false,null,null,null,107,108,null");
+    fifthQueryRes.add("19,null,null,null,null,null,null,null,null,136,null,null,null,null,null");
+    fifthQueryRes.add("20,null,null,null,null,1000,null,null,null,null,129,null,106,null,null");
+    fifthQueryRes.add("21,null,null,104.0,null,null,null,1004.0,null,null,null,null,null,null,null");
+    fifthQueryRes.add("22,101,102,null,null,1001,1002,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("24,null,103,null,null,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("25,null,null,105.0,true,null,null,1005.0,true,null,null,null,null,null,null");
+    fifthQueryRes.add("27,null,null,null,false,null,null,null,true,null,null,null,null,null,null");
+    fifthQueryRes.add("29,null,null,null,null,null,1003,135.0,false,null,null,null,null,null,null");
+    fifthQueryRes.add("30,1000,null,null,null,null,null,null,null,1006,139,null,null,null,null");
+    fifthQueryRes.add("33,null,null,null,null,null,null,null,true,null,null,null,null,null,null");
+    fifthQueryRes.add("34,null,null,null,null,null,null,null,null,1007,1008,null,null,null,null");
+    fifthQueryRes.add("36,null,null,null,null,null,null,null,null,null,1090,null,null,null,null");
+    fifthQueryRes.add("38,null,null,null,null,121,122,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("44,null,103,null,null,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("70,null,null,null,null,null,33,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("80,null,null,null,null,null,null,null,null,427,528,null,null,null,null");
+    fifthQueryRes.add("99,null,null,null,null,null,1234,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("150,null,null,null,null,null,null,null,null,426,null,null,null,null,null");
+    fifthQueryRes.add("200,null,null,null,null,null,null,null,null,null,null,null,1006,null,null");
+    fifthQueryRes.add("202,101,102,null,null,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("204,null,null,null,null,null,null,14.0,null,null,null,null,null,null,null");
+    fifthQueryRes.add("206,null,null,null,null,131,132,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("207,null,null,null,false,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("209,null,null,null,null,130,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("250,null,null,10.0,true,null,null,null,null,null,null,null,null,null,null");
+    fifthQueryRes.add("400,null,null,null,null,null,null,null,null,null,null,1050,null,null,null");
+    fifthQueryRes.add("700,null,null,null,null,null,null,null,null,1307,1038,null,null,null,null");
+    fifthQueryRes.add("1000,null,null,null,null,null,null,null,null,null,null,null,null,null,910");
+    fifthQueryRes.add("1004,null,null,null,null,null,null,null,null,null,null,null,1007,1080,null");
+    fifthQueryRes.add("1900,null,null,null,null,null,null,null,null,1316,null,null,null,null,null");
+    fifthQueryRes.add("3000,null,null,null,null,null,null,null,null,null,1309,null,null,null,null");
+  }
+
+  @Test
+  public void testClusterQueryWithLargeData() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      Statement statement = connection.createStatement();
+
+      // first round: create time series and insert the initial batch of data
+      insertData(connection, createSQLs1, insertSQLs1);
+      initCorrectResults1();
+      for (int i = 0; i < querys1.length; i++) {
+        String queryStatement = querys1[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResults.get(i);
+        int count = 0;
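+        // Build "col1,col2,...,colN" from each returned row and compare it with the expected row.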
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+
+      // second round: insert additional data into the existing time series
+      insertData(connection, new String[]{}, insertSqls2);
+      initCorrectResults2();
+      for (int i = 0; i < querys2.length; i++) {
+        String queryStatement = querys2[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResults.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+
+      // third round: delete earlier data, create new storage groups, and insert again
+      insertData(connection, createSQLs3, insertSQLs3);
+      initCorrectResults3();
+      for (int i = 0; i < querys3.length; i++) {
+        String queryStatement = querys3[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResults.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+
+      statement.close();
+    }
+  }
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java
new file mode 100644
index 0000000..f5cc295
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java
@@ -0,0 +1,550 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertBatchData;
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.apache.iotdb.jdbc.IoTDBResultMetadata;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
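+/**
+ * Runs the same query sets in four modes: local vs. remote execution (toggled through
+ * QPExecutorUtils.setLocalNodeAddr) and row-by-row vs. batch inserts.
+ */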
+public class ClusterQueryTest {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final String URL = "127.0.0.1:6667/";
+
+  private String[] createSQLs = {
+      "SET STORAGE GROUP TO root.vehicle",
+      "SET STORAGE GROUP TO root.test",
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.test.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.test.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.test.d1.g0.s0 WITH DATATYPE=INT32, ENCODING=RLE"
+  };
+  private String[] insertSQLs = {
+      "insert into root.vehicle.d0(timestamp,s0) values(10,100)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(12,101,'102')",
+      "insert into root.vehicle.d0(timestamp,s1) values(19,'103')",
+      "insert into root.vehicle.d1(timestamp,s2) values(11,104.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(15,105.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(17,false)",
+      "insert into root.vehicle.d0(timestamp,s0) values(20,1000)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(22,1001,'1002')",
+      "insert into root.vehicle.d0(timestamp,s1) values(29,'1003')",
+      "insert into root.vehicle.d1(timestamp,s2) values(21,1004.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(25,1005.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(27,true)",
+      "insert into root.test.d0(timestamp,s0) values(10,106)",
+      "insert into root.test.d0(timestamp,s0,s1) values(14,107,'108')",
+      "insert into root.test.d0(timestamp,s1) values(16,'109')",
+      "insert into root.test.d1.g0(timestamp,s0) values(1,110)",
+      "insert into root.test.d0(timestamp,s0) values(30,1006)",
+      "insert into root.test.d0(timestamp,s0,s1) values(34,1007,'1008')",
+      "insert into root.test.d0(timestamp,s1) values(36,'1090')",
+      "insert into root.test.d1.g0(timestamp,s0) values(10,1100)",
+      "insert into root.vehicle.d0(timestamp,s0) values(6,120)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(38,121,'122')",
+      "insert into root.vehicle.d0(timestamp,s1) values(9,'123')",
+      "insert into root.vehicle.d0(timestamp,s0) values(16,128)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(18,189,'198')",
+      "insert into root.vehicle.d0(timestamp,s1) values(99,'1234')",
+      "insert into root.vehicle.d1(timestamp,s2) values(14,1024.0)",
+      "insert into root.vehicle.d1(timestamp,s2,s3) values(29,1205.0,true)",
+      "insert into root.vehicle.d1(timestamp,s3) values(33,true)",
+      "insert into root.test.d0(timestamp,s0) values(15,126)",
+      "insert into root.test.d0(timestamp,s0,s1) values(8,127,'128')",
+      "insert into root.test.d0(timestamp,s1) values(20,'129')",
+      "insert into root.test.d1.g0(timestamp,s0) values(14,430)",
+      "insert into root.test.d0(timestamp,s0) values(150,426)",
+      "insert into root.test.d0(timestamp,s0,s1) values(80,427,'528')",
+      "insert into root.test.d0(timestamp,s1) values(2,'1209')",
+      "insert into root.test.d1.g0(timestamp,s0) values(4,330)"
+  };
+  private String[] queryStatementsWithoutFilter = {
+      "select * from root",
+      "select * from root.vehicle",
+      "select * from root.test",
+      "select vehicle.d0.s0, test.d0.s1 from root",
+      "select vehicle.d1.s2, vehicle.d1.s3 ,test.d1.g0.s0 from root"
+  };
+
+  private Map<Integer, List<String>> queryCorrentResultsWithoutFilter = new HashMap<>();
+
+  private String[] queryStatementsWithFilter = {
+      "select * from root.vehicle where d0.s0 > 10",
+      "select * from root.vehicle where d0.s0 < 101",
+      "select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or time = 12",
+      "select * from root.test where d0.s0 > 10",
+      "select * from root.test where d0.s0 > 10 and d0.s0 < 200 or d0.s0 = 3",
+      "select * from root where vehicle.d0.s0 > 10 and test.d0.s0 < 101 or time = 20",
+  };
+
+  private Map<Integer, List<String>> queryCorrentResultsWithFilter = new HashMap<>();
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    EnvironmentUtils.cleanEnv();
+  }
+
+  private void initCorrectResultsWithoutFilter(){
+    queryCorrentResultsWithoutFilter.put(0, new ArrayList<>());
+    queryCorrentResultsWithoutFilter.put(1, new ArrayList<>());
+    queryCorrentResultsWithoutFilter.put(2, new ArrayList<>());
+    queryCorrentResultsWithoutFilter.put(3, new ArrayList<>());
+    queryCorrentResultsWithoutFilter.put(4, new ArrayList<>());
+    List<String> firstQueryRes = queryCorrentResultsWithoutFilter.get(0);
+    firstQueryRes.add("1,null,null,null,null,null,null,110");
+    firstQueryRes.add("2,null,null,null,null,null,1209,null");
+    firstQueryRes.add("4,null,null,null,null,null,null,330");
+    firstQueryRes.add("6,120,null,null,null,null,null,null");
+    firstQueryRes.add("8,null,null,null,null,127,128,null");
+    firstQueryRes.add("9,null,123,null,null,null,null,null");
+    firstQueryRes.add("10,100,null,null,null,106,null,1100");
+    firstQueryRes.add("11,null,null,104.0,null,null,null,null");
+    firstQueryRes.add("12,101,102,null,null,null,null,null");
+    firstQueryRes.add("14,null,null,1024.0,null,107,108,430");
+    firstQueryRes.add("15,null,null,105.0,true,126,null,null");
+    firstQueryRes.add("16,128,null,null,null,null,109,null");
+    firstQueryRes.add("17,null,null,null,false,null,null,null");
+    firstQueryRes.add("18,189,198,null,null,null,null,null");
+    firstQueryRes.add("19,null,103,null,null,null,null,null");
+    firstQueryRes.add("20,1000,null,null,null,null,129,null");
+    firstQueryRes.add("21,null,null,1004.0,null,null,null,null");
+    firstQueryRes.add("22,1001,1002,null,null,null,null,null");
+    firstQueryRes.add("25,null,null,1005.0,true,null,null,null");
+    firstQueryRes.add("27,null,null,null,true,null,null,null");
+    firstQueryRes.add("29,null,1003,1205.0,true,null,null,null");
+    firstQueryRes.add("30,null,null,null,null,1006,null,null");
+    firstQueryRes.add("33,null,null,null,true,null,null,null");
+    firstQueryRes.add("34,null,null,null,null,1007,1008,null");
+    firstQueryRes.add("36,null,null,null,null,null,1090,null");
+    firstQueryRes.add("38,121,122,null,null,null,null,null");
+    firstQueryRes.add("80,null,null,null,null,427,528,null");
+    firstQueryRes.add("99,null,1234,null,null,null,null,null");
+    firstQueryRes.add("150,null,null,null,null,426,null,null");
+
+    List<String> secondQueryRes = queryCorrentResultsWithoutFilter.get(1);
+    secondQueryRes.add("6,120,null,null,null");
+    secondQueryRes.add("9,null,123,null,null");
+    secondQueryRes.add("10,100,null,null,null");
+    secondQueryRes.add("11,null,null,104.0,null");
+    secondQueryRes.add("12,101,102,null,null");
+    secondQueryRes.add("14,null,null,1024.0,null");
+    secondQueryRes.add("15,null,null,105.0,true");
+    secondQueryRes.add("16,128,null,null,null");
+    secondQueryRes.add("17,null,null,null,false");
+    secondQueryRes.add("18,189,198,null,null");
+    secondQueryRes.add("19,null,103,null,null");
+    secondQueryRes.add("20,1000,null,null,null");
+    secondQueryRes.add("21,null,null,1004.0,null");
+    secondQueryRes.add("22,1001,1002,null,null");
+    secondQueryRes.add("25,null,null,1005.0,true");
+    secondQueryRes.add("27,null,null,null,true");
+    secondQueryRes.add("29,null,1003,1205.0,true");
+    secondQueryRes.add("33,null,null,null,true");
+    secondQueryRes.add("38,121,122,null,null");
+    secondQueryRes.add("99,null,1234,null,null");
+    List<String> thirdQueryRes = queryCorrentResultsWithoutFilter.get(2);
+    thirdQueryRes.add("1,null,null,110");
+    thirdQueryRes.add("2,null,1209,null");
+    thirdQueryRes.add("4,null,null,330");
+    thirdQueryRes.add("8,127,128,null");
+    thirdQueryRes.add("10,106,null,1100");
+    thirdQueryRes.add("14,107,108,430");
+    thirdQueryRes.add("15,126,null,null");
+    thirdQueryRes.add("16,null,109,null");
+    thirdQueryRes.add("20,null,129,null");
+    thirdQueryRes.add("30,1006,null,null");
+    thirdQueryRes.add("34,1007,1008,null");
+    thirdQueryRes.add("36,null,1090,null");
+    thirdQueryRes.add("80,427,528,null");
+    thirdQueryRes.add("150,426,null,null");
+    List<String> forthQueryRes = queryCorrentResultsWithoutFilter.get(3);
+    forthQueryRes.add("2,null,1209");
+    forthQueryRes.add("6,120,null");
+    forthQueryRes.add("8,null,128");
+    forthQueryRes.add("10,100,null");
+    forthQueryRes.add("12,101,null");
+    forthQueryRes.add("14,null,108");
+    forthQueryRes.add("16,128,109");
+    forthQueryRes.add("18,189,null");
+    forthQueryRes.add("20,1000,129");
+    forthQueryRes.add("22,1001,null");
+    forthQueryRes.add("34,null,1008");
+    forthQueryRes.add("36,null,1090");
+    forthQueryRes.add("38,121,null");
+    forthQueryRes.add("80,null,528");
+    List<String> fifthQueryRes = queryCorrentResultsWithoutFilter.get(4);
+    fifthQueryRes.add("1,null,null,110");
+    fifthQueryRes.add("4,null,null,330");
+    fifthQueryRes.add("10,null,null,1100");
+    fifthQueryRes.add("11,104.0,null,null");
+    fifthQueryRes.add("14,1024.0,null,430");
+    fifthQueryRes.add("15,105.0,true,null");
+    fifthQueryRes.add("17,null,false,null");
+    fifthQueryRes.add("21,1004.0,null,null");
+    fifthQueryRes.add("25,1005.0,true,null");
+    fifthQueryRes.add("27,null,true,null");
+    fifthQueryRes.add("29,1205.0,true,null");
+    fifthQueryRes.add("33,null,true,null");
+  }
+
+  private void initCorrectResultsWithFilter(){
+    queryCorrentResultsWithFilter.put(0, new ArrayList<>());
+    queryCorrentResultsWithFilter.put(1, new ArrayList<>());
+    queryCorrentResultsWithFilter.put(2, new ArrayList<>());
+    queryCorrentResultsWithFilter.put(3, new ArrayList<>());
+    queryCorrentResultsWithFilter.put(4, new ArrayList<>());
+    queryCorrentResultsWithFilter.put(5, new ArrayList<>());
+    List<String> firstQueryRes = queryCorrentResultsWithFilter.get(0);
+    firstQueryRes.add("6,120,null,null,null");
+    firstQueryRes.add("10,100,null,null,null");
+    firstQueryRes.add("12,101,102,null,null");
+    firstQueryRes.add("16,128,null,null,null");
+    firstQueryRes.add("18,189,198,null,null");
+    firstQueryRes.add("20,1000,null,null,null");
+    firstQueryRes.add("22,1001,1002,null,null");
+    firstQueryRes.add("38,121,122,null,null");
+
+    List<String> secondQueryRes = queryCorrentResultsWithFilter.get(1);
+    secondQueryRes.add("10,100,null,null,null");
+    List<String> thirdQueryRes = queryCorrentResultsWithFilter.get(2);
+    thirdQueryRes.add("10,100,null,null,null");
+    thirdQueryRes.add("12,101,102,null,null");
+    List<String> forthQueryRes = queryCorrentResultsWithFilter.get(3);
+    forthQueryRes.add("8,127,128,null");
+    forthQueryRes.add("10,106,null,1100");
+    forthQueryRes.add("14,107,108,430");
+    forthQueryRes.add("15,126,null,null");
+    forthQueryRes.add("30,1006,null,null");
+    forthQueryRes.add("34,1007,1008,null");
+    forthQueryRes.add("80,427,528,null");
+    forthQueryRes.add("150,426,null,null");
+    List<String> fifthQueryRes = queryCorrentResultsWithFilter.get(4);
+    fifthQueryRes.add("8,127,128,null");
+    fifthQueryRes.add("10,106,null,1100");
+    fifthQueryRes.add("14,107,108,430");
+    fifthQueryRes.add("15,126,null,null");
+    List<String> sixthQueryRes = queryCorrentResultsWithFilter.get(5);
+    sixthQueryRes.add("20,1000,null,null,null,null,129,null");
+  }
+
+  @Test
+  public void testLocalQueryWithoutFilter() throws Exception {
+    initCorrectResultsWithoutFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithoutFilter.length; i++) {
+        String queryStatement = queryStatementsWithoutFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithoutFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testLocalQueryWithFilter() throws Exception {
+
+    initCorrectResultsWithFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithFilter.length; i++) {
+        String queryStatement = queryStatementsWithFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testRemoteQueryWithoutFilter() throws Exception {
+
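+    // Override the local node address so queries take the remote execution path;
+    // the real address is restored after the assertions.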
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    initCorrectResultsWithoutFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithoutFilter.length; i++) {
+        String queryStatement = queryStatementsWithoutFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithoutFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testRemoteQueryWithFilter() throws Exception {
+
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    initCorrectResultsWithFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithFilter.length; i++) {
+        String queryStatement = queryStatementsWithFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testLocalQueryWithoutFilterByBatch() throws Exception {
+    initCorrectResultsWithoutFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertBatchData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithoutFilter.length; i++) {
+        String queryStatement = queryStatementsWithoutFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithoutFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testLocalQueryWithFilterByBatch() throws Exception {
+
+    initCorrectResultsWithFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertBatchData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithFilter.length; i++) {
+        String queryStatement = queryStatementsWithFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testRemoteQueryWithoutFilterByBatch() throws Exception {
+
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    initCorrectResultsWithoutFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertBatchData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithoutFilter.length; i++) {
+        String queryStatement = queryStatementsWithoutFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithoutFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+      statement.close();
+    }
+  }
+
+  @Test
+  public void testRemoteQueryWithFilterByBatch() throws Exception {
+
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    initCorrectResultsWithFilter();
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertBatchData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      for (int i = 0; i < queryStatementsWithFilter.length; i++) {
+        String queryStatement = queryStatementsWithFilter[i];
+        boolean hasResultSet = statement.execute(queryStatement);
+        assertTrue(hasResultSet);
+        ResultSet resultSet = statement.getResultSet();
+        IoTDBResultMetadata resultSetMetaData = (IoTDBResultMetadata) resultSet.getMetaData();
+        int columnCount = resultSetMetaData.getColumnCount();
+        List<String> correctResult = queryCorrentResultsWithFilter.get(i);
+        int count = 0;
+        while (resultSet.next()) {
+          String correctRow = correctResult.get(count++);
+          StringBuilder rowRecordBuilder = new StringBuilder();
+          for (int j = 1; j < columnCount; j++) {
+            rowRecordBuilder.append(resultSet.getString(j)).append(",");
+          }
+          rowRecordBuilder.append(resultSet.getString(columnCount));
+          assertEquals(correctRow, rowRecordBuilder.toString());
+        }
+      }
+      QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+      statement.close();
+    }
+  }
+
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
new file mode 100644
index 0000000..c09aaa5
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
@@ -0,0 +1,406 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
+import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalSingleQueryManager;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
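+/**
+ * Tests for the query-node side {@link ClusterLocalQueryManager}: every executed
+ * statement is expected to register a {@link ClusterLocalSingleQueryManager} keyed by
+ * task id.
+ */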
+public class ClusterLocalManagerTest {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static ClusterLocalQueryManager manager = ClusterLocalQueryManager.getInstance();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+  private static final String URL = "127.0.0.1:6667/";
+
+  private String[] createSQLs = {
+      "set storage group to root.vehicle",
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN"
+  };
+  private String[] insertSQLs = {
+      "insert into root.vehicle.d0(timestamp,s0) values(10,100)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(12,101,'102')",
+      "insert into root.vehicle.d0(timestamp,s3) values(19,'103')",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(22,1031,'3102')",
+      "insert into root.vehicle.d0(timestamp,s1) values(192,'1033')"
+  };
+  private String queryStatementsWithoutFilter = "select * from root.vehicle";
+  private String queryStatementsWithFilter = "select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or d0.s0 = 3";
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
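+  /**
+   * Runs the no-filter query three times and checks that the taskId-to-jobId map grows
+   * by one per execution, with each registered task id resolving to a single-query
+   * manager.
+   */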
+  @Test
+  public void testClusterLocalQueryManagerWithoutFilter() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<String, Long> map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(1, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(2, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(3, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      statement.close();
+    }
+  }
+
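+  /**
+   * Same bookkeeping checks as the no-filter case, with a where clause: every execution
+   * returns exactly one row (time 10, s0 = 100, s1 and s3 null) and registers one more
+   * task in the local query manager.
+   */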
+  @Test
+  public void testClusterLocalQueryManagerWithFilter() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      ConcurrentHashMap<String, Long> map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(1, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      assertFalse(resultSet.next());
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(2, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      assertFalse(resultSet.next());
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(3, map.size());
+      for (String taskId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      assertFalse(resultSet.next());
+
+      statement.close();
+    }
+  }
+
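+  /**
+   * Inspects the per-task state of a no-filter query: the job id matches the registry,
+   * the query round is 0, there is no filter reader, and each of the three select
+   * readers is a ClusterBatchReaderWithoutTimeGenerator whose data type matches the
+   * recorded type map.
+   */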
+  @Test
+  public void testClusterLocalSingleQueryWithoutFilterManager() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<String, Long> map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(1, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(0, singleQueryManager.getQueryRound());
+        assertNull(singleQueryManager.getFilterReader());
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          assertEquals(dataType,
+              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+        }
+      }
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(2, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(0, singleQueryManager.getQueryRound());
+        assertNull(singleQueryManager.getFilterReader());
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          assertEquals(dataType,
+              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+        }
+      }
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(3, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(0, singleQueryManager.getQueryRound());
+        assertNull(singleQueryManager.getFilterReader());
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          assertEquals(dataType,
+              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+        }
+      }
+      statement.close();
+    }
+  }
+
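+  /**
+   * Inspects the per-task state of a filtered query: the query round is 3, the filter
+   * reader covers root.vehicle.d0.s0 and holds a query data set, and each of the three
+   * select readers is a ClusterBatchReaderByTimestamp with a matching data type.
+   */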
+  @Test
+  public void testClusterLocalSingleQueryWithFilterManager() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<String, Long> map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(1, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(3, singleQueryManager.getQueryRound());
+        ClusterFilterSeriesBatchReader filterReader =
+            (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        assertNotNull(filterReader);
+        List<Path> allFilterPaths = new ArrayList<>();
+        allFilterPaths.add(new Path("root.vehicle.d0.s0"));
+        assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
+        assertNotNull(filterReader.getQueryDataSet());
+
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertNotNull(selectSeriesReaders);
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          assertEquals(dataType,
+              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+        }
+      }
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(2, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(3, singleQueryManager.getQueryRound());
+        ClusterFilterSeriesBatchReader filterReader =
+            (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        assertNotNull(filterReader);
+        List<Path> allFilterPaths = new ArrayList<>();
+        allFilterPaths.add(new Path("root.vehicle.d0.s0"));
+        assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
+        assertNotNull(filterReader.getQueryDataSet());
+
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertNotNull(selectSeriesReaders);
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          assertEquals(dataType,
+              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+        }
+      }
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterLocalQueryManager.getTaskIdMapJobId();
+      assertEquals(3, map.size());
+      for (String taskId : map.keySet()) {
+        ClusterLocalSingleQueryManager singleQueryManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleQueryManager);
+        assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
+        assertEquals(3, singleQueryManager.getQueryRound());
+        ClusterFilterSeriesBatchReader filterReader =
+            (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        assertNotNull(filterReader);
+        List<Path> allFilterPaths = new ArrayList<>();
+        allFilterPaths.add(new Path("root.vehicle.d0.s0"));
+        assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
+        assertNotNull(filterReader.getQueryDataSet());
+
+        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+            .getSelectSeriesReaders();
+        assertNotNull(selectSeriesReaders);
+        assertEquals(3, selectSeriesReaders.size());
+        Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
+        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+          String path = entry.getKey();
+          TSDataType dataType = typeMap.get(path);
+          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          assertEquals(dataType,
+              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+        }
+      }
+      statement.close();
+    }
+  }
+
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java
new file mode 100644
index 0000000..b800fbf
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java
@@ -0,0 +1,334 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.jdbc.Config;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
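+/**
+ * Tests for the coordinator-side {@link ClusterRpcQueryManager}: every executed
+ * statement is expected to register a {@link ClusterRpcSingleQueryManager} addressable
+ * both by job id and by task id of the form "ip:port:jobId".
+ */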
+public class ClusterRpcManagerTest {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final String LOCAL_ADDR = String
+      .format("%s:%d", CLUSTER_CONFIG.getIp(), CLUSTER_CONFIG.getPort());
+  private static ClusterRpcQueryManager manager = ClusterRpcQueryManager.getInstance();
+
+  private static final String URL = "127.0.0.1:6667/";
+
+  private String[] createSQLs = {
+      "set storage group to root.vehicle",
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN"
+  };
+  private String[] insertSQLs = {
+      "insert into root.vehicle.d0(timestamp,s0) values(10,100)",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(12,101,'102')",
+      "insert into root.vehicle.d0(timestamp,s3) values(19,'103')",
+      "insert into root.vehicle.d0(timestamp,s0,s1) values(22,1031,'3102')",
+      "insert into root.vehicle.d0(timestamp,s1) values(192,'1033')"
+  };
+  private String queryStatementsWithoutFilter = "select * from root.vehicle";
+  private String queryStatementsWithFilter = "select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or d0.s0 = 3";
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    EnvironmentUtils.cleanEnv();
+  }
+
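+  /**
+   * Runs the no-filter query three times and checks the coordinator registry: the
+   * jobId-to-taskId map grows by one per execution, each entry is resolvable by task id
+   * and by job id, and every task id equals LOCAL_ADDR plus the job id.
+   */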
+  @Test
+  public void testClusterRpcQueryManagerWithoutFilter() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<Long, String> map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(1, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(2, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(3, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+      statement.close();
+    }
+  }
+
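+  /**
+   * Same registry checks as the no-filter case, plus result verification: each filtered
+   * query returns exactly one row (time 10, s0 = 100, s1 and s3 null).
+   */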
+  @Test
+  public void testClusterRpcQueryManagerWithFilter() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+
+      // first query
+      boolean hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      ConcurrentHashMap<Long, String> map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(1, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+      assertFalse(resultSet.next());
+
+      // second query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(2, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+      assertFalse(resultSet.next());
+
+      // third query
+      hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      assertEquals(10, resultSet.getLong(1));
+      assertEquals(100, resultSet.getInt(2));
+      assertNull(resultSet.getString(3));
+      assertNull(resultSet.getString(4));
+      map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(3, map.size());
+      for (String taskId : map.values()) {
+        assertNotNull(manager.getSingleQuery(taskId));
+      }
+      for (long jobId : map.keySet()) {
+        assertNotNull(manager.getSingleQuery(jobId));
+      }
+      for (Entry<Long, String> entry : map.entrySet()) {
+        long jobId = entry.getKey();
+        String taskId = entry.getValue();
+        assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
+      }
+      assertFalse(resultSet.next());
+      statement.close();
+    }
+  }
+
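+  /**
+   * Verifies the coordinator-side single-query manager of a no-filter query: zero query
+   * rounds, one select-path plan covering s0, s1 and s3 with no filter expression, and
+   * empty remote series maps and readers, since the queried data lives on the local
+   * node.
+   */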
+  @Test
+  public void testClusterRpcSingleQueryWithoutFilterManager() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<Long, String> map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(1, map.size());
+      for (String taskId : map.values()) {
+        ClusterRpcSingleQueryManager singleManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleManager);
+        assertEquals(0, singleManager.getQueryRounds());
+        assertEquals(taskId, singleManager.getTaskId());
+
+        // select path plans
+        Map<String, QueryPlan> selectPathPlans = singleManager.getSelectPathPlans();
+        assertEquals(1, selectPathPlans.size());
+        for (QueryPlan queryPlan : selectPathPlans.values()) {
+          List<Path> paths = queryPlan.getPaths();
+          List<Path> correctPaths = new ArrayList<>();
+          correctPaths.add(new Path("root.vehicle.d0.s0"));
+          correctPaths.add(new Path("root.vehicle.d0.s1"));
+          correctPaths.add(new Path("root.vehicle.d0.s3"));
+          assertEquals(correctPaths, paths);
+          assertNull(queryPlan.getExpression());
+        }
+
+        // select series by group id
+        assertEquals(0, singleManager.getSelectSeriesByGroupId().size());
+
+        // select series reader
+        assertTrue(singleManager.getSelectSeriesReaders().isEmpty());
+
+      }
+      statement.close();
+    }
+  }
+
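+  /**
+   * Filtered-query variant: the select-path plan carries a non-null expression, while
+   * the remote series maps, select readers, and filter-group entities stay empty
+   * because the queried data lives on the local node.
+   */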
+  @Test
+  public void testClusterRpcSingleQueryWithFilterManager() throws Exception {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(queryStatementsWithFilter);
+      assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      assertTrue(resultSet.next());
+      ConcurrentHashMap<Long, String> map = ClusterRpcQueryManager.getJobIdMapTaskId();
+      assertEquals(1, map.size());
+      for (String taskId : map.values()) {
+        ClusterRpcSingleQueryManager singleManager = manager.getSingleQuery(taskId);
+        assertNotNull(singleManager);
+        assertEquals(0, singleManager.getQueryRounds());
+        assertEquals(taskId, singleManager.getTaskId());
+
+        // select path plans
+        Map<String, QueryPlan> selectPathPlans = singleManager.getSelectPathPlans();
+        assertEquals(1, selectPathPlans.size());
+        for (QueryPlan queryPlan : selectPathPlans.values()) {
+          List<Path> paths = queryPlan.getPaths();
+          List<Path> correctPaths = new ArrayList<>();
+          correctPaths.add(new Path("root.vehicle.d0.s0"));
+          correctPaths.add(new Path("root.vehicle.d0.s1"));
+          correctPaths.add(new Path("root.vehicle.d0.s3"));
+          assertEquals(correctPaths, paths);
+          assertNotNull(queryPlan.getExpression());
+        }
+
+        // select series by group id
+        assertTrue(singleManager.getSelectSeriesByGroupId().isEmpty());
+
+        // select series reader
+        assertTrue(singleManager.getSelectSeriesReaders().isEmpty());
+
+        // filter path plans
+        Map<String, FilterGroupEntity> filterGroupEntityMap =
+            singleManager.getFilterGroupEntityMap();
+        assertTrue(filterGroupEntityMap.isEmpty());
+
+      }
+      statement.close();
+    }
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java
new file mode 100644
index 0000000..84f8f5f
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.utils;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.*;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
+import org.apache.iotdb.cluster.query.expression.TrueExpression;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.db.qp.QueryProcessor;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.jdbc.Config;
+import org.apache.iotdb.tsfile.read.common.Path;
... 3401 lines suppressed ...