Posted to commits@iotdb.apache.org by lt...@apache.org on 2019/05/23 07:29:30 UTC

[incubator-iotdb] branch cluster updated (411ea77 -> 51f91b6)

This is an automated email from the ASF dual-hosted git repository.

lta pushed a change to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


    from 411ea77  remove groupIdMapNodeCache
     new 41a49ad  add cluster_framework
     new 51f91b6  fix lots of conflicts with cluster_framework

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../iotdb/cluster/concurrent/ThreadName.java       |   5 +
 ...Manager.java => NodeAsClientThreadManager.java} |  18 +--
 ...QPTaskManager.java => QPTaskThreadManager.java} |  12 +-
 ...erManager.java => QueryTimerThreadManager.java} |  10 +-
 .../cluster/concurrent/pool/ThreadPoolManager.java |  15 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |  34 ++---
 .../iotdb/cluster/config/ClusterConstant.java      |   2 +-
 .../iotdb/cluster/config/ClusterDescriptor.java    |  17 ++-
 .../org/apache/iotdb/cluster/entity/Server.java    |  23 ++-
 .../cluster/entity/raft/DataStateMachine.java      |  18 ++-
 .../cluster/qp/executor/AbstractQPExecutor.java    |  23 ++-
 .../cluster/qp/executor/NonQueryExecutor.java      |  72 ++++-----
 .../cluster/qp/executor/QueryMetadataExecutor.java | 142 +++++++++++-------
 .../apache/iotdb/cluster/qp/task/BatchQPTask.java  |  72 ++++-----
 .../iotdb/cluster/qp/task/DataQueryTask.java       |  30 +---
 .../org/apache/iotdb/cluster/qp/task/QPTask.java   |  51 ++++++-
 .../apache/iotdb/cluster/qp/task/SingleQPTask.java |   2 +-
 .../coordinatornode/ClusterRpcQueryManager.java    |  12 ++
 .../coordinatornode/IClusterRpcQueryManager.java   |   5 +
 .../querynode/ClusterLocalQueryManager.java        |  13 ++
 .../querynode/IClusterLocalQueryManager.java       |   5 +
 .../cluster/query/utils/ClusterRpcReaderUtils.java |  22 ++-
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  14 +-
 .../rpc/raft/impl/RaftNodeAsClientManager.java     | 161 ++++++++-------------
 .../QueryMetricAsyncProcessor.java                 |   7 +-
 .../nonquery/DataGroupNonQueryAsyncProcessor.java  |   5 +-
 .../querymetadata/QueryMetadataAsyncProcessor.java |   2 +-
 .../{querymetric => }/QueryMetricRequest.java      |  11 +-
 .../{querymetric => }/QueryMetricResponse.java     |   3 +-
 .../nonquery/DataGroupNonQueryResponse.java        |  12 ++
 .../cluster/service/TSServiceClusterImpl.java      |  51 ++++---
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |  54 +++----
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |   5 +
 .../apache/iotdb/cluster/utils/hash/Router.java    |   8 +-
 .../iotdb/cluster/utils/hash/VirtualNode.java      |  18 +--
 ...nagerTest.java => QPTaskThreadManagerTest.java} |  16 +-
 .../cluster/config/ClusterDescriptorTest.java      |  14 +-
 .../integration/IoTDBMetadataFetchAbstract.java    |  63 ++++----
 .../integration/IoTDBMetadataFetchLocallyIT.java   |   1 +
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  19 +--
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   1 -
 .../iotdb/cluster/utils/hash/MD5HashTest.java      |   8 +-
 .../iotdb/cluster/utils/hash/PhysicalNodeTest.java |   4 +-
 .../iotdb/cluster/utils/hash/RouterTest.java       |  11 +-
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            |   2 +-
 iotdb/iotdb/conf/iotdb-cluster.properties          |  15 +-
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |   2 +-
 .../db/query/executor/AggregateEngineExecutor.java |   2 -
 .../iotdb/db/query/executor/EngineQueryRouter.java |   1 -
 ...actQueryRouter.java => IEngineQueryRouter.java} |  52 +------
 .../org/apache/iotdb/db/service/TSServiceImpl.java |  19 ++-
 service-rpc/src/main/thrift/rpc.thrift             |   2 +-
 52 files changed, 621 insertions(+), 565 deletions(-)
 copy cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/{QPTaskManager.java => NodeAsClientThreadManager.java} (72%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/{QPTaskManager.java => QPTaskThreadManager.java} (80%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/{QueryTimerManager.java => QueryTimerThreadManager.java} (87%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/{querymetric => }/QueryMetricAsyncProcessor.java (83%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/{querymetric => }/QueryMetricRequest.java (72%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/{querymetric => }/QueryMetricResponse.java (92%)
 copy cluster/src/test/java/org/apache/iotdb/cluster/concurrent/pool/{QPTaskManagerTest.java => QPTaskThreadManagerTest.java} (80%)
 copy iotdb/src/main/java/org/apache/iotdb/db/query/executor/{AbstractQueryRouter.java => IEngineQueryRouter.java} (59%)


[incubator-iotdb] 01/02: add cluster_framework

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 41a49ad74bb381e1dea572de24528b9c91e7ba74
Author: lta <li...@163.com>
AuthorDate: Thu May 23 11:22:29 2019 +0800

    add cluster_framework
---
 .checkstyle                                        |    10 +
 .github/ISSUE_TEMPLATE/bug_report.md               |    31 +
 .github/ISSUE_TEMPLATE/custom.md                   |     7 +
 .github/ISSUE_TEMPLATE/feature_request.md          |    17 +
 .gitignore                                         |    96 +
 .mvn/wrapper/MavenWrapperDownloader.java           |   107 +
 .mvn/wrapper/maven-wrapper.properties              |    21 +
 .travis.yml                                        |   153 +
 Jenkinsfile                                        |   182 +
 License                                            |   283 +
 NOTICE                                             |   545 +
 README.md                                          |   308 +
 RELEASE_NOTES                                      |    39 +
 asf.header                                         |    16 +
 checkstyle.xml                                     |   235 +
 cluster/pom.xml                                    |   207 +
 cluster/script/deploy.sh                           |    55 +
 cluster/script/stop.sh                             |    53 +
 .../iotdb/cluster/concurrent/ThreadName.java       |    47 +
 .../concurrent/pool/NodeAsClientThreadManager.java |    64 +
 .../concurrent/pool/QPTaskThreadManager.java       |    64 +
 .../concurrent/pool/QueryTimerThreadManager.java   |    74 +
 .../cluster/concurrent/pool/ThreadPoolManager.java |   106 +
 .../apache/iotdb/cluster/config/ClusterConfig.java |   355 +
 .../iotdb/cluster/config/ClusterConstant.java      |    49 +
 .../iotdb/cluster/config/ClusterDescriptor.java    |   204 +
 .../org/apache/iotdb/cluster/entity/Server.java    |   192 +
 .../cluster/entity/data/DataPartitionHolder.java   |    45 +
 .../cluster/entity/data/IPartitionHolder.java      |    25 +
 .../cluster/entity/metadata/IMetadataHolder.java   |    28 +
 .../cluster/entity/metadata/MetadataHolder.java    |    45 +
 .../entity/raft/DataPartitionRaftHolder.java       |    61 +
 .../cluster/entity/raft/DataStateMachine.java      |   191 +
 .../cluster/entity/raft/MetadataRaftHolder.java    |    42 +
 .../cluster/entity/raft/MetadataStateManchine.java |   178 +
 .../iotdb/cluster/entity/raft/RaftService.java     |   112 +
 .../iotdb/cluster/entity/service/IService.java     |    28 +
 .../exception/ConsistencyLevelException.java       |    41 +
 .../cluster/exception/ErrorConfigureExecption.java |    39 +
 .../cluster/exception/RaftConnectionException.java |    36 +
 .../cluster/qp/executor/AbstractQPExecutor.java    |   175 +
 .../qp/executor/ClusterQueryProcessExecutor.java   |   150 +
 .../cluster/qp/executor/NonQueryExecutor.java      |   366 +
 .../cluster/qp/executor/QueryMetadataExecutor.java |   378 +
 .../apache/iotdb/cluster/qp/task/BatchQPTask.java  |   163 +
 .../iotdb/cluster/qp/task/DataQueryTask.java       |    49 +
 .../apache/iotdb/cluster/qp/task/MultiQPTask.java  |    52 +
 .../org/apache/iotdb/cluster/qp/task/QPTask.java   |   187 +
 .../apache/iotdb/cluster/qp/task/SingleQPTask.java |    64 +
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |    50 +
 .../cluster/rpc/raft/closure/ResponseClosure.java  |    49 +
 .../rpc/raft/impl/RaftNodeAsClientManager.java     |   244 +
 .../raft/processor/BasicAsyncUserProcessor.java    |    27 +
 .../rpc/raft/processor/BasicSyncUserProcessor.java |    27 +
 .../raft/processor/QueryMetricAsyncProcessor.java  |    44 +
 .../nonquery/DataGroupNonQueryAsyncProcessor.java  |    72 +
 .../nonquery/MetaGroupNonQueryAsyncProcessor.java  |    72 +
 .../querymetadata/QueryMetadataAsyncProcessor.java |    90 +
 .../QueryMetadataInStringAsyncProcessor.java       |    78 +
 .../querymetadata/QueryPathsAsyncProcessor.java    |    99 +
 .../QuerySeriesTypeAsyncProcessor.java             |    87 +
 .../QueryTimeSeriesAsyncProcessor.java             |   100 +
 .../rpc/raft/request/BasicNonQueryRequest.java     |    50 +
 .../rpc/raft/request/BasicQueryRequest.java        |    45 +
 .../cluster/rpc/raft/request/BasicRequest.java     |    43 +
 .../rpc/raft/request/QueryMetricRequest.java       |    35 +
 .../request/nonquery/DataGroupNonQueryRequest.java |    39 +
 .../request/nonquery/MetaGroupNonQueryRequest.java |    39 +
 .../QueryMetadataInStringRequest.java              |    30 +
 .../querymetadata/QueryMetadataRequest.java        |    30 +
 .../request/querymetadata/QueryPathsRequest.java   |    37 +
 .../querymetadata/QuerySeriesTypeRequest.java      |    36 +
 .../querymetadata/QueryStorageGroupRequest.java    |    30 +
 .../querymetadata/QueryTimeSeriesRequest.java      |    37 +
 .../rpc/raft/response/BasicQueryDataResponse.java  |    43 +
 .../cluster/rpc/raft/response/BasicResponse.java   |   109 +
 .../rpc/raft/response/QueryMetricResponse.java     |    46 +
 .../nonquery/DataGroupNonQueryResponse.java        |    59 +
 .../nonquery/MetaGroupNonQueryResponse.java        |    43 +
 .../QueryMetadataInStringResponse.java             |    48 +
 .../querymetadata/QueryMetadataResponse.java       |    49 +
 .../response/querymetadata/QueryPathsResponse.java |    52 +
 .../querymetadata/QuerySeriesTypeResponse.java     |    52 +
 .../querymetadata/QueryStorageGroupResponse.java   |    51 +
 .../querymetadata/QueryTimeSeriesResponse.java     |    52 +
 .../cluster/service/TSServiceClusterImpl.java      |   298 +
 .../iotdb/cluster/utils/QPExecutorUtils.java       |   158 +
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |   572 +
 .../iotdb/cluster/utils/hash/HashFunction.java     |    24 +
 .../apache/iotdb/cluster/utils/hash/MD5Hash.java   |    53 +
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |   103 +
 .../apache/iotdb/cluster/utils/hash/Router.java    |   241 +
 .../iotdb/cluster/utils/hash/VirtualNode.java      |    43 +
 .../concurrent/pool/QPTaskThreadManagerTest.java   |    85 +
 .../cluster/config/ClusterDescriptorTest.java      |   230 +
 .../integration/IoTDBMetadataFetchAbstract.java    |   439 +
 .../integration/IoTDBMetadataFetchLocallyIT.java   |    65 +
 .../integration/IoTDBMetadataFetchRemoteIT.java    |    71 +
 .../iotdb/cluster/qp/AbstractQPExecutorTest.java   |   116 +
 .../cluster/qp/executor/NonQueryExecutorTest.java  |   150 +
 .../cluster/utils/ClusterConfigureGenerator.java   |    87 +
 .../iotdb/cluster/utils/EnvironmentUtils.java      |   179 +
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |   304 +
 .../java/org/apache/iotdb/cluster/utils/Utils.java |    61 +
 .../iotdb/cluster/utils/hash/MD5HashTest.java      |    65 +
 .../iotdb/cluster/utils/hash/PhysicalNodeTest.java |    55 +
 .../iotdb/cluster/utils/hash/RouterTest.java       |   246 +
 cluster/src/test/resources/logback.xml             |    41 +
 codecov.yml                                        |    48 +
 docker/Dockerfile                                  |    49 +
 docs/Community-History&Vision.md                   |    24 +
 docs/Community-Powered By.md                       |    42 +
 docs/Community-Project Committers.md               |    44 +
 docs/Development.md                                |   197 +
 docs/Documentation/Frequently asked questions.md   |   144 +
 docs/Documentation/OtherMaterial-Examples.md       |    84 +
 docs/Documentation/OtherMaterial-Reference.md      |    30 +
 .../OtherMaterial-ReleaseNotesV0.7.0.md            |    59 +
 docs/Documentation/OtherMaterial-Sample Data.md    |    67 +
 docs/Documentation/OtherMaterial-Sample Data.txt   | 60509 +++++++++++++++++++
 docs/Documentation/QuickStart.md                   |   353 +
 docs/Documentation/UserGuideV0.7.0/1-Overview.md   |   120 +
 docs/Documentation/UserGuideV0.7.0/2-Concept.md    |   251 +
 .../UserGuideV0.7.0/3-Operation Manual.md          |   875 +
 .../UserGuideV0.7.0/4-Deployment and Management.md |  1062 +
 .../UserGuideV0.7.0/5-SQL Documentation.md         |   633 +
 .../UserGuideV0.7.0/6-JDBC Documentation.md        |    27 +
 docs/Documentation/UserGuideV0.7.0/7-Tools-Cli.md  |    93 +
 .../UserGuideV0.7.0/7-Tools-Grafana.md             |   138 +
 .../UserGuideV0.7.0/7-Tools-Hadoop.md              |    29 +
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            |   292 +
 .../Documentation/UserGuideV0.7.0/7-Tools-spark.md |   331 +
 example/kafka/pom.xml                              |    71 +
 example/kafka/readme.md                            |    75 +
 .../main/java/org/apache/iotdb/kafka/Constant.java |    58 +
 .../java/org/apache/iotdb/kafka/KafkaConsumer.java |   102 +
 .../apache/iotdb/kafka/KafkaConsumerThread.java    |   103 +
 .../java/org/apache/iotdb/kafka/KafkaProducer.java |    67 +
 example/pom.xml                                    |    46 +
 example/rocketmq/pom.xml                           |    56 +
 example/rocketmq/readme.md                         |    77 +
 .../java/org/apache/iotdb/example/Constant.java    |    66 +
 .../org/apache/iotdb/example/RocketMQConsumer.java |   138 +
 .../org/apache/iotdb/example/RocketMQProducer.java |   104 +
 .../main/java/org/apache/iotdb/example/Utils.java  |    42 +
 grafana/conf/application.properties                |    24 +
 grafana/img/add_data_source.png                    |   Bin 0 -> 175851 bytes
 grafana/img/add_graph.png                          |   Bin 0 -> 723579 bytes
 grafana/img/edit_data_source.png                   |   Bin 0 -> 313673 bytes
 grafana/pom.xml                                    |   247 +
 grafana/readme.md                                  |   112 +
 grafana/readme_zh.md                               |   104 +
 .../web/grafana/TsfileWebDemoApplication.java      |    30 +
 .../apache/iotdb/web/grafana/bean/TimeValues.java  |    49 +
 .../iotdb/web/grafana/conf/MyConfiguration.java    |    45 +
 .../controller/DatabaseConnectController.java      |   220 +
 .../org/apache/iotdb/web/grafana/dao/BasicDao.java |    35 +
 .../iotdb/web/grafana/dao/impl/BasicDaoImpl.java   |   124 +
 .../grafana/service/DatabaseConnectService.java    |    34 +
 .../service/impl/DatabaseConnectServiceImpl.java   |    51 +
 hadoop/README.md                                   |    22 +
 hadoop/pom.xml                                     |    50 +
 .../iotdb/tsfile/hadoop/TSFHadoopException.java    |    50 +
 .../apache/iotdb/tsfile/hadoop/TSFInputFormat.java |   379 +
 .../apache/iotdb/tsfile/hadoop/TSFInputSplit.java  |   187 +
 .../iotdb/tsfile/hadoop/TSFOutputFormat.java       |    75 +
 .../iotdb/tsfile/hadoop/TSFRecordReader.java       |   227 +
 .../iotdb/tsfile/hadoop/TSFRecordWriter.java       |    82 +
 .../java/org/apache/iotdb/tsfile/hadoop/TSRow.java |    51 +
 .../tsfile/hadoop/example/TSFMRReadExample.java    |   127 +
 .../iotdb/tsfile/hadoop/example/TsFileHelper.java  |   130 +
 .../iotdb/tsfile/hadoop/io/HDFSInputStream.java    |   111 +
 .../iotdb/tsfile/hadoop/io/HDFSOutputStream.java   |    92 +
 .../thu/tsfile/hadoop/InputOutputStreamTest.java   |    93 +
 .../cn/edu/thu/tsfile/hadoop/TSFHadoopTest.java    |   199 +
 .../edu/thu/tsfile/hadoop/TSFInputSplitTest.java   |    98 +
 .../cn/edu/thu/tsfile/hadoop/TsFileTestHelper.java |   147 +
 iotdb-cli/cli/bin/export-csv.bat                   |    66 +
 iotdb-cli/cli/bin/export-csv.sh                    |    53 +
 iotdb-cli/cli/bin/import-csv.bat                   |    67 +
 iotdb-cli/cli/bin/import-csv.sh                    |    53 +
 iotdb-cli/cli/bin/run-client.bat                   |    20 +
 iotdb-cli/cli/bin/start-client.bat                 |    63 +
 iotdb-cli/cli/bin/start-client.sh                  |    53 +
 iotdb-cli/pom.xml                                  |   163 +
 .../apache/iotdb/cli/client/AbstractClient.java    |   737 +
 .../java/org/apache/iotdb/cli/client/Client.java   |   171 +
 .../org/apache/iotdb/cli/client/WinClient.java     |   177 +
 .../iotdb/cli/exception/ArgsErrorException.java    |    29 +
 .../org/apache/iotdb/cli/tool/AbstractCsvTool.java |   117 +
 .../java/org/apache/iotdb/cli/tool/ExportCsv.java  |   351 +
 .../java/org/apache/iotdb/cli/tool/ImportCsv.java  |   515 +
 .../apache/iotdb/cli/client/AbstractClientIT.java  |   205 +
 .../apache/iotdb/cli/client/AbstractScript.java    |    65 +
 .../iotdb/cli/client/StartClientScriptIT.java      |    74 +
 .../org/apache/iotdb/cli/tool/ExportCsvTestIT.java |    75 +
 .../org/apache/iotdb/cli/tool/ImportCsvTestIT.java |    77 +
 iotdb-cli/src/test/resources/logback.xml           |   114 +
 iotdb/iotdb/bin/nodetool.bat                       |    58 +
 iotdb/iotdb/bin/nodetool.sh                        |    48 +
 iotdb/iotdb/bin/start-WalChecker.bat               |   110 +
 iotdb/iotdb/bin/start-WalChecker.sh                |    85 +
 iotdb/iotdb/bin/start-cluster.bat                  |   108 +
 iotdb/iotdb/bin/start-cluster.sh                   |    76 +
 iotdb/iotdb/bin/start-server.bat                   |   108 +
 iotdb/iotdb/bin/start-server.sh                    |    76 +
 iotdb/iotdb/bin/start-sync-client.bat              |    74 +
 iotdb/iotdb/bin/start-sync-client.sh               |    54 +
 iotdb/iotdb/bin/stop-cluster.bat                   |    23 +
 iotdb/iotdb/bin/stop-cluster.sh                    |    30 +
 iotdb/iotdb/bin/stop-server.bat                    |    23 +
 iotdb/iotdb/bin/stop-server.sh                     |    30 +
 iotdb/iotdb/bin/stop-sync-client.bat               |    23 +
 iotdb/iotdb/bin/stop-sync-client.sh                |    30 +
 iotdb/iotdb/conf/error_info_cn.properties          |    28 +
 iotdb/iotdb/conf/error_info_en.properties          |    28 +
 iotdb/iotdb/conf/iotdb-cluster.properties          |    97 +
 iotdb/iotdb/conf/iotdb-engine.properties           |   221 +
 iotdb/iotdb/conf/iotdb-env.bat                     |    67 +
 iotdb/iotdb/conf/iotdb-env.sh                      |   150 +
 iotdb/iotdb/conf/iotdb-sync-client.properties      |    35 +
 iotdb/iotdb/conf/logback.xml                       |   139 +
 iotdb/iotdb/conf/tsfile-format.properties          |    42 +
 iotdb/pom.xml                                      |   215 +
 .../antlr3/org/apache/iotdb/db/sql/parse/TSLexer.g |   183 +
 .../org/apache/iotdb/db/sql/parse/TSParser.g       |   853 +
 .../org/apache/iotdb/db/auth/AuthException.java    |    47 +
 .../org/apache/iotdb/db/auth/AuthorityChecker.java |   169 +
 .../iotdb/db/auth/authorizer/BasicAuthorizer.java  |   280 +
 .../iotdb/db/auth/authorizer/IAuthorizer.java      |   253 +
 .../db/auth/authorizer/LocalFileAuthorizer.java    |    62 +
 .../apache/iotdb/db/auth/entity/PathPrivilege.java |   107 +
 .../apache/iotdb/db/auth/entity/PrivilegeType.java |    40 +
 .../java/org/apache/iotdb/db/auth/entity/Role.java |   108 +
 .../java/org/apache/iotdb/db/auth/entity/User.java |   163 +
 .../iotdb/db/auth/role/BasicRoleManager.java       |   170 +
 .../apache/iotdb/db/auth/role/IRoleAccessor.java   |    69 +
 .../apache/iotdb/db/auth/role/IRoleManager.java    |    93 +
 .../iotdb/db/auth/role/LocalFileRoleAccessor.java  |   166 +
 .../iotdb/db/auth/role/LocalFileRoleManager.java   |    26 +
 .../iotdb/db/auth/user/BasicUserManager.java       |   285 +
 .../apache/iotdb/db/auth/user/IUserAccessor.java   |    69 +
 .../apache/iotdb/db/auth/user/IUserManager.java    |   147 +
 .../iotdb/db/auth/user/LocalFileUserAccessor.java  |   208 +
 .../iotdb/db/auth/user/LocalFileUserManager.java   |    28 +
 .../org/apache/iotdb/db/concurrent/HashLock.java   |    63 +
 .../IoTDBDefaultThreadExceptionHandler.java        |    34 +
 .../db/concurrent/IoTDBThreadPoolFactory.java      |   131 +
 .../iotdb/db/concurrent/IoTThreadFactory.java      |    61 +
 .../org/apache/iotdb/db/concurrent/ThreadName.java |    51 +
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |   832 +
 .../org/apache/iotdb/db/conf/IoTDBConstant.java    |    62 +
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |   272 +
 .../iotdb/db/conf/directories/Directories.java     |   109 +
 .../directories/strategy/DirectoryStrategy.java    |    75 +
 .../strategy/MaxDiskUsableSpaceFirstStrategy.java  |    58 +
 .../strategy/MinDirOccupiedSpaceFirstStrategy.java |    76 +
 .../MinFolderOccupiedSpaceFirstStrategy.java       |    76 +
 .../directories/strategy/SequenceStrategy.java     |    48 +
 .../java/org/apache/iotdb/db/engine/Processor.java |   191 +
 .../apache/iotdb/db/engine/bufferwrite/Action.java |    28 +
 .../db/engine/bufferwrite/ActionException.java     |    30 +
 .../engine/bufferwrite/BufferWriteProcessor.java   |   559 +
 .../db/engine/bufferwrite/FileNodeConstants.java   |    44 +
 .../bufferwrite/RestorableTsFileIOWriter.java      |   326 +
 .../engine/cache/RowGroupBlockMetaDataCache.java   |   167 +
 .../iotdb/db/engine/cache/TsFileMetaDataCache.java |    98 +
 .../iotdb/db/engine/cache/TsFileMetadataUtils.java |    84 +
 .../db/engine/filenode/FileNodeFlushFuture.java    |    91 +
 .../iotdb/db/engine/filenode/FileNodeManager.java  |  1220 +
 .../db/engine/filenode/FileNodeProcessor.java      |  2065 +
 .../engine/filenode/FileNodeProcessorStatus.java   |    51 +
 .../db/engine/filenode/FileNodeProcessorStore.java |   165 +
 .../db/engine/filenode/OverflowChangeType.java     |    57 +
 .../iotdb/db/engine/filenode/TsFileResource.java   |   390 +
 .../db/engine/memcontrol/BasicMemController.java   |   186 +
 .../engine/memcontrol/DisabledMemController.java   |    56 +
 .../db/engine/memcontrol/FlushPartialPolicy.java   |    72 +
 .../db/engine/memcontrol/ForceFLushAllPolicy.java  |    61 +
 .../db/engine/memcontrol/JVMMemController.java     |   100 +
 .../db/engine/memcontrol/MemMonitorThread.java     |    88 +
 .../db/engine/memcontrol/MemStatisticThread.java   |   102 +
 .../iotdb/db/engine/memcontrol/NoActPolicy.java    |    38 +
 .../apache/iotdb/db/engine/memcontrol/Policy.java  |    28 +
 .../db/engine/memcontrol/RecordMemController.java  |   177 +
 .../iotdb/db/engine/memtable/AbstractMemTable.java |   177 +
 .../apache/iotdb/db/engine/memtable/IMemTable.java |    72 +
 .../db/engine/memtable/IWritableMemChunk.java      |    47 +
 .../db/engine/memtable/MemSeriesLazyMerger.java    |    61 +
 .../db/engine/memtable/MemTableFlushUtil.java      |   110 +
 .../db/engine/memtable/PrimitiveMemTable.java      |    47 +
 .../engine/memtable/TimeValuePairInMemTable.java   |    45 +
 .../db/engine/memtable/TimeValuePairSorter.java    |    56 +
 .../iotdb/db/engine/memtable/WritableMemChunk.java |   156 +
 .../iotdb/db/engine/modification/Deletion.java     |    63 +
 .../iotdb/db/engine/modification/Modification.java |    84 +
 .../db/engine/modification/ModificationFile.java   |   120 +
 .../io/LocalTextModificationAccessor.java          |   157 +
 .../engine/modification/io/ModificationReader.java |    43 +
 .../engine/modification/io/ModificationWriter.java |    47 +
 .../iotdb/db/engine/modification/package-info.java |    23 +
 .../iotdb/db/engine/overflow/io/OverflowIO.java    |   173 +
 .../db/engine/overflow/io/OverflowMemtable.java    |   116 +
 .../db/engine/overflow/io/OverflowProcessor.java   |   735 +
 .../db/engine/overflow/io/OverflowResource.java    |   329 +
 .../overflow/io/OverflowedTsFileIOWriter.java      |    36 +
 .../engine/overflow/metadata/OFFileMetadata.java   |   107 +
 .../overflow/metadata/OFRowGroupListMetadata.java  |   109 +
 .../overflow/metadata/OFSeriesListMetadata.java    |   107 +
 .../db/engine/overflow/utils/MergeStatus.java      |    29 +
 .../db/engine/overflow/utils/OverflowOpType.java   |    29 +
 .../apache/iotdb/db/engine/pool/FlushManager.java  |   139 +
 .../apache/iotdb/db/engine/pool/MergeManager.java  |   126 +
 .../querycontext/GlobalSortedSeriesDataSource.java |    92 +
 .../engine/querycontext/MergeSeriesDataSource.java |    33 +
 .../db/engine/querycontext/OverflowInsertFile.java |    51 +
 .../querycontext/OverflowSeriesDataSource.java     |    78 +
 .../querycontext/OverflowUpdateDeleteFile.java     |    42 +
 .../db/engine/querycontext/QueryDataSource.java    |    42 +
 .../db/engine/querycontext/ReadOnlyMemChunk.java   |   162 +
 .../db/engine/querycontext/UnsealedTsFile.java     |    44 +
 .../version/SimpleFileVersionController.java       |   125 +
 .../engine/version/SysTimeVersionController.java   |    42 +
 .../iotdb/db/engine/version/VersionController.java |    37 +
 .../iotdb/db/exception/ArgsErrorException.java     |    29 +
 .../exception/BufferWriteProcessorException.java   |    41 +
 .../db/exception/DeltaEngineRunningException.java  |    47 +
 .../iotdb/db/exception/ErrorDebugException.java    |    37 +
 .../db/exception/FileNodeManagerException.java     |    41 +
 .../db/exception/FileNodeNotExistException.java    |    33 +
 .../db/exception/FileNodeProcessorException.java   |    44 +
 .../db/exception/MetadataArgsErrorException.java   |    35 +
 .../iotdb/db/exception/NotConsistentException.java |    29 +
 .../db/exception/OverflowProcessorException.java   |    41 +
 .../exception/OverflowWrongParameterException.java |    42 +
 .../iotdb/db/exception/PathErrorException.java     |    34 +
 .../iotdb/db/exception/ProcessorException.java     |    47 +
 .../db/exception/ProcessorRuntimException.java     |    41 +
 .../iotdb/db/exception/RecoverException.java       |    36 +
 .../iotdb/db/exception/StartupException.java       |    34 +
 .../db/exception/SyncConnectionException.java      |    38 +
 .../iotdb/db/exception/SysCheckException.java      |    38 +
 .../db/exception/UnSupportedFillTypeException.java |    35 +
 .../UnSupportedOverflowOpTypeException.java        |    42 +
 .../iotdb/db/exception/WALOverSizedException.java  |    38 +
 .../db/exception/builder/ExceptionBuilder.java     |   107 +
 .../exception/codebased/AuthPluginException.java   |    33 +
 .../codebased/ConnectionFailedException.java       |    32 +
 .../codebased/ConnectionHostException.java         |    33 +
 .../exception/codebased/InsecureAPIException.java  |    33 +
 .../codebased/InvalidParameterException.java       |    32 +
 .../db/exception/codebased/IoTDBException.java     |    49 +
 .../exception/codebased/NoParameterException.java  |    32 +
 .../codebased/NoPreparedStatementException.java    |    32 +
 .../exception/codebased/OutOfMemoryException.java  |    32 +
 .../db/exception/codebased/UnknownException.java   |    32 +
 .../qp/GeneratePhysicalPlanException.java          |    34 +
 .../db/exception/qp/IllegalASTFormatException.java |    32 +
 .../db/exception/qp/LogicalOperatorException.java  |    32 +
 .../db/exception/qp/LogicalOptimizeException.java  |    32 +
 .../db/exception/qp/QueryProcessorException.java   |    36 +
 .../java/org/apache/iotdb/db/metadata/MGraph.java  |   382 +
 .../org/apache/iotdb/db/metadata/MManager.java     |  1053 +
 .../java/org/apache/iotdb/db/metadata/MNode.java   |   201 +
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  1089 +
 .../org/apache/iotdb/db/metadata/Metadata.java     |   126 +
 .../apache/iotdb/db/metadata/MetadataConstant.java |    30 +
 .../iotdb/db/metadata/MetadataOperationType.java   |    37 +
 .../java/org/apache/iotdb/db/metadata/PNode.java   |   127 +
 .../java/org/apache/iotdb/db/metadata/PTree.java   |   261 +
 .../org/apache/iotdb/db/monitor/IStatistic.java    |    54 +
 .../apache/iotdb/db/monitor/MonitorConstants.java  |   113 +
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |   391 +
 .../iotdb/db/monitor/collector/FileSize.java       |   173 +
 .../org/apache/iotdb/db/qp/QueryProcessor.java     |   178 +
 .../apache/iotdb/db/qp/constant/DatetimeUtils.java |   215 +
 .../apache/iotdb/db/qp/constant/SQLConstant.java   |   159 +
 .../iotdb/db/qp/constant/TSParserConstant.java     |    64 +
 .../db/qp/exception/DateTimeFormatException.java   |    31 +
 .../db/qp/executor/IQueryProcessExecutor.java      |   148 +
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   |   682 +
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |   117 +
 .../org/apache/iotdb/db/qp/logical/Operator.java   |    77 +
 .../apache/iotdb/db/qp/logical/RootOperator.java   |    31 +
 .../db/qp/logical/crud/BasicFunctionOperator.java  |   168 +
 .../db/qp/logical/crud/BasicOperatorType.java      |   189 +
 .../iotdb/db/qp/logical/crud/DeleteOperator.java   |    43 +
 .../iotdb/db/qp/logical/crud/FilterOperator.java   |   265 +
 .../iotdb/db/qp/logical/crud/FromOperator.java     |    48 +
 .../iotdb/db/qp/logical/crud/FunctionOperator.java |    46 +
 .../iotdb/db/qp/logical/crud/InsertOperator.java   |    61 +
 .../iotdb/db/qp/logical/crud/QueryOperator.java    |   120 +
 .../iotdb/db/qp/logical/crud/SFWOperator.java      |    87 +
 .../iotdb/db/qp/logical/crud/SelectOperator.java   |    69 +
 .../iotdb/db/qp/logical/crud/UpdateOperator.java   |    41 +
 .../iotdb/db/qp/logical/sys/AuthorOperator.java    |   211 +
 .../iotdb/db/qp/logical/sys/LoadDataOperator.java  |    48 +
 .../iotdb/db/qp/logical/sys/MetadataOperator.java  |   155 +
 .../iotdb/db/qp/logical/sys/PropertyOperator.java  |   106 +
 .../apache/iotdb/db/qp/physical/PhysicalPlan.java  |    78 +
 .../iotdb/db/qp/physical/crud/AggregationPlan.java |    43 +
 .../iotdb/db/qp/physical/crud/DeletePlan.java      |    95 +
 .../iotdb/db/qp/physical/crud/FillQueryPlan.java   |    52 +
 .../iotdb/db/qp/physical/crud/GroupByPlan.java     |    60 +
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |   124 +
 .../iotdb/db/qp/physical/crud/QueryPlan.java       |    71 +
 .../iotdb/db/qp/physical/crud/UpdatePlan.java      |   133 +
 .../iotdb/db/qp/physical/sys/AuthorPlan.java       |   219 +
 .../iotdb/db/qp/physical/sys/LoadDataPlan.java     |    77 +
 .../iotdb/db/qp/physical/sys/MetadataPlan.java     |   174 +
 .../iotdb/db/qp/physical/sys/PropertyPlan.java     |    96 +
 .../iotdb/db/qp/physical/transfer/Codec.java       |    29 +
 .../db/qp/physical/transfer/CodecInstances.java    |   472 +
 .../db/qp/physical/transfer/PhysicalPlanCodec.java |    57 +
 .../physical/transfer/PhysicalPlanLogTransfer.java |    90 +
 .../db/qp/physical/transfer/SystemLogOperator.java |    35 +
 .../iotdb/db/qp/strategy/LogicalGenerator.java     |  1137 +
 .../iotdb/db/qp/strategy/PhysicalGenerator.java    |   345 +
 .../qp/strategy/optimizer/ConcatPathOptimizer.java |   369 +
 .../qp/strategy/optimizer/DnfFilterOptimizer.java  |   162 +
 .../db/qp/strategy/optimizer/IFilterOptimizer.java |    31 +
 .../qp/strategy/optimizer/ILogicalOptimizer.java   |    31 +
 .../optimizer/MergeSingleFilterOptimizer.java      |   163 +
 .../qp/strategy/optimizer/RemoveNotOptimizer.java  |   108 +
 .../db/query/aggregation/AggreResultData.java      |   192 +
 .../db/query/aggregation/AggregateFunction.java    |   135 +
 .../db/query/aggregation/impl/CountAggrFunc.java   |   155 +
 .../db/query/aggregation/impl/FirstAggrFunc.java   |   154 +
 .../db/query/aggregation/impl/LastAggrFunc.java    |   151 +
 .../db/query/aggregation/impl/MaxTimeAggrFunc.java |   135 +
 .../query/aggregation/impl/MaxValueAggrFunc.java   |   170 +
 .../db/query/aggregation/impl/MeanAggrFunc.java    |   163 +
 .../db/query/aggregation/impl/MinTimeAggrFunc.java |   156 +
 .../query/aggregation/impl/MinValueAggrFunc.java   |   166 +
 .../db/query/aggregation/impl/SumAggrFunc.java     |    37 +
 .../iotdb/db/query/context/QueryContext.java       |    95 +
 .../iotdb/db/query/control/FileReaderManager.java  |   247 +
 .../iotdb/db/query/control/JobFileManager.java     |   107 +
 .../db/query/control/QueryResourceManager.java     |   216 +
 .../query/dataset/AggreResultDataPointReader.java  |    52 +
 .../dataset/EngineDataSetWithTimeGenerator.java    |   108 +
 .../dataset/EngineDataSetWithoutTimeGenerator.java |   161 +
 .../dataset/groupby/GroupByEngineDataSet.java      |   163 +
 .../groupby/GroupByWithOnlyTimeFilterDataSet.java  |   314 +
 .../groupby/GroupByWithValueFilterDataSet.java     |   160 +
 .../AbstractExecutorWithoutTimeGenerator.java      |    84 +
 .../db/query/executor/AggregateEngineExecutor.java |   330 +
 .../executor/EngineExecutorWithTimeGenerator.java  |    89 +
 .../EngineExecutorWithoutTimeGenerator.java        |    77 +
 .../iotdb/db/query/executor/EngineQueryRouter.java |   200 +
 .../db/query/executor/FillEngineExecutor.java      |    91 +
 .../db/query/executor/IEngineQueryRouter.java      |    78 +
 .../iotdb/db/query/factory/AggreFuncFactory.java   |    80 +
 .../db/query/factory/SeriesReaderFactory.java      |   275 +
 .../java/org/apache/iotdb/db/query/fill/IFill.java |   123 +
 .../org/apache/iotdb/db/query/fill/LinearFill.java |   151 +
 .../apache/iotdb/db/query/fill/PreviousFill.java   |    77 +
 .../iotdb/db/query/reader/AllDataReader.java       |   129 +
 .../iotdb/db/query/reader/IAggregateReader.java    |    34 +
 .../apache/iotdb/db/query/reader/IBatchReader.java |    32 +
 .../apache/iotdb/db/query/reader/IPointReader.java |    28 +
 .../org/apache/iotdb/db/query/reader/IReader.java  |    36 +
 .../iotdb/db/query/reader/mem/MemChunkReader.java  |   120 +
 .../reader/mem/MemChunkReaderByTimestamp.java      |    71 +
 .../reader/merge/EngineReaderByTimeStamp.java      |    32 +
 .../db/query/reader/merge/PriorityMergeReader.java |   132 +
 .../merge/PriorityMergeReaderByTimestamp.java      |    69 +
 .../query/reader/sequence/SealedTsFilesReader.java |   183 +
 .../sequence/SealedTsFilesReaderByTimestamp.java   |   136 +
 .../query/reader/sequence/SequenceDataReader.java  |   148 +
 .../sequence/SequenceDataReaderByTimestamp.java    |   108 +
 .../reader/sequence/UnSealedTsFileReader.java      |   101 +
 .../sequence/UnSealedTsFilesReaderByTimestamp.java |    64 +
 .../query/reader/unsequence/EngineChunkReader.java |    77 +
 .../unsequence/EngineChunkReaderByTimestamp.java   |    77 +
 .../timegenerator/AbstractNodeConstructor.java     |   112 +
 .../db/query/timegenerator/EngineLeafNode.java     |    63 +
 .../query/timegenerator/EngineNodeConstructor.java |    58 +
 .../query/timegenerator/EngineTimeGenerator.java   |    67 +
 .../org/apache/iotdb/db/rescon/package-info.java   |    24 +
 .../apache/iotdb/db/service/CloseMergeService.java |   213 +
 .../java/org/apache/iotdb/db/service/IService.java |    41 +
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   179 +
 .../org/apache/iotdb/db/service/IoTDBMBean.java    |    27 +
 .../apache/iotdb/db/service/IoTDBShutdownHook.java |    39 +
 .../org/apache/iotdb/db/service/JDBCService.java   |   249 +
 .../iotdb/db/service/JDBCServiceEventHandler.java  |    68 +
 .../apache/iotdb/db/service/JDBCServiceMBean.java  |    34 +
 .../org/apache/iotdb/db/service/JMXService.java    |   165 +
 .../java/org/apache/iotdb/db/service/Monitor.java  |   166 +
 .../org/apache/iotdb/db/service/MonitorMBean.java  |    54 +
 .../apache/iotdb/db/service/RegisterManager.java   |    64 +
 .../org/apache/iotdb/db/service/ServiceType.java   |    51 +
 .../org/apache/iotdb/db/service/StartupCheck.java  |    30 +
 .../org/apache/iotdb/db/service/StartupChecks.java |    94 +
 .../org/apache/iotdb/db/service/TSServiceImpl.java |   908 +
 .../java/org/apache/iotdb/db/service/Utils.java    |   104 +
 .../org/apache/iotdb/db/sql/ParseGenerator.java    |    43 +
 .../apache/iotdb/db/sql/parse/AstErrorNode.java    |    58 +
 .../org/apache/iotdb/db/sql/parse/AstNode.java     |   362 +
 .../apache/iotdb/db/sql/parse/AstNodeOrigin.java   |   110 +
 .../java/org/apache/iotdb/db/sql/parse/Node.java   |    42 +
 .../org/apache/iotdb/db/sql/parse/ParseDriver.java |   202 +
 .../org/apache/iotdb/db/sql/parse/ParseError.java  |    52 +
 .../apache/iotdb/db/sql/parse/ParseException.java  |    48 +
 .../org/apache/iotdb/db/sql/parse/ParseUtils.java  |    43 +
 .../org/apache/iotdb/db/sync/conf/Constans.java    |    47 +
 .../iotdb/db/sync/conf/SyncSenderConfig.java       |   145 +
 .../iotdb/db/sync/conf/SyncSenderDescriptor.java   |   133 +
 .../db/sync/receiver/SyncServiceEventHandler.java  |    55 +
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    |   740 +
 .../iotdb/db/sync/receiver/SyncServiceManager.java |   191 +
 .../iotdb/db/sync/sender/SyncFileManager.java      |   204 +
 .../apache/iotdb/db/sync/sender/SyncSender.java    |    65 +
 .../iotdb/db/sync/sender/SyncSenderImpl.java       |   543 +
 .../java/org/apache/iotdb/db/tools/WalChecker.java |   127 +
 .../java/org/apache/iotdb/db/utils/AuthUtils.java  |   319 +
 .../org/apache/iotdb/db/utils/CommonUtils.java     |    70 +
 .../org/apache/iotdb/db/utils/FilePathUtils.java   |    38 +
 .../org/apache/iotdb/db/utils/FileSchemaUtils.java |    61 +
 .../java/org/apache/iotdb/db/utils/FileUtils.java  |    87 +
 .../java/org/apache/iotdb/db/utils/IOUtils.java    |   180 +
 .../org/apache/iotdb/db/utils/ImmediateFuture.java |    58 +
 .../org/apache/iotdb/db/utils/LoadDataUtils.java   |   246 +
 .../java/org/apache/iotdb/db/utils/MathUtils.java  |    76 +
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |   173 +
 .../org/apache/iotdb/db/utils/OpenFileNumUtil.java |   287 +
 .../apache/iotdb/db/utils/PrimitiveArrayList.java  |   135 +
 .../iotdb/db/utils/PrimitiveArrayListFactory.java  |    47 +
 .../java/org/apache/iotdb/db/utils/QueryUtils.java |    76 +
 .../apache/iotdb/db/utils/RandomDeleteCache.java   |    79 +
 .../org/apache/iotdb/db/utils/RecordUtils.java     |   115 +
 .../java/org/apache/iotdb/db/utils/SyncUtils.java  |   126 +
 .../org/apache/iotdb/db/utils/TimeValuePair.java   |    79 +
 .../apache/iotdb/db/utils/TimeValuePairUtils.java  |    80 +
 .../org/apache/iotdb/db/utils/TsPrimitiveType.java |   412 +
 .../org/apache/iotdb/db/writelog/LogPosition.java  |    26 +
 .../org/apache/iotdb/db/writelog/RecoverStage.java |    50 +
 .../apache/iotdb/db/writelog/io/ILogReader.java    |    36 +
 .../apache/iotdb/db/writelog/io/ILogWriter.java    |    31 +
 .../org/apache/iotdb/db/writelog/io/LogWriter.java |    84 +
 .../apache/iotdb/db/writelog/io/RAFLogReader.java  |   104 +
 .../writelog/manager/MultiFileLogNodeManager.java  |   258 +
 .../db/writelog/manager/WriteLogNodeManager.java   |    67 +
 .../db/writelog/node/ExclusiveWriteLogNode.java    |   299 +
 .../iotdb/db/writelog/node/WriteLogNode.java       |    89 +
 .../recover/ExclusiveLogRecoverPerformer.java      |   369 +
 .../writelog/recover/FileNodeRecoverPerformer.java |    55 +
 .../db/writelog/recover/RecoverPerformer.java      |    32 +
 .../db/writelog/replay/ConcreteLogReplayer.java    |   102 +
 .../iotdb/db/writelog/replay/LogReplayer.java      |    27 +
 .../iotdb/db/auth/LocalFIleRoleAccessorTest.java   |    90 +
 .../iotdb/db/auth/LocalFileAuthorizerTest.java     |   352 +
 .../iotdb/db/auth/LocalFileRoleManagerTest.java    |   142 +
 .../iotdb/db/auth/LocalFileUserAccessorTest.java   |   107 +
 .../iotdb/db/auth/LocalFileUserManagerTest.java    |   196 +
 .../IoTDBDefaultThreadExceptionHandlerTest.java    |    78 +
 .../db/concurrent/IoTDBThreadPoolFactoryTest.java  |   219 +
 .../iotdb/db/engine/MetadataManagerHelper.java     |   131 +
 .../java/org/apache/iotdb/db/engine/PathUtils.java |    64 +
 .../org/apache/iotdb/db/engine/ProcessorTest.java  |   152 +
 .../engine/bufferwrite/BufferWriteBenchmark.java   |   127 +
 .../bufferwrite/BufferWriteProcessorNewTest.java   |   172 +
 .../bufferwrite/BufferWriteProcessorTest.java      |   257 +
 .../bufferwrite/RestorableTsFileIOWriterTest.java  |   266 +
 .../filenode/FileNodeProcessorStoreTest.java       |    91 +
 .../db/engine/filenode/TsFileResourceTest.java     |    98 +
 .../filenodev2/FileNodeManagerBenchmark.java       |   125 +
 .../memcontrol/BufferwriteFileSizeControlTest.java |   164 +
 .../memcontrol/BufferwriteMetaSizeControlTest.java |   165 +
 .../db/engine/memcontrol/IoTDBMemControlTest.java  |   195 +
 .../db/engine/memcontrol/MemControllerTest.java    |    95 +
 .../memcontrol/OverflowFileSizeControlTest.java    |   143 +
 .../memcontrol/OverflowMetaSizeControlTest.java    |   144 +
 .../db/engine/memtable/MemTableTestUtils.java      |    54 +
 .../db/engine/memtable/MemtableBenchmark.java      |    58 +
 .../db/engine/memtable/PrimitiveMemTableTest.java  |   192 +
 .../engine/modification/DeletionFileNodeTest.java  |   261 +
 .../db/engine/modification/DeletionQueryTest.java  |   298 +
 .../engine/modification/ModificationFileTest.java  |   101 +
 .../io/LocalTextModificationAccessorTest.java      |    80 +
 .../db/engine/overflow/io/OverflowIOTest.java      |    65 +
 .../engine/overflow/io/OverflowMemtableTest.java   |   100 +
 .../overflow/io/OverflowProcessorBenchmark.java    |   123 +
 .../engine/overflow/io/OverflowProcessorTest.java  |   211 +
 .../engine/overflow/io/OverflowResourceTest.java   |    92 +
 .../db/engine/overflow/io/OverflowTestUtils.java   |    77 +
 .../overflow/metadata/OFFileMetadataTest.java      |    90 +
 .../metadata/OFRowGroupListMetadataTest.java       |    93 +
 .../metadata/OFSeriesListMetadataTest.java         |    88 +
 .../overflow/metadata/OverflowTestHelper.java      |    84 +
 .../db/engine/overflow/metadata/OverflowUtils.java |   138 +
 .../version/SimpleFileVersionControllerTest.java   |    54 +
 .../version/SysTimeVersionControllerTest.java      |    43 +
 .../iotdb/db/exception/ExceptionBuilderTest.java   |   102 +
 .../org/apache/iotdb/db/integration/Constant.java  |   100 +
 .../apache/iotdb/db/integration/IOTDBFillIT.java   |   351 +
 .../iotdb/db/integration/IOTDBGroupByIT.java       |   532 +
 .../iotdb/db/integration/IoTDBAggregationIT.java   |   550 +
 .../integration/IoTDBAggregationLargeDataIT.java   |   900 +
 .../integration/IoTDBAggregationSmallDataIT.java   |   757 +
 .../iotdb/db/integration/IoTDBAuthorizationIT.java |   933 +
 .../iotdb/db/integration/IoTDBCompleteIT.java      |   419 +
 .../apache/iotdb/db/integration/IoTDBDaemonIT.java |   378 +
 .../iotdb/db/integration/IoTDBDeletionIT.java      |   276 +
 .../db/integration/IoTDBEngineTimeGeneratorIT.java |   269 +
 .../db/integration/IoTDBFloatPrecisionIT.java      |   183 +
 .../db/integration/IoTDBFlushQueryMergeTest.java   |   130 +
 .../iotdb/db/integration/IoTDBLargeDataIT.java     |   376 +
 .../iotdb/db/integration/IoTDBLimitSlimitIT.java   |   222 +
 .../iotdb/db/integration/IoTDBMetadataFetchIT.java |   390 +
 .../iotdb/db/integration/IoTDBMultiSeriesIT.java   |   377 +
 .../db/integration/IoTDBSequenceDataQueryIT.java   |   260 +
 .../iotdb/db/integration/IoTDBSeriesReaderIT.java  |   368 +
 .../iotdb/db/integration/IoTDBTimeZoneIT.java      |   153 +
 .../iotdb/db/integration/IoTDBVersionIT.java       |    84 +
 .../org/apache/iotdb/db/metadata/MGraphTest.java   |    79 +
 .../iotdb/db/metadata/MManagerAdvancedTest.java    |   141 +
 .../iotdb/db/metadata/MManagerBasicTest.java       |   347 +
 .../iotdb/db/metadata/MManagerEfficiencyTest.java  |   155 +
 .../iotdb/db/metadata/MManagerImproveTest.java     |   285 +
 .../org/apache/iotdb/db/metadata/MTreeTest.java    |   317 +
 .../org/apache/iotdb/db/metadata/MetadataTest.java |    93 +
 .../org/apache/iotdb/db/monitor/MonitorTest.java   |   147 +
 .../iotdb/db/monitor/collector/FileSizeTest.java   |    93 +
 .../org/apache/iotdb/db/qp/QueryProcessorTest.java |   134 +
 .../iotdb/db/qp/bench/QueryParseBenchmark.java     |    47 +
 .../iotdb/db/qp/other/TSPlanContextAuthorTest.java |    82 +
 .../db/qp/other/TSPlanContextPropertyTest.java     |    96 +
 .../transfer/PhysicalPlanLogTransferTest.java      |   158 +
 .../iotdb/db/qp/plan/LogicalPlanSmallTest.java     |   191 +
 .../apache/iotdb/db/qp/plan/PhysicalPlanTest.java  |   277 +
 .../org/apache/iotdb/db/qp/plan/QPUpdateTest.java  |   210 +
 .../iotdb/db/qp/plan/TestConcatOptimizer.java      |   138 +
 .../iotdb/db/qp/strategy/LogicalGeneratorTest.java |    77 +
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java |   213 +
 .../iotdb/db/query/component/SimpleFileWriter.java |    54 +
 .../db/query/control/FileReaderManagerTest.java    |   134 +
 .../db/query/control/QueryResourceManagerTest.java |    29 +
 .../EngineDataSetWithTimeGeneratorTest.java        |   123 +
 .../query/executor/GroupByEngineDataSetTest.java   |   153 +
 .../iotdb/db/query/reader/AllDataReaderTest.java   |    76 +
 .../iotdb/db/query/reader/FakedIBatchPoint.java    |   108 +
 .../iotdb/db/query/reader/FakedIPointReader.java   |    74 +
 .../query/reader/FakedSeriesReaderByTimestamp.java |    76 +
 .../merge/PriorityMergeReaderByTimestampTest.java  |   173 +
 .../reader/merge/PriorityMergeReaderTest.java      |   104 +
 .../reader/merge/SeriesMergeSortReaderTest.java    |   100 +
 .../SequenceDataReaderByTimestampTest.java         |    91 +
 .../org/apache/iotdb/db/script/EnvScriptIT.java    |   104 +
 .../org/apache/iotdb/db/sql/DatetimeUtilsTest.java |   119 +
 .../org/apache/iotdb/db/sql/SQLParserTest.java     |  1496 +
 .../db/sync/sender/MultipleClientSyncTest.java     |   226 +
 .../iotdb/db/sync/sender/SingleClientSyncTest.java |   597 +
 .../iotdb/db/sync/sender/SyncFileManagerTest.java  |   374 +
 .../org/apache/iotdb/db/sync/test/RandomNum.java   |    70 +
 .../apache/iotdb/db/sync/test/SyncTestClient1.java |   258 +
 .../apache/iotdb/db/sync/test/SyncTestClient2.java |   262 +
 .../apache/iotdb/db/sync/test/SyncTestClient3.java |   282 +
 .../java/org/apache/iotdb/db/sync/test/Utils.java  |    44 +
 .../org/apache/iotdb/db/tools/WalCheckerTest.java  |   159 +
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   179 +
 .../org/apache/iotdb/db/utils/MathUtilsTest.java   |    64 +
 .../apache/iotdb/db/utils/OpenFileNumUtilTest.java |   287 +
 .../iotdb/db/utils/PrimitiveArrayListTest.java     |    53 +
 .../iotdb/db/writelog/IoTDBLogFileSizeTest.java    |   232 +
 .../apache/iotdb/db/writelog/PerformanceTest.java  |   236 +
 .../org/apache/iotdb/db/writelog/RecoverTest.java  |   308 +
 .../iotdb/db/writelog/WriteLogNodeManagerTest.java |   139 +
 .../apache/iotdb/db/writelog/WriteLogNodeTest.java |   288 +
 .../iotdb/db/writelog/io/LogWriterReaderTest.java  |    83 +
 iotdb/src/test/resources/logback.xml               |    41 +
 iotdb/src/test/resources/start-sync-test.sh        |    54 +
 iotdb/src/test/resources/stop-sync-test.sh         |    29 +
 java-google-style.xml                              |   618 +
 jdbc/LICENSE                                       |   201 +
 jdbc/README.md                                     |    79 +
 jdbc/package.sh                                    |    43 +
 jdbc/pom.xml                                       |   177 +
 .../main/java/org/apache/iotdb/jdbc/Config.java    |    55 +
 .../main/java/org/apache/iotdb/jdbc/Constant.java  |    44 +
 .../org/apache/iotdb/jdbc/IoTDBConnection.java     |   537 +
 .../apache/iotdb/jdbc/IoTDBConnectionParams.java   |    82 +
 .../apache/iotdb/jdbc/IoTDBDatabaseMetadata.java   |  1259 +
 .../java/org/apache/iotdb/jdbc/IoTDBDriver.java    |    99 +
 .../iotdb/jdbc/IoTDBMetadataResultMetadata.java    |   193 +
 .../apache/iotdb/jdbc/IoTDBMetadataResultSet.java  |   316 +
 .../apache/iotdb/jdbc/IoTDBPrepareStatement.java   |   425 +
 .../org/apache/iotdb/jdbc/IoTDBQueryResultSet.java |  1248 +
 .../org/apache/iotdb/jdbc/IoTDBResultMetadata.java |   214 +
 .../org/apache/iotdb/jdbc/IoTDBSQLException.java   |    32 +
 .../java/org/apache/iotdb/jdbc/IoTDBStatement.java |   590 +
 .../org/apache/iotdb/jdbc/IoTDBURLException.java   |    30 +
 .../src/main/java/org/apache/iotdb/jdbc/Utils.java |   154 +
 .../test/java/org/apache/iotdb/jdbc/BatchTest.java |   150 +
 .../org/apache/iotdb/jdbc/IoTDBConnectionTest.java |    94 +
 .../iotdb/jdbc/IoTDBDatabaseMetadataTest.java      |   311 +
 .../jdbc/IoTDBMetadataResultMetadataTest.java      |   108 +
 .../iotdb/jdbc/IoTDBPrepareStatementTest.java      |   315 +
 .../apache/iotdb/jdbc/IoTDBQueryResultSetTest.java |   286 +
 .../apache/iotdb/jdbc/IoTDBResultMetadataTest.java |   171 +
 .../org/apache/iotdb/jdbc/IoTDBStatementTest.java  |   218 +
 .../test/java/org/apache/iotdb/jdbc/UtilsTest.java |   190 +
 .../org/apache/iotdb/jdbc/demo/MetadataDemo.java   |    36 +
 .../iotdb/jdbc/demo/PrepareStatementDemo.java      |    84 +
 .../org/apache/iotdb/jdbc/demo/StatementDemo.java  |    68 +
 jenkins.pom                                        |    67 +
 licenses/CDDL License 1.1                          |    90 +
 licenses/EDL License 1.0                           |    11 +
 licenses/Eclipse Public License 1.0 (EPL-1.0)      |    83 +
 .../Eclipse Public License version 2.0 (EPL-2.0)   |    81 +
 licenses/The 2-Clause BSD License                  |    14 +
 licenses/The 3-Clause BSD License                  |    16 +
 licenses/The MIT License                           |    10 +
 mvnw.cmd                                           |   159 +
 mvnw.sh                                            |   270 +
 pom.xml                                            |   580 +
 service-rpc/pom.xml                                |   156 +
 service-rpc/src/main/thrift/rpc.thrift             |   281 +
 service-rpc/src/main/thrift/sync.thrift            |    38 +
 spark/README.md                                    |   331 +
 spark/pom.xml                                      |    95 +
 .../java/org/apache/iotdb/tsfile/io/HDFSInput.java |   147 +
 .../org/apache/iotdb/tsfile/io/HDFSOutput.java     |    93 +
 .../apache/iotdb/tsfile/io/TsFileOutputFormat.java |    45 +
 .../apache/iotdb/tsfile/io/TsFileRecordWriter.java |    57 +
 .../scala/org/apache/iotdb/tsfile/Converter.scala  |   594 +
 .../org/apache/iotdb/tsfile/DefaultSource.scala    |   187 +
 .../apache/iotdb/tsfile/TsFileOutputWriter.scala   |    52 +
 .../apache/iotdb/tsfile/TsFileWriterFactory.scala  |    35 +
 .../scala/org/apache/iotdb/tsfile/package.scala    |    40 +
 .../scala/org/apache/iotdb/tool/TsFileExample.java |   106 +
 .../scala/org/apache/iotdb/tool/TsFileWrite.java   |   215 +
 .../org/apache/iotdb/tsfile/ConverterTest.scala    |   266 +
 .../org/apache/iotdb/tsfile/HDFSInputTest.java     |    79 +
 .../scala/org/apache/iotdb/tsfile/TSFileSuit.scala |   217 +
 tsfile/README.md                                   |    63 +
 tsfile/example/readme.md                           |    64 +
 .../java/org/apache/iotdb/tsfile/TsFileRead.java   |   117 +
 .../apache/iotdb/tsfile/TsFileSequenceRead.java    |   123 +
 .../java/org/apache/iotdb/tsfile/TsFileWrite.java  |   127 +
 tsfile/package.sh                                  |    43 +
 tsfile/pom.xml                                     |   139 +
 .../apache/iotdb/tsfile/common/cache/Cache.java    |    29 +
 .../apache/iotdb/tsfile/common/cache/LRUCache.java |    85 +
 .../iotdb/tsfile/common/conf/TSFileConfig.java     |   149 +
 .../iotdb/tsfile/common/conf/TSFileDescriptor.java |   159 +
 .../tsfile/common/constant/JsonFormatConstant.java |    45 +
 .../tsfile/common/constant/QueryConstant.java      |    38 +
 .../tsfile/common/constant/StatisticConstant.java  |    41 +
 .../tsfile/common/constant/SystemConstant.java     |    30 +
 .../apache/iotdb/tsfile/compress/ICompressor.java  |   143 +
 .../iotdb/tsfile/compress/IUnCompressor.java       |   191 +
 .../tsfile/encoding/bitpacking/IntPacker.java      |   172 +
 .../tsfile/encoding/bitpacking/LongPacker.java     |   185 +
 .../tsfile/encoding/common/EncodingConfig.java     |    44 +
 .../iotdb/tsfile/encoding/common/EndianType.java   |    28 +
 .../tsfile/encoding/decoder/BitmapDecoder.java     |   237 +
 .../iotdb/tsfile/encoding/decoder/Decoder.java     |   137 +
 .../encoding/decoder/DeltaBinaryDecoder.java       |   257 +
 .../encoding/decoder/DoublePrecisionDecoder.java   |   114 +
 .../tsfile/encoding/decoder/FloatDecoder.java      |   149 +
 .../tsfile/encoding/decoder/GorillaDecoder.java    |   130 +
 .../tsfile/encoding/decoder/IntRleDecoder.java     |   130 +
 .../tsfile/encoding/decoder/LongRleDecoder.java    |   126 +
 .../tsfile/encoding/decoder/PlainDecoder.java      |   134 +
 .../iotdb/tsfile/encoding/decoder/RleDecoder.java  |   251 +
 .../encoding/decoder/SinglePrecisionDecoder.java   |   111 +
 .../tsfile/encoding/encoder/BitmapEncoder.java     |   137 +
 .../encoding/encoder/DeltaBinaryEncoder.java       |   339 +
 .../encoding/encoder/DoublePrecisionEncoder.java   |   113 +
 .../iotdb/tsfile/encoding/encoder/Encoder.java     |   112 +
 .../tsfile/encoding/encoder/FloatEncoder.java      |   147 +
 .../tsfile/encoding/encoder/GorillaEncoder.java    |    90 +
 .../tsfile/encoding/encoder/IntRleEncoder.java     |   135 +
 .../tsfile/encoding/encoder/LongRleEncoder.java    |   128 +
 .../tsfile/encoding/encoder/PlainEncoder.java      |   179 +
 .../iotdb/tsfile/encoding/encoder/RleEncoder.java  |   352 +
 .../encoding/encoder/SinglePrecisionEncoder.java   |   110 +
 .../tsfile/encoding/encoder/TSEncodingBuilder.java |   243 +
 .../tsfile/exception/NotImplementedException.java  |    40 +
 .../tsfile/exception/TsFileRuntimeException.java   |    46 +
 .../tsfile/exception/cache/CacheException.java     |    44 +
 .../CompressionTypeNotSupportedException.java      |    43 +
 .../encoding/TsFileDecodingException.java          |    49 +
 .../encoding/TsFileEncodingException.java          |    49 +
 .../exception/filter/FilterDataTypeException.java  |    43 +
 .../exception/filter/FilterInvokeException.java    |    42 +
 .../filter/QueryFilterOptimizationException.java   |    34 +
 .../filter/UnSupportFilterDataTypeException.java   |    37 +
 .../metadata/MetadataArgsErrorException.java       |    35 +
 .../write/InvalidJsonSchemaException.java          |    34 +
 .../exception/write/NoMeasurementException.java    |    31 +
 .../tsfile/exception/write/PageException.java      |    31 +
 .../write/UnSupportedDataTypeException.java        |    28 +
 .../write/UnknownColumnTypeException.java          |    36 +
 .../exception/write/WriteProcessException.java     |    40 +
 .../org/apache/iotdb/tsfile/file/MetaMarker.java   |    39 +
 .../iotdb/tsfile/file/footer/ChunkGroupFooter.java |   154 +
 .../iotdb/tsfile/file/header/ChunkHeader.java      |   247 +
 .../iotdb/tsfile/file/header/PageHeader.java       |   209 +
 .../iotdb/tsfile/file/header/package-info.java     |    29 +
 .../tsfile/file/metadata/ChunkGroupMetaData.java   |   250 +
 .../iotdb/tsfile/file/metadata/ChunkMetaData.java  |   267 +
 .../tsfile/file/metadata/TsDeviceMetadata.java     |   220 +
 .../file/metadata/TsDeviceMetadataIndex.java       |   163 +
 .../iotdb/tsfile/file/metadata/TsDigest.java       |   221 +
 .../iotdb/tsfile/file/metadata/TsFileMetaData.java |   290 +
 .../file/metadata/enums/CompressionType.java       |   139 +
 .../tsfile/file/metadata/enums/TSDataType.java     |    79 +
 .../tsfile/file/metadata/enums/TSEncoding.java     |    81 +
 .../tsfile/file/metadata/enums/TSFreqType.java     |    61 +
 .../file/metadata/statistics/BinaryStatistics.java |   218 +
 .../metadata/statistics/BooleanStatistics.java     |   203 +
 .../file/metadata/statistics/DoubleStatistics.java |   204 +
 .../file/metadata/statistics/FloatStatistics.java  |   195 +
 .../metadata/statistics/IntegerStatistics.java     |   195 +
 .../file/metadata/statistics/LongStatistics.java   |   205 +
 .../file/metadata/statistics/NoStatistics.java     |   163 +
 .../file/metadata/statistics/Statistics.java       |   268 +
 .../statistics/StatisticsClassException.java       |    30 +
 .../org/apache/iotdb/tsfile/read/IDataReader.java  |    28 +
 .../apache/iotdb/tsfile/read/ReadOnlyTsFile.java   |    66 +
 .../iotdb/tsfile/read/TsFileCheckStatus.java       |    28 +
 .../iotdb/tsfile/read/TsFileRestorableReader.java  |    70 +
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |   608 +
 .../iotdb/tsfile/read/UnClosedTsFileReader.java    |    58 +
 .../apache/iotdb/tsfile/read/common/BatchData.java |   617 +
 .../org/apache/iotdb/tsfile/read/common/Chunk.java |    53 +
 .../org/apache/iotdb/tsfile/read/common/Field.java |   157 +
 .../org/apache/iotdb/tsfile/read/common/Path.java  |   225 +
 .../apache/iotdb/tsfile/read/common/RowRecord.java |    60 +
 .../apache/iotdb/tsfile/read/common/TimeRange.java |   289 +
 .../iotdb/tsfile/read/controller/ChunkLoader.java  |    37 +
 .../tsfile/read/controller/ChunkLoaderImpl.java    |    72 +
 .../tsfile/read/controller/MetadataQuerier.java    |    90 +
 .../read/controller/MetadataQuerierByFileImpl.java |   318 +
 .../tsfile/read/expression/ExpressionType.java     |    50 +
 .../tsfile/read/expression/IBinaryExpression.java  |    34 +
 .../iotdb/tsfile/read/expression/IExpression.java  |    26 +
 .../tsfile/read/expression/IUnaryExpression.java   |    28 +
 .../tsfile/read/expression/QueryExpression.java    |    84 +
 .../read/expression/impl/BinaryExpression.java     |   132 +
 .../read/expression/impl/GlobalTimeExpression.java |    60 +
 .../expression/impl/SingleSeriesExpression.java    |    67 +
 .../read/expression/util/ExpressionOptimizer.java  |   189 +
 .../read/expression/util/ExpressionPrinter.java    |    60 +
 .../iotdb/tsfile/read/filter/DigestForFilter.java  |   140 +
 .../iotdb/tsfile/read/filter/TimeFilter.java       |   110 +
 .../iotdb/tsfile/read/filter/ValueFilter.java      |   114 +
 .../tsfile/read/filter/basic/BinaryFilter.java     |    53 +
 .../iotdb/tsfile/read/filter/basic/Filter.java     |    66 +
 .../tsfile/read/filter/basic/UnaryFilter.java      |    55 +
 .../tsfile/read/filter/factory/FilterFactory.java  |    40 +
 .../tsfile/read/filter/factory/FilterType.java     |    35 +
 .../tsfile/read/filter/operator/AndFilter.java     |    67 +
 .../iotdb/tsfile/read/filter/operator/Eq.java      |    92 +
 .../iotdb/tsfile/read/filter/operator/Gt.java      |    90 +
 .../iotdb/tsfile/read/filter/operator/GtEq.java    |    90 +
 .../iotdb/tsfile/read/filter/operator/Lt.java      |    90 +
 .../iotdb/tsfile/read/filter/operator/LtEq.java    |    90 +
 .../iotdb/tsfile/read/filter/operator/NotEq.java   |    91 +
 .../tsfile/read/filter/operator/NotFilter.java     |    75 +
 .../tsfile/read/filter/operator/OrFilter.java      |    69 +
 .../query/dataset/DataSetWithTimeGenerator.java    |    87 +
 .../query/dataset/DataSetWithoutTimeGenerator.java |   187 +
 .../tsfile/read/query/dataset/QueryDataSet.java    |    98 +
 .../query/executor/ExecutorWithTimeGenerator.java  |   120 +
 .../tsfile/read/query/executor/QueryExecutor.java  |    28 +
 .../tsfile/read/query/executor/TsFileExecutor.java |   193 +
 .../read/query/timegenerator/TimeGenerator.java    |    38 +
 .../query/timegenerator/TimeGeneratorImpl.java     |   130 +
 .../read/query/timegenerator/node/AndNode.java     |    90 +
 .../read/query/timegenerator/node/LeafNode.java    |    91 +
 .../tsfile/read/query/timegenerator/node/Node.java |    30 +
 .../read/query/timegenerator/node/NodeType.java    |    28 +
 .../read/query/timegenerator/node/OrNode.java      |   100 +
 .../tsfile/read/reader/DefaultTsFileInput.java     |    93 +
 .../iotdb/tsfile/read/reader/TsFileInput.java      |   139 +
 .../tsfile/read/reader/chunk/ChunkReader.java      |   164 +
 .../read/reader/chunk/ChunkReaderByTimestamp.java  |    43 +
 .../read/reader/chunk/ChunkReaderWithFilter.java   |    48 +
 .../reader/chunk/ChunkReaderWithoutFilter.java     |    35 +
 .../iotdb/tsfile/read/reader/page/PageReader.java  |   251 +
 .../read/reader/series/EmptyFileSeriesReader.java  |    64 +
 .../read/reader/series/FileSeriesReader.java       |   110 +
 .../reader/series/FileSeriesReaderWithFilter.java  |    61 +
 .../series/FileSeriesReaderWithoutFilter.java      |    49 +
 .../reader/series/SeriesReaderByTimestamp.java     |   157 +
 .../java/org/apache/iotdb/tsfile/utils/Binary.java |   127 +
 .../org/apache/iotdb/tsfile/utils/BytesUtils.java  |   858 +
 .../java/org/apache/iotdb/tsfile/utils/Loader.java |    59 +
 .../java/org/apache/iotdb/tsfile/utils/Pair.java   |    84 +
 .../org/apache/iotdb/tsfile/utils/PublicBAOS.java  |    58 +
 .../tsfile/utils/ReadWriteForEncodingUtils.java    |   255 +
 .../iotdb/tsfile/utils/ReadWriteIOUtils.java       |   689 +
 .../apache/iotdb/tsfile/utils/StringContainer.java |   387 +
 .../apache/iotdb/tsfile/write/TsFileWriter.java    |   341 +
 .../iotdb/tsfile/write/chunk/ChunkBuffer.java      |   235 +
 .../tsfile/write/chunk/ChunkGroupWriterImpl.java   |   119 +
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |   295 +
 .../tsfile/write/chunk/IChunkGroupWriter.java      |    89 +
 .../iotdb/tsfile/write/chunk/IChunkWriter.java     |    91 +
 .../apache/iotdb/tsfile/write/page/PageWriter.java |   175 +
 .../apache/iotdb/tsfile/write/record/TSRecord.java |    89 +
 .../write/record/datapoint/BooleanDataPoint.java   |    67 +
 .../tsfile/write/record/datapoint/DataPoint.java   |   155 +
 .../write/record/datapoint/DoubleDataPoint.java    |    67 +
 .../write/record/datapoint/FloatDataPoint.java     |    66 +
 .../write/record/datapoint/IntDataPoint.java       |    66 +
 .../write/record/datapoint/LongDataPoint.java      |    68 +
 .../write/record/datapoint/StringDataPoint.java    |    69 +
 .../iotdb/tsfile/write/schema/FileSchema.java      |   119 +
 .../iotdb/tsfile/write/schema/JsonConverter.java   |   203 +
 .../tsfile/write/schema/MeasurementSchema.java     |   326 +
 .../iotdb/tsfile/write/schema/SchemaBuilder.java   |    93 +
 .../tsfile/write/writer/DefaultTsFileOutput.java   |    83 +
 .../iotdb/tsfile/write/writer/IDataWriter.java     |    23 +
 .../write/writer/IncompleteFileTestUtil.java       |    61 +
 .../write/writer/NativeRestorableIOWriter.java     |    93 +
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |   358 +
 .../iotdb/tsfile/write/writer/TsFileOutput.java    |    84 +
 .../resources/tsfile-format.properties.template    |    55 +
 .../apache/iotdb/tsfile/common/LRUCacheTest.java   |    53 +
 .../apache/iotdb/tsfile/compress/CompressTest.java |    84 +
 .../apache/iotdb/tsfile/compress/SnappyTest.java   |    85 +
 .../tsfile/constant/TimeseriesTestConstant.java    |    34 +
 .../tsfile/encoding/bitpacking/IntPackerTest.java  |    79 +
 .../tsfile/encoding/bitpacking/LongPackerTest.java |   129 +
 .../tsfile/encoding/decoder/BitmapDecoderTest.java |    96 +
 .../tsfile/encoding/decoder/FloatDecoderTest.java  |   220 +
 .../encoding/decoder/GorillaDecoderTest.java       |   257 +
 .../tsfile/encoding/decoder/IntRleDecoderTest.java |   261 +
 .../encoding/decoder/LongRleDecoderTest.java       |   225 +
 .../delta/DeltaBinaryEncoderIntegerTest.java       |   114 +
 .../decoder/delta/DeltaBinaryEncoderLongTest.java  |   196 +
 .../iotdb/tsfile/file/header/PageHeaderTest.java   |   165 +
 .../file/metadata/ChunkGroupMetaDataTest.java      |   155 +
 .../tsfile/file/metadata/ChunkMetaDataTest.java    |   105 +
 .../file/metadata/TimeSeriesMetadataTest.java      |   100 +
 .../file/metadata/TsDeviceMetadataIndexTest.java   |    75 +
 .../tsfile/file/metadata/TsDeviceMetadataTest.java |    64 +
 .../tsfile/file/metadata/TsFileMetaDataTest.java   |   100 +
 .../metadata/statistics/BooleanStatisticsTest.java |    72 +
 .../metadata/statistics/DoubleStatisticsTest.java  |    70 +
 .../metadata/statistics/FloatStatisticsTest.java   |    71 +
 .../metadata/statistics/IntegerStatisticsTest.java |    67 +
 .../metadata/statistics/LongStatisticsTest.java    |    93 +
 .../metadata/statistics/StringStatisticsTest.java  |    70 +
 .../tsfile/file/metadata/utils/TestHelper.java     |   151 +
 .../iotdb/tsfile/file/metadata/utils/Utils.java    |   246 +
 .../iotdb/tsfile/read/ReadInPartitionTest.java     |   194 +
 .../iotdb/tsfile/read/ReadOnlyTsFileTest.java      |   143 +
 .../org/apache/iotdb/tsfile/read/ReadTest.java     |   380 +
 .../iotdb/tsfile/read/TimePlainEncodeReadTest.java |   366 +
 .../tsfile/read/TsFileRestorableReaderTest.java    |    59 +
 .../tsfile/read/TsFileSequenceReaderTest.java      |   117 +
 .../apache/iotdb/tsfile/read/common/PathTest.java  |    83 +
 .../iotdb/tsfile/read/common/TimeRangeTest.java    |   225 +
 .../tsfile/read/controller/ChunkLoaderTest.java    |    65 +
 .../controller/MetadataQuerierByFileImplTest.java  |   123 +
 .../iotdb/tsfile/read/filter/DigestFilterTest.java |   101 +
 .../read/filter/IExpressionOptimizerTest.java      |   261 +
 .../read/filter/MinTimeMaxTimeFilterTest.java      |   209 +
 .../iotdb/tsfile/read/filter/OperatorTest.java     |   171 +
 .../read/query/executor/QueryExecutorTest.java     |   148 +
 .../tsfile/read/query/timegenerator/NodeTest.java  |   136 +
 .../query/timegenerator/ReaderByTimestampTest.java |   107 +
 .../query/timegenerator/TimeGeneratorTest.java     |    87 +
 .../TsFileGeneratorForSeriesReaderByTimestamp.java |   265 +
 .../iotdb/tsfile/read/reader/PageReaderTest.java   |   229 +
 .../iotdb/tsfile/read/reader/ReaderTest.java       |   129 +
 .../apache/iotdb/tsfile/utils/BytesUtilsTest.java  |   391 +
 .../iotdb/tsfile/utils/CommonTestConstant.java     |    34 +
 .../apache/iotdb/tsfile/utils/FileGenerator.java   |   261 +
 .../org/apache/iotdb/tsfile/utils/FileUtils.java   |    83 +
 .../apache/iotdb/tsfile/utils/FileUtilsTest.java   |    57 +
 .../org/apache/iotdb/tsfile/utils/PairTest.java    |    72 +
 .../tsfile/utils/ReadWriteStreamUtilsTest.java     |   165 +
 .../tsfile/utils/ReadWriteToBytesUtilsTest.java    |    66 +
 .../org/apache/iotdb/tsfile/utils/RecordUtils.java |   113 +
 .../apache/iotdb/tsfile/utils/RecordUtilsTest.java |   213 +
 .../iotdb/tsfile/utils/StringContainerTest.java    |   181 +
 .../iotdb/tsfile/utils/TsFileGeneratorForTest.java |   259 +
 .../org/apache/iotdb/tsfile/write/PerfTest.java    |   244 +
 .../iotdb/tsfile/write/ReadPageInMemTest.java      |   192 +
 .../iotdb/tsfile/write/TsFileIOWriterTest.java     |   106 +
 .../iotdb/tsfile/write/TsFileReadWriteTest.java    |   168 +
 .../org/apache/iotdb/tsfile/write/WriteTest.java   |   257 +
 .../write/schema/converter/JsonConverterTest.java  |   126 +
 .../write/schema/converter/SchemaBuilderTest.java  |    61 +
 .../iotdb/tsfile/write/series/PageWriterTest.java  |    85 +
 .../write/writer/NativeRestorableIOWriterTest.java |   315 +
 tsfile/src/test/resources/logback.xml              |   116 +
 tsfile/src/test/resources/test_schema.json         |    25 +
 tsfile/src/test/resources/test_write_schema.json   |    31 +
 996 files changed, 204666 insertions(+)

diff --git a/.checkstyle b/.checkstyle
new file mode 100644
index 0000000..c3770a3
--- /dev/null
+++ b/.checkstyle
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<fileset-config file-format-version="1.2.0" simple-config="true" sync-formatter="false">
+  <local-check-config name="google" location="checkstyle.xml" type="project" description="">
+    <additional-data name="protect-config-file" value="false"/>
+  </local-check-config>
+  <fileset name="all" enabled="true" check-config-name="Google Checks" local="false">
+    <file-match-pattern match-pattern="." include-pattern="true"/>
+  </fileset>
+</fileset-config>
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..6bf71aa
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,31 @@
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/custom.md b/.github/ISSUE_TEMPLATE/custom.md
new file mode 100644
index 0000000..99bb9a0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/custom.md
@@ -0,0 +1,7 @@
+---
+name: Custom issue template
+about: Describe this issue template's purpose here.
+
+---
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..066b2d9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,17 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2f2f2e1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,96 @@
+iotdb/data/*
+cluster/data/*
+iotdb/derby.log
+iotdb/iotdb/data/*
+iotdb/iotdb/derby.log
+iotdb/*.pid
+iotdb/iotdb/data/*
+iotdb/iotdb/gc.log*
+iotdb/iotdb/logs/*
+iotdb/iotdb/lib/*
+iotdb/overflow/
+iotdb/testoverflowfile
+
+tsfile/src/test/resources/perTestInputData
+# Eclipse IDE files
+**/.classpath
+**/.project
+**/.settings/
+# src/main/resources/
+# intellij IDE files
+**/*.iml
+**/.idea/
+**/*.log
+**/*.ipr
+**/*.iws
+# Apple OS X related
+**/.DS_Store
+derby-tsfile-db
+# intellij IDE files
+**/*.iml
+**/.idea/
+
+# Apple OS X related
+**/.DS_Store
+
+# build generated
+**/target/
+**/build/
+
+# intermediately generated locally
+**/logs/
+
+tsfile-timeseries/src/main/resources/logback.out.out.xml
+tsfile-timeseries/src/main/resources/logback.out.xml
+tsfile-service/derby-tsfile-db/
+tsfile-timeseries/src/test/resources/data
+src/main/resources/metadata/mlog.txt
+tsfile-jdbc/src/main/resources/output/queryRes.csv
+
+*.jar
+*.gz
+*.tar.gz
+*.tar
+/data/
+#src/test/resources/logback.xml
+
+
+### Maven ###
+grafana/target/
+!grafana/.mvn/wrapper/maven-wrapper.jar
+grafana/.mvn/
+
+grafana/logs/
+*.log
+
+### STS ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+
+
+### NetBeans ###
+**/nbproject/private/
+**/build/
+**/nbbuild/
+**/dist/
+**/nbdist/
+**/.nb-gradle/
+grafana/data/
+
+
+**/.DS_Store
+
+
+grafana/data/test.csv
+**/lib/
+*.jar
+/target/
+*.tsfile
+tsfile/src/test/resources/*.ts
+
+### Apache release ###
+local-snapshots-dir/
diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000..a22fe2e
--- /dev/null
+++ b/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+  /**
+   * Default URL to download the maven-wrapper.jar from, if no 'wrapperUrl' is provided.
+   */
+  private static final String DEFAULT_DOWNLOAD_URL =
+      "https://repo1.maven.org/maven2/io/takari/maven-wrapper/0.5.3/maven-wrapper-0.5.3.jar";
+
+  /**
+   * Path to the maven-wrapper.properties file, which might contain a wrapperUrl property to
+   * use instead of the default one.
+   */
+  private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+      ".mvn/wrapper/maven-wrapper.properties";
+
+  /**
+   * Path where the maven-wrapper.jar will be saved to.
+   */
+  private static final String MAVEN_WRAPPER_JAR_PATH =
+      ".mvn/wrapper/maven-wrapper.jar";
+
+  /**
+   * Name of the property which should be used to override the default download url for the wrapper.
+   */
+  private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+  public static void main(String[] args) {
+    System.out.println("- Downloader started");
+    File baseDirectory = new File(args[0]);
+    System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+    // If the maven-wrapper.properties exists, read it and check if it contains a custom
+    // wrapperUrl parameter.
+    File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+    String url = DEFAULT_DOWNLOAD_URL;
+    if (mavenWrapperPropertyFile.exists()) {
+      FileInputStream mavenWrapperPropertyFileInputStream = null;
+      try {
+        mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+        Properties mavenWrapperProperties = new Properties();
+        mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+        url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+      } catch (IOException e) {
+        System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+      } finally {
+        try {
+          if (mavenWrapperPropertyFileInputStream != null) {
+            mavenWrapperPropertyFileInputStream.close();
+          }
+        } catch (IOException e) {
+          // Ignore ...
+        }
+      }
+    }
+    System.out.println("- Downloading from: : " + url);
+
+    File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+    if (!outputFile.getParentFile().exists()) {
+      if (!outputFile.getParentFile().mkdirs()) {
+        System.out.println(
+            "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath()
+                + "'");
+      }
+    }
+    System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+    try {
+      downloadFileFromURL(url, outputFile);
+      System.out.println("Done");
+      System.exit(0);
+    } catch (Throwable e) {
+      System.out.println("- Error downloading");
+      e.printStackTrace();
+      System.exit(1);
+    }
+  }
+
+  private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+    URL website = new URL(urlString);
+    ReadableByteChannel rbc;
+    rbc = Channels.newChannel(website.openStream());
+    FileOutputStream fos = new FileOutputStream(destination);
+    fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+    fos.close();
+    rbc.close();
+  }
+
+}
diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000..fbce673
--- /dev/null
+++ b/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip
+#wrapperUrl=https://repo1.maven.org/maven2/io/takari/maven-wrapper/0.2.1/maven-wrapper-0.2.1.jar
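The two wrapper files above work together: MavenWrapperDownloader falls back to its built-in DEFAULT_DOWNLOAD_URL unless .mvn/wrapper/maven-wrapper.properties defines a wrapperUrl property (shown commented out above), while the distributionUrl entry is consumed separately by the wrapper when it fetches the Maven distribution itself. As a rough, stand-alone illustration of that lookup only (not part of this commit; the class name WrapperUrlLookupSketch and its output message are made up for the sketch), the logic amounts to:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class WrapperUrlLookupSketch {

  // Mirrors the constants declared in MavenWrapperDownloader above.
  private static final String DEFAULT_DOWNLOAD_URL =
      "https://repo1.maven.org/maven2/io/takari/maven-wrapper/0.5.3/maven-wrapper-0.5.3.jar";
  private static final String PROPERTIES_PATH = ".mvn/wrapper/maven-wrapper.properties";

  public static void main(String[] args) throws IOException {
    String url = DEFAULT_DOWNLOAD_URL;
    File propertiesFile = new File(PROPERTIES_PATH);
    if (propertiesFile.exists()) {
      Properties props = new Properties();
      try (FileInputStream in = new FileInputStream(propertiesFile)) {
        props.load(in);
      }
      // An explicit 'wrapperUrl' entry takes precedence over the default download URL.
      url = props.getProperty("wrapperUrl", url);
    }
    System.out.println("maven-wrapper.jar would be fetched from: " + url);
  }
}

Run from the repository root with the properties file above, this prints the default Takari URL, since the wrapperUrl line is commented out.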
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..6202bf5
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,153 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Since we don't have an osx test environment, we use travis-ci to test on osx.
+# The free plan of travis-ci offers limited resources, so we only test whether IoTDB can be packaged on jdk8 and jdk11.
+
+language: java
+
+#dist: trusty
+#sudo: required
+
+matrix:
+  include:
+    - os: osx
+      osx_image: xcode10.1 # with JDK11.0.1+13 installed
+      name: osx-oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+    - os: osx
+      osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
+      name: osx-oraclejdk8
+    - os: osx
+      osx_image: xcode10.1 # with JDK11.0.1+13 installed
+      name: osx-openjdk11
+      addons:
+        homebrew:
+          taps:
+            #- homebrew/cask-versions
+            - AdoptOpenJDK/openjdk
+          update: true
+          casks: adoptopenjdk-openjdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+    - os: osx
+      osx_image: xcode9.3  # with JDK1.8.0_112-b16 installed
+      name: osx-openjdk8
+      addons:
+        homebrew:
+          taps:
+           #- homebrew/cask-versions
+           - AdoptOpenJDK/openjdk
+          update: true
+          casks: adoptopenjdk-openjdk8
+    - os: windows
+      language: c
+      name: win-oraclejdk11
+      install:
+        - choco install jdk11 -params 'installdir=c:\\java11'
+        - export PATH=$PATH:"/c/java11/bin"
+        - export JAVA_HOME="/c/java11"
+        - wget -q https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
+        - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "PATH=$MAVEN_HOME/bin:$PATH"
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+          
+    - os: windows
+      language: c
+      name: win-oraclejdk8
+      before_install:
+        - choco install jdk8 -params 'installdir=c:\\jdk8'
+        - wget https://www-eu.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.zip
+        - /C/Progra~1/7-Zip/7z.exe x apache-maven-3.6.1-bin.zip -o/c/mvn361
+      before_script:
+        - export "JAVA_HOME=/c/jdk8"
+        - export "PATH=/c/jdk8/bin:$PATH"
+        - export "PATH=/c/jdk8/jre/bin:$PATH"
+        - export "MAVEN_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "M2_HOME=/c/mvn361/apache-maven-3.6.1"
+        - export "PATH=/c/mvn361/apache-maven-3.6.1/bin:$PATH"
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B clean integration-test
+
+    - os: linux
+      name: linux-openjdk11
+      dist: trusty
+      sudo: required
+      before_install:
+        - wget https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz -O jdk11.tar.gz
+        - tar -xzf jdk11.tar.gz
+      before_script:
+        - export JAVA_HOME=$PWD/jdk-11.0.2/
+        - export PATH=$JAVA_HOME/bin:$PATH
+      script:
+        - java -version
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+    - os: linux
+      name: linux-openjdk8
+      dist: trusty
+      jdk: openjdk8
+    - os: linux
+      name: linux-oraclejdk8
+      dist: trusty
+      jdk: oraclejdk8
+    - os: linux
+      name: linux-oraclejdk11
+      dist: trusty
+      jdk: oraclejdk11
+      script:
+        - java -version
+        - mvn -version
+        - mvn -B apache-rat:check
+        - mvn -B clean package -pl iotdb,grafana,iotdb-cli,example,:kafka-example,:rocketmq-example -am integration-test
+
+cache:
+  directories:
+    - '$HOME/.m2/repository'
+    - '$HOME/.sonar/cache'
+
+
+
+# skip `before_install` stage
+before_install: true
+
+# skip `install` stage
+install: true
+
+script:
+  - java -version
+  - mvn -B apache-rat:check
+  - mvn -B clean integration-test
+
+after_success:
+
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..1571ad8
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,182 @@
+#!groovy
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+pipeline {
+
+    agent {
+        node {
+            label 'ubuntu'
+        }
+    }
+
+    environment {
+        // Test failures will be handled by the Jenkins junit steps and will mark the build as unstable.
+        MVN_TEST_FAIL_IGNORE = '-Dmaven.test.failure.ignore=true'
+    }
+
+    tools {
+        maven 'Maven 3 (latest)'
+        jdk 'JDK 1.8 (latest)'
+    }
+
+    options {
+        timeout(time: 1, unit: 'HOURS')
+        // When we have test failures, for example, we don't need to run the remaining steps
+        skipStagesAfterUnstable()
+    }
+
+    stages {
+        stage('Initialization') {
+            steps {
+                echo 'Building Branch: ' + env.BRANCH_NAME
+                echo 'Using PATH = ' + env.PATH
+            }
+        }
+
+        stage('Checkout') {
+            steps {
+                echo 'Checking out branch ' + env.BRANCH_NAME
+                checkout scm
+            }
+        }
+
+        stage('Build (not master)') {
+            when {
+                expression {
+                    env.BRANCH_NAME != 'master'
+                }
+            }
+            steps {
+                echo 'Building'
+                sh 'mvn ${MVN_TEST_FAIL_IGNORE} ${MVN_LOCAL_REPO_OPT} clean install'
+            }
+            post {
+                always {
+                    junit(testResults: '**/surefire-reports/*.xml', allowEmptyResults: true)
+                    junit(testResults: '**/failsafe-reports/*.xml', allowEmptyResults: true)
+                }
+            }
+        }
+
+        stage('Build') {
+            when {
+                branch 'master'
+            }
+            steps {
+                echo 'Building'
+                // We'll deploy to a relative directory so we can
+                // deploy new versions only if the entire build succeeds
+                sh 'mvn ${MVN_TEST_FAIL_IGNORE} -DaltDeploymentRepository=snapshot-repo::default::file:./local-snapshots-dir clean deploy'
+            }
+            post {
+                always {
+                    junit(testResults: '**/surefire-reports/*.xml', allowEmptyResults: true)
+                    junit(testResults: '**/failsafe-reports/*.xml', allowEmptyResults: true)
+                }
+            }
+        }
+
+        stage('Code Quality') {
+            when {
+                branch 'master'
+            }
+            steps {
+                echo 'Checking Code Quality'
+                withSonarQubeEnv('ASF Sonar Analysis') {
+                    sh 'mvn sonar:sonar'
+                }
+            }
+        }
+
+        stage('Deploy') {
+            when {
+                branch 'master'
+            }
+            steps {
+                echo 'Deploying'
+                // Deploy the artifacts using the wagon-maven-plugin.
+                sh 'mvn -f jenkins.pom -X -P deploy-snapshots wagon:upload'
+            }
+        }
+
+        stage('Cleanup') {
+            steps {
+                echo 'Cleaning up the workspace'
+                deleteDir()
+            }
+        }
+    }
+
+    // Send out notifications on unsuccessful builds.
+    post {
+        // If this build failed, send an email to the list.
+        failure {
+            script {
+                if(env.BRANCH_NAME == "master") {
+                    emailext(
+                        subject: "[BUILD-FAILURE]: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]'",
+                        body: """
+BUILD-FAILURE: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]':
+
+Check console output at "<a href="${env.BUILD_URL}">${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]</a>"
+""",
+                        to: "dev@iotdb.apache.org"
+                    )
+                }
+            }
+        }
+
+        // If this build didn't fail, but there were failing tests, send an email to the list.
+        unstable {
+            script {
+                if(env.BRANCH_NAME == "master") {
+                    emailext(
+                        subject: "[BUILD-UNSTABLE]: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]'",
+                        body: """
+BUILD-UNSTABLE: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]':
+
+Check console output at "<a href="${env.BUILD_URL}">${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]</a>"
+""",
+                        to: "dev@iotdb.apache.org"
+                    )
+                }
+            }
+        }
+
+        // Send an email, if the last build was not successful and this one is.
+        success {
+            script {
+                if ((env.BRANCH_NAME == "master") && (currentBuild.previousBuild != null) && (currentBuild.previousBuild.result != 'SUCCESS')) {
+                    emailext (
+                        subject: "[BUILD-STABLE]: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]'",
+                        body: """
+BUILD-STABLE: Job '${env.JOB_NAME} [${env.BRANCH_NAME}] [${env.BUILD_NUMBER}]':
+
+Is back to normal.
+""",
+                        to: "dev@iotdb.apache.org"
+                    )
+                }
+            }
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/License b/License
new file mode 100644
index 0000000..58f5c09
--- /dev/null
+++ b/License
@@ -0,0 +1,283 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+================================================================
+APACHE IOTDB SUBCOMPONENTS:
+
+Apache IoTDB project includes a number of submodules with separate copyright notices
+and license terms. Your use of these submodules is subject to the terms and
+conditions of the following licenses.
+
+================================================================
+
+Apache Software Foundation License 2.0
+--------------------------------------
+
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/AstErrorNode.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/AstNode.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/AstNodeOrigin.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/Node.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/ParseDriver.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/ParseError.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/ParseException.java
+iotdb/src/main/java/org/apache/iotdb/db/sql/parse/ParseUtils.java
+
+================================================================
+
+The binary distribution of this product bundles these dependencies under the
+following license. See licenses/ for text of these licenses.
+
+Apache Software Foundation License 2.0
+--------------------------------------
+commons-cli:commons-cli:1.3.1
+commons-io:commons-io:2.5
+org.apache.commons:commons-collections4:4.0
+org.apache.commons:commons-lang3:3.1
+org.apache.thrift:libthrift:0.9.3
+org.xerial.snappy:snappy-java:1.0.5-M1
+com.alibaba:fastjson:1.2.31
+com.sun.xml.fastinfoset:FastInfoset:1.2.14
+
+
+BSD 2-Clause
+------------
+jline:jline:2.14.5
+org.hamcrest:hamcrest-core:1.3
+org.hamcrest:hamcrest-library:1.3
+
+
+BSD 3-Clause
+------------
+org.antlr:antlr-runtime:3.5.2
+
+
+MIT License
+------------
+org.slf4j:slf4j-api
+org.mockito:mockito-all:1.10.19
+me.tongfei:progressbar:0.7.3
+
+
+EDL 1.0
+------------
+com.sun.istack:istack-commons-runtime:3.0.6
+org.jvnet.staxex:stax-ex:1.8
+
+
+EPL 1.0
+------------
+ch.qos.logback:logback-classic:1.1.11
+ch.qos.logback:logback-core:1.1.11
+
+
+EPL 2.0
+------------
+junit:junit:4.12
+
+
+CDDL 1.1
+------------
+javax.annotation:javax.annotation-api:1.3.2
+javax.activation:javax.activation-api:1.2.0
+javax.xml.bind:jaxb-api:2.4.0-b180725.0427
+org.glassfish.jaxb:jaxb-runtime:2.4.0-b180725.0644
\ No newline at end of file
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..4fafe7b
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,545 @@
+Apache IoTDB
+Copyright 2018 and onwards The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+Export Control Notice
+---------------------
+
+This distribution includes cryptographic software. The country in which you currently reside may have
+restrictions on the import, possession, use, and/or re-export to another country, of encryption software.
+BEFORE using any encryption software, please check your country's laws, regulations and policies concerning
+the import, possession, or use, and re-export of encryption software, to see if this is permitted. See
+<http://www.wassenaar.org/> for more information.
+
+The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), has classified this
+software as Export Commodity Control Number (ECCN) 5D002.C.1, which includes information security software
+using or performing cryptographic functions with asymmetric algorithms. The form and manner of this Apache
+Software Foundation distribution makes it eligible for export under the License Exception ENC Technology
+Software Unrestricted (TSU) exception (see the BIS Export Administration Regulations, Section 740.13) for
+both object code and source code.
+
+The following provides more details on the included cryptographic software:
+
+This software uses Apache Commons Crypto (https://commons.apache.org/proper/commons-crypto/) to
+support authentication, and encryption and decryption of data sent across the network between
+services.
+
+
+IoTDB project uses 4 Chinese Patents:
+* 201711384490X
+* 201810111712.9
+* 201711322631.5
+* 201711319331.1
+
+In accordance with the Apache 2.0 License, the owner of the patents, Tsinghua University, grants users the right to use the patents under the requirements of the Apache 2.0 License.
+
+============================================================================
+
+This product contains a modified portion of 'Apache Hive', an open source
+data warehouse system, which can be obtained at:
+
+  * LICENSE:
+    * https://github.com/apache/hive/blob/master/LICENSE (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/apache/hive
+
+============================================================================
+
+The binary distribution of this product bundles binaries of
+jline:jline (https://github.com/jline/jline2), which is available under the BSD 2-Clause License.
+
+---
+
+The binary distribution of this product bundles binaries of
+org.hamcrest:hamcrest-core and org.hamcrest:hamcrest-library:1.3 (http://hamcrest.org/JavaHamcrest/), which is available under the BSD 2-Clause License.
+
+---
+
+The binary distribution of this product bundles binaries of
+org.antlr:antlr-runtime:3.5.2 (https://github.com/antlr/antlr3), which is available under the BSD 3-Clause License.
+
+---
+
+The binary distribution of this product bundles binaries of
+com.sun.istack:istack-commons-runtime:3.0.6 (https://mvnrepository.com/artifact/com.sun.istack/istack-commons-runtime), which is available under the EDL 1.0 License.
+
+---
+
+The binary distribution of this product bundles binaries of
+ch.qos.logback:logback-classic:1.1.11 and ch.qos.logback:logback-core:1.1.11 (https://logback.qos.ch/), which is available under the EPL 1.0 License.
+It has the following notices:
+
+Logback: the reliable, generic, fast and flexible logging framework.
+Copyright (C) 1999-2017, QOS.ch. All rights reserved.
+
+This program and the accompanying materials are dual-licensed under
+either the terms of the Eclipse Public License v1.0 as published by
+the Eclipse Foundation
+
+  or (per the licensee's choosing)
+
+under the terms of the GNU Lesser General Public License version 2.1
+as published by the Free Software Foundation.
+
+The EPL/LGPL dual-license serves several purposes. The LGPL license ensures continuity in terms of licensing of the logback project. Prior to version 0.9.18, logback was licensed (exclusively) under the LGPL v2.1. Moreover, since the EPL is deemed incompatible by the Free Software Foundation, the LGPL will allow various licensees, in particular software distributors who may be already bound by the terms of the GPL or the LGPL, to distribute our software.
+
+On the other hand, the EPL license will placate organizations which refuse certain restrictions imposed by the LGPL.
+
+Please note that logback-classic is intended to be used behind the SLF4J API, which is licensed under the MIT license.
+
+If you wish to make a significant contribution to the logback project, you are invited to file a Contributor License Agreement. The purpose of this agreement is to formalize the terms of your contribution and to protect the project in case of litigation.
+
+Upon request, we may exempt open-source projects from LGPL and EPL's reciprocity clauses so that the said projects can develop logback extensions under the license of their choice. Exemptions are granted on a case by case basis.
+
+---
+
+The binary distribution of this product bundles binaries of
+junit:junit:4.12 (https://github.com/junit-team/junit4), which is available under the EPL 2.0 License.
+It has the following notices:
+
+===================================================================================
+   ==  Notices and attributions required by libraries that the project depends on   ==
+   ===================================================================================
+
+ The JUnit depends on Java Hamcrest (http://hamcrest.org/JavaHamcrest/).
+
+---
+
+The binary distribution of this product bundles binaries of
+javax.annotation:javax.annotation-api:1.3.2 (https://github.com/javaee/javax.annotation), which is available under the CDDL 1.1 License.
+It has the following notices:
+
+NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
+LICENSE (CDDL)
+
+The code released under the CDDL shall be governed by the laws of the
+State of California (excluding conflict-of-law provisions). Any
+litigation relating to this License shall be subject to the jurisdiction
+of the Federal Courts of the Northern District of California and the
+state courts of the State of California, with venue lying in Santa Clara
+County, California.
+
+
+
+  The GNU General Public License (GPL) Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+51 Franklin Street, Fifth Floor
+Boston, MA 02110-1335
+USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is
+intended to guarantee your freedom to share and change free software--to
+make sure the software is free for all its users. This General Public
+License applies to most of the Free Software Foundation's software and
+to any other program whose authors commit to using it. (Some other Free
+Software Foundation software is covered by the GNU Library General
+Public License instead.) You can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price.
+Our General Public Licenses are designed to make sure that you have the
+freedom to distribute copies of free software (and charge for this
+service if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs; and that you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone
+to deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis
+or for a fee, you must give the recipients all the rights that you have.
+You must make sure that they, too, receive or can get the source code.
+And you must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+Finally, any free program is threatened constantly by software patents.
+We wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must
+be licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and
+modification follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a
+notice placed by the copyright holder saying it may be distributed under
+the terms of this General Public License. The "Program", below, refers
+to any such program or work, and a "work based on the Program" means
+either the Program or any derivative work under copyright law: that is
+to say, a work containing the Program or a portion of it, either
+verbatim or with modifications and/or translated into another language.
+(Hereinafter, translation is included without limitation in the term
+"modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of running
+the Program is not restricted, and the output from the Program is
+covered only if its contents constitute a work based on the Program
+(independent of having been made by running the Program). Whether that
+is true depends on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source
+code as you receive it, in any medium, provided that you conspicuously
+and appropriately publish on each copy an appropriate copyright notice
+and disclaimer of warranty; keep intact all the notices that refer to
+this License and to the absence of any warranty; and give any other
+recipients of the Program a copy of this License along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of
+it, thus forming a work based on the Program, and copy and distribute
+such modifications or work under the terms of Section 1 above, provided
+that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any part
+    thereof, to be licensed as a whole at no charge to all third parties
+    under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a notice
+    that there is no warranty (or else, saying that you provide a
+    warranty) and that users may redistribute the program under these
+    conditions, and telling the user how to view a copy of this License.
+    (Exception: if the Program itself is interactive but does not
+    normally print such an announcement, your work based on the Program
+    is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program, and
+can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based on
+the Program, the distribution of the whole must be on the terms of this
+License, whose permissions for other licensees extend to the entire
+whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of a
+storage or distribution medium does not bring the other work under the
+scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections 1
+    and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your cost
+    of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer to
+    distribute corresponding source code. (This alternative is allowed
+    only for noncommercial distribution and only if you received the
+    program in object code or executable form with such an offer, in
+    accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source code
+means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to control
+compilation and installation of the executable. However, as a special
+exception, the source code distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies the
+executable.
+
+If distribution of executable or object code is made by offering access
+to copy from a designated place, then offering equivalent access to copy
+the source code from the same place counts as distribution of the source
+code, even though third parties are not compelled to copy the source
+along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt otherwise
+to copy, modify, sublicense or distribute the Program is void, and will
+automatically terminate your rights under this License. However, parties
+who have received copies, or rights, from you under this License will
+not have their licenses terminated so long as such parties remain in
+full compliance.
+
+5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and all
+its terms and conditions for copying, distributing or modifying the
+Program or works based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further restrictions
+on the recipients' exercise of the rights granted herein. You are not
+responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot distribute
+so as to satisfy simultaneously your obligations under this License and
+any other pertinent obligations, then as a consequence you may not
+distribute the Program at all. For example, if a patent license would
+not permit royalty-free redistribution of the Program by all those who
+receive copies directly or indirectly through you, then the only way you
+could satisfy both it and this License would be to refrain entirely from
+distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is implemented
+by public license practices. Many people have made generous
+contributions to the wide range of software distributed through that
+system in reliance on consistent application of that system; it is up to
+the author/donor to decide if he or she is willing to distribute
+software through any other system and a licensee cannot impose that choice.
+
+This section is intended to make thoroughly clear what is believed to be
+a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License may
+add an explicit geographical distribution limitation excluding those
+countries, so that distribution is permitted only in or among countries
+not thus excluded. In such case, this License incorporates the
+limitation as if written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new
+versions of the General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Program does not specify a version
+number of this License, you may choose any version ever published by the
+Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the
+author to ask for permission. For software which is copyrighted by the
+Free Software Foundation, write to the Free Software Foundation; we
+sometimes make exceptions for this. Our decision will be guided by the
+two goals of preserving the free status of all derivatives of our free
+software and of promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
+EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey
+the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    One line to give the program's name and a brief idea of what it does.
+    Copyright (C) <year> <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful, but
+    WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+    General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
+    `show w'. This is free software, and you are welcome to redistribute
+    it under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the
+appropriate parts of the General Public License. Of course, the commands
+you use may be called something other than `show w' and `show c'; they
+could even be mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+    Yoyodyne, Inc., hereby disclaims all copyright interest in the
+    program `Gnomovision' (which makes passes at compilers) written by
+    James Hacker.
+
+    signature of Ty Coon, 1 April 1989
+    Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications
+with the library. If this is what you want to do, use the GNU Library
+General Public License instead of this License.
+
+#
+
+Certain source files distributed by Oracle America, Inc. and/or its
+affiliates are subject to the following clarification and special
+exception to the GPLv2, based on the GNU Project exception for its
+Classpath libraries, known as the GNU Classpath Exception, but only
+where Oracle has expressly included in the particular source file's
+header the words "Oracle designates this particular file as subject to
+the "Classpath" exception as provided by Oracle in the LICENSE file
+that accompanied this code."
+
+You should also note that Oracle includes multiple, independent
+programs in this software package. Some of those programs are provided
+under licenses deemed incompatible with the GPLv2 by the Free Software
+Foundation and others.  For example, the package includes programs
+licensed under the Apache License, Version 2.0.  Such programs are
+licensed to you under their original licenses.
+
+Oracle facilitates your further distribution of this package by adding
+the Classpath Exception to the necessary parts of its GPLv2 code, which
+permits you to use that code in combination with other independent
+modules not licensed under the GPLv2.  However, note that this would
+not permit you to commingle code under an incompatible license with
+Oracle's GPLv2 licensed code by, for example, cutting and pasting such
+code into a file also containing Oracle's GPLv2 licensed code and then
+distributing the result.  Additionally, if you were to remove the
+Classpath Exception from any of the files to which it applies and
+distribute the result, you would likely be required to license some or
+all of the other code in that distribution under the GPLv2 as well, and
+since the GPLv2 is incompatible with the license terms of some items
+included in the distribution by Oracle, removing the Classpath
+Exception could therefore effectively compromise your ability to
+further distribute the package.
+
+Proceed with caution and we recommend that you obtain the advice of a
+lawyer skilled in open source matters before removing the Classpath
+Exception or making modifications to this package which may
+subsequently be redistributed and/or involve the use of third party
+software.
+
+CLASSPATH EXCEPTION
+Linking this library statically or dynamically with other modules is
+making a combined work based on this library.  Thus, the terms and
+conditions of the GNU General Public License version 2 cover the whole
+combination.
+
+As a special exception, the copyright holders of this library give you
+permission to link this library with independent modules to produce an
+executable, regardless of the license terms of these independent
+modules, and to copy and distribute the resulting executable under
+terms of your choice, provided that you also meet, for each linked
+independent module, the terms and conditions of the license of that
+module.  An independent module is a module which is not derived from or
+based on this library.  If you modify this library, you may extend this
+exception to your version of the library, but you are not obligated to
+do so.  If you do not wish to do so, delete this exception statement
+from your version.
+
+---
+
+The binary distribution of this product bundles binaries of
+javax.activation:javax.activation-api:1.2.0 (https://docs.oracle.com/javase/7/docs/api/javax/activation/package-summary.html), which is available under the CDDL 1.1 License.
+It has the following notices:
+
+Copyright © 1993, 2018, Oracle and/or its affiliates. All rights reserved. Use is subject to license terms. Also see the documentation redistribution policy.
+
+---
+
+The binary distribution of this product bundles binaries of
+javax.xml.bind:jaxb-api:2.4.0-b180725.0427 (https://docs.oracle.com/javase/8/docs/api/javax/xml/bind/JAXB.html), which is available under the CDDL 1.1 License.
+It has the following notices:
+
+Copyright © 1993, 2018, Oracle and/or its affiliates. All rights reserved. Use is subject to license terms. Also see the documentation redistribution policy.
+
+---
+
+The binary distribution of this product bundles binaries of
+org.glassfish.jaxb:jaxb-runtime:2.4.0-b180725.0644 (https://repo1.maven.org/maven2/org/glassfish/jaxb/osgi-test-runtime/2.4.0-b180725.0644/), which is available under the CDDL 1.1 License.
+
+---
+
+The binary distribution of this product bundles binaries of
+org.jvnet.staxex:stax-ex:1.8 (https://repo1.maven.org/maven2/org/glassfish/jaxb/osgi-test-runtime/2.4.0-b180725.0644/), which is available under the EDL 1.0 License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a418329
--- /dev/null
+++ b/README.md
@@ -0,0 +1,308 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+
+# IoTDB
+[![Build Status](https://www.travis-ci.org/apache/incubator-iotdb.svg?branch=master)](https://www.travis-ci.org/apache/incubator-iotdb)
+[![codecov](https://codecov.io/gh/thulab/incubator-iotdb/branch/master/graph/badge.svg)](https://codecov.io/gh/thulab/incubator-iotdb)
+[![GitHub release](https://img.shields.io/github/release/apache/incubator-iotdb.svg)](https://github.com/apache/incubator-iotdb/releases)
+[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html)
+![](https://github-size-badge.herokuapp.com/apache/incubator-iotdb.svg)
+![](https://img.shields.io/github/downloads/apache/incubator-iotdb/total.svg)
+![](https://img.shields.io/badge/platform-win10%20%7C%20macox%20%7C%20linux-yellow.svg)
+![](https://img.shields.io/badge/java--language-1.8-blue.svg)
+[![IoTDB Website](https://img.shields.io/website-up-down-green-red/https/shields.io.svg?label=iotdb-website)](https://iotdb.apache.org/)
+
+# Overview
+
+IoTDB (Internet of Things Database) is an integrated data management engine designed for time series data. It provides users with services for data collection, storage and analysis. Thanks to its lightweight architecture, high performance and rich feature set, together with its deep integration with the Hadoop and Spark ecosystems, IoTDB meets the requirements of massive data storage, high-speed data ingestion and complex data analysis in the industrial IoT field.
+
+# Main Features
+
+IoTDB's main features are as follows:
+
+1. Flexible deployment. IoTDB provides a one-click installation tool for the cloud, a decompress-and-run terminal tool, and a bridge tool (the Data Synchronization Tool) connecting the cloud platform and the terminals.
+2. Low hardware cost. IoTDB achieves a high compression ratio for data stored on disk.
+3. Efficient directory structure. IoTDB efficiently organizes complex time series from intelligent networked devices, groups time series from devices of the same type, and offers a fuzzy search strategy over massive, complex directories of time series.
+4. High-throughput read and write. IoTDB supports data access from millions of low-power devices over persistent connections, as well as high-speed reads and writes for the intelligent networked devices and mixed devices mentioned above.
+5. Rich query semantics. IoTDB supports time alignment of time series across devices and sensors, computation on time series fields (e.g. frequency domain transformation) and a rich set of aggregation functions over the time dimension.
+6. Easy to get started. IoTDB offers a SQL-like query language, a standard JDBC API and easy-to-use import/export tools.
+7. Tight integration with the open source ecosystem. IoTDB supports analysis ecosystems such as Hadoop and Spark, as well as the Grafana visualization tool.
+
+For the latest information about IoTDB, please visit our [IoTDB official website](https://iotdb.apache.org/).
+
+# Prerequisites
+
+IoTDB requires Java (>= 1.8). To run IoTDB, only a JRE needs to be installed.
+
+If you want to compile and install IoTDB from source code, a JDK and Maven (>= 3.1) are required.
+A standalone Maven installation is not mandatory: you can use the provided Maven wrapper, `./mvnw.sh` on Linux/OS X or `.\mvnw.cmd` on Windows, to facilitate development.
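+
+For example, the build command used later in this guide can be run through the wrapper instead of a system-wide Maven (a sketch; run it from the repository root):
+
+```
+> ./mvnw.sh clean package -pl iotdb -am -Dmaven.test.skip=true
+```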
+
+If you want to use Hadoop or Spark to analyze IoTDB data files (called TsFiles), you need to compile the hadoop and spark modules.
+
+# Quick Start
+
+This short guide will walk you through the basic process of using IoTDB. For a more complete guide, please visit the [User Guide](https://iotdb.apache.org/#/Documents/latest/sec1) on our website.
+
+## Installation from source code
+
+Use git to get IoTDB source code:
+
+```
+> git clone https://github.com/apache/incubator-iotdb.git
+```
+
+Or use the following command if you have configured an SSH key on GitHub:
+
+```
+> git clone git@github.com:apache/incubator-iotdb.git
+```
+
+Now suppose your directory is like this:
+
+```
+> pwd
+/workspace/incubator-iotdb
+
+> ls -l
+incubator-iotdb/     <-- root path
+|
++- iotdb/
+|
++- jdbc/
+|
++- tsfile/
+|
+...
+|
++- pom.xml
+```
+
+Let $IOTDB_HOME = /workspace/incubator-iotdb/iotdb/iotdb/
+
+Let $IOTDB_CLI_HOME = /workspace/incubator-iotdb/iotdb-cli/cli
+
+If this is not the first time you are building IoTDB, remember to delete the following files first:
+
+```
+> rm -rf $IOTDB_HOME/data/
+> rm -rf $IOTDB_HOME/lib/
+```
+
+Then under the root path of incubator-iotdb, you can build IoTDB using Maven:
+
+```
+> pwd
+/workspace/incubator-iotdb
+
+> mvn clean package -pl iotdb -am -Dmaven.test.skip=true
+```
+
+If successful, you will see the following text in the terminal:
+
+```
+[INFO] ------------------------------------------------------------------------
+[INFO] Reactor Summary:
+[INFO]
+[INFO] IoTDB Root ......................................... SUCCESS [  7.020 s]
+[INFO] TsFile ............................................. SUCCESS [ 10.486 s]
+[INFO] Service-rpc ........................................ SUCCESS [  3.717 s]
+[INFO] IoTDB Jdbc ......................................... SUCCESS [  3.076 s]
+[INFO] IoTDB .............................................. SUCCESS [  8.258 s]
+[INFO] ------------------------------------------------------------------------
+[INFO] BUILD SUCCESS
+[INFO] ------------------------------------------------------------------------
+```
+Otherwise, you may need to check the error messages and fix the problems.
+
+After the build finishes, the IoTDB distribution will be located in the folder "iotdb/iotdb", which includes the following contents:
+
+```
+iotdb/iotdb/  <-- root path
+|
++- bin/       <-- script files
+|
++- conf/      <-- configuration files
+|
++- lib/       <-- project dependencies
+```
+
+<!-- > NOTE: We also provide already built JARs and project at [http://tsfile.org/download](http://tsfile.org/download) instead of build the jar package yourself. -->
+
+## Configure
+
+Before starting to use IoTDB, you need to edit the configuration files first. For your convenience, default values have already been set in these files.
+
+In total, we provide three configuration modules: the environment config module (iotdb-env.bat, iotdb-env.sh), the system config module (tsfile-format.properties, iotdb-engine.properties) and the log config module (logback.xml). All of these configuration files are placed in the iotdb/conf folder.
+
+For more details, please check [Chapter 4: Deployment and Management](https://iotdb.apache.org/#/Documents/latest/sec4) in our documentation.
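+
+As a quick illustration of the environment config module, you might cap the server's JVM heap by editing iotdb-env.sh before starting the server. The variable names below are an assumption based on the common convention for such scripts, not an authoritative reference:
+
+```
+# iotdb-env.sh (sketch, assumed variable names): limit the heap used by the IoTDB server
+MAX_HEAP_SIZE="2G"
+HEAP_NEWSIZE="512M"
+```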
+
+## Start
+
+### Start Server
+
+Now we can start the server by running the startup script:
+
+```
+# Unix/OS X
+> $IOTDB_HOME/bin/start-server.sh
+
+# Windows
+> $IOTDB_HOME\bin\start-server.bat
+```
+
+### Stop Server
+
+The server can be stopped with ctrl-C or the following script:
+
+```
+# Unix/OS X
+> $IOTDB_HOME/bin/stop-server.sh
+
+# Windows
+> $IOTDB_HOME\bin\stop-server.bat
+```
+
+### Start Client
+
+Now let's try reading and writing some data from IoTDB with our client. To start the client, you need to specify the server's IP and PORT, as well as the USER_NAME and PASSWORD.
+
+```
+# You can first build the cli project
+> pwd
+/workspace/incubator-iotdb
+
+> mvn clean package -pl iotdb-cli -am -Dmaven.test.skip=true
+
+# Unix/OS X
+> $IOTDB_CLI_HOME/bin/start-client.sh -h <IP> -p <PORT> -u <USER_NAME>
+
+# Windows
+> $IOTDB_CLI_HOME\bin\start-client.bat -h <IP> -p <PORT> -u <USER_NAME>
+```
+
+> NOTE: IoTDB ships with a default user named 'root', whose default password is also 'root'. You can use this default user for your first try or if you have not created any users yourself.
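+
+For a first local run with the default account, the full command might look like this (a concrete sketch; 6667 is assumed to be the default RPC port, adjust it if you changed the configuration):
+
+```
+> $IOTDB_CLI_HOME/bin/start-client.sh -h 127.0.0.1 -p 6667 -u root -pw root
+```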
+
+The command line client is interactive, so if everything is ready, you should see the welcome logo and prompt:
+
+```
+ _____       _________  ______   ______
+|_   _|     |  _   _  ||_   _ `.|_   _ \
+  | |   .--.|_/ | | \_|  | | `. \ | |_) |
+  | | / .'`\ \  | |      | |  | | |  __'.
+ _| |_| \__. | _| |_    _| |_.' /_| |__) |
+|_____|'.__.' |_____|  |______.'|_______/  version x.x.x
+
+
+IoTDB> login successfully
+IoTDB>
+```
+### Have a try
+Now you can use IoTDB SQL to operate IoTDB, and when you are done, enter the 'quit' or 'exit' command to leave the client.
+
+But let's try something slightly more interesting first:
+
+``` 
+IoTDB> SET STORAGE GROUP TO root.vehicle
+execute successfully.
+IoTDB> CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE
+execute successfully.
+```
+So far, we have created a storage group called root.vehicle and added a time series called d0.s0 to it. Let's take a look at what we have done using the 'SHOW TIMESERIES' command.
+
+``` 
+IoTDB> SHOW TIMESERIES
+===  Timeseries Tree  ===
+
+root:{
+    vehicle:{
+        d0:{
+            s0:{
+                 DataType: INT32,
+                 Encoding: RLE,
+                 Compressor: UNCOMPRESSED,
+                 args: {},
+                 StorageGroup: root.vehicle
+            }
+        }
+    }
+}
+```
+Inserting time series data is a basic operation of IoTDB; you can use the 'INSERT' command to do this:
+
+```
+IoTDB> insert into root.vehicle.d0(timestamp,s0) values(1,101);
+execute successfully.
+```
+The data we have just inserted is displayed like this:
+
+```
+IoTDB> SELECT d0.s0 FROM root.vehicle
++-----------------------+------------------+
+|                   Time|root.vehicle.d0.s0|
++-----------------------+------------------+
+|1970-01-01T08:00:00.001|               101|
++-----------------------+------------------+
+record number = 1
+execute successfully.
+```
+
+If your session looks similar to what's above, congrats, your IoTDB is operational!
+
+For more on what commands are supported by IoTDB SQL, see our documentation [Chapter 5: IoTDB SQL Documentation](https://iotdb.apache.org/#/Documents/latest/sec5).
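+
+If you want to experiment a little further, a couple of additional statements in the same style as above might look like this (a sketch only; the linked SQL documentation is the authoritative reference for the exact syntax):
+
+```
+IoTDB> SELECT d0.s0 FROM root.vehicle WHERE time <= 10
+IoTDB> SELECT count(s0) FROM root.vehicle.d0
+```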
+
+
+# Usage of import-csv.sh
+
+### Create metadata
+```
+SET STORAGE GROUP TO root.fit.d1;
+SET STORAGE GROUP TO root.fit.d2;
+SET STORAGE GROUP TO root.fit.p;
+CREATE TIMESERIES root.fit.d1.s1 WITH DATATYPE=INT32,ENCODING=RLE;
+CREATE TIMESERIES root.fit.d1.s2 WITH DATATYPE=TEXT,ENCODING=PLAIN;
+CREATE TIMESERIES root.fit.d2.s1 WITH DATATYPE=INT32,ENCODING=RLE;
+CREATE TIMESERIES root.fit.d2.s3 WITH DATATYPE=INT32,ENCODING=RLE;
+CREATE TIMESERIES root.fit.p.s1 WITH DATATYPE=INT32,ENCODING=RLE;
+```
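+
+The CSV file passed to the import shell is expected to pair a time column with the time series created above. A hypothetical example file (the exact layout is an assumption here; check the user guide for the authoritative format) could look like this:
+
+```
+Time,root.fit.d1.s1,root.fit.d1.s2,root.fit.d2.s1,root.fit.d2.s3,root.fit.p.s1
+1,100,hello,200,300,400
+2,500,world,600,700,800
+```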
+
+### Run import shell
+```
+# Unix/OS X
+> $IOTDB_CLI_HOME/bin/import-csv.sh -h <ip> -p <port> -u <username> -pw <password> -f <xxx.csv>
+
+# Windows
+> $IOTDB_CLI_HOME\bin\import-csv.bat -h <ip> -p <port> -u <username> -pw <password> -f <xxx.csv>
+```
+
+### Error data file
+
+`csvInsertError.error`
+
+# Usage of export-csv.sh
+
+### Run export shell
+```
+# Unix/OS X
+> $IOTDB_CLI_HOME/bin/export-csv.sh -h <ip> -p <port> -u <username> -pw <password> -td <xxx.csv> [-tf <time-format>]
+
+# Windows
+> $IOTDB_CLI_HOME\bin\export-csv.bat -h <ip> -p <port> -u <username> -pw <password> -td <xxx.csv> [-tf <time-format>]
+```
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
new file mode 100644
index 0000000..1ec2210
--- /dev/null
+++ b/RELEASE_NOTES
@@ -0,0 +1,39 @@
+==============================================================
+Apache IoTDB (incubating) 0.8.0
+==============================================================
+
+This is the first official release of Apache IoTDB after joining the Incubator.
+
+New Features
+------------
+
+IOTDB-5     Support data deletion
+IOTDB-21    Add ChunkGroup offset information in ChunkGroupMetaData
+IOTDB-25    Add some introduction for JMX MBean Monitor in user guide
+IOTDB-29    Multiple Exceptions when reading empty measurements from TsFileSequenceReader
+IOTDB-36    [TsFile] Enable recover data from a incomplete TsFile and continue to write
+
+Incompatible changes
+--------------------
+
+Miscellaneous changes
+---------------------
+
+Known Issues
+------------
+
+Bug Fixes
+---------
+
+IOTDB-7     OpenFileNumUtilTest failed
+IOTDB-16    invalid link on https://iotdb.apache.org/#/Documents/Quick Start
+IOTDB-17    Need to update chapter Start of https://iotdb.apache.org/#/Documents/Quick Start
+IOTDB-18    IoTDB startup script does not work on openjdk11
+IOTDB-19    Fail to start start-server.sh script on Ubuntu 14.04/Ubuntu 16.04
+IOTDB-22    BUG in TsFileSequenceReader when reading tsfile
+IOTDB-24    DELETION error after restart a server
+IOTDB-26    Return error when quit client
+IOTDB-27    Delete error message
+IOTDB-30    flush timeseries cause select to returns "Msg:null"
+IOTDB-31    Cannot set float number precision
+
diff --git a/asf.header b/asf.header
new file mode 100644
index 0000000..7f7950e
--- /dev/null
+++ b/asf.header
@@ -0,0 +1,16 @@
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
diff --git a/checkstyle.xml b/checkstyle.xml
new file mode 100644
index 0000000..07ad0ba
--- /dev/null
+++ b/checkstyle.xml
@@ -0,0 +1,235 @@
+<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE module PUBLIC "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN" "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<!--
+    Checkstyle configuration that checks the Google coding conventions from Google Java Style
+    that can be found at https://google.github.io/styleguide/javaguide.html.
+    Checkstyle is very configurable. Be sure to read the documentation at
+    http://checkstyle.sf.net (or in your downloaded distribution).
+    To completely disable a check, just comment it out or delete it from the file.
+    Authors: Max Vetrenko, Ruslan Diachenko, Roman Ivanov.
+ -->
+<module name="Checker">
+    <property name="charset" value="UTF-8"/>
+    <property name="severity" value="warning"/>
+    <property name="fileExtensions" value="java, properties, xml"/>
+    <!-- Checks for whitespace                               -->
+    <!-- See http://checkstyle.sf.net/config_whitespace.html -->
+    <module name="FileTabCharacter">
+        <property name="eachLine" value="true"/>
+    </module>
+    <module name="TreeWalker">
+        <module name="OuterTypeFilename"/>
+        <module name="IllegalTokenText">
+            <property name="tokens" value="STRING_LITERAL, CHAR_LITERAL"/>
+            <property name="format" value="\\u00(09|0(a|A)|0(c|C)|0(d|D)|22|27|5(C|c))|\\(0(10|11|12|14|15|42|47)|134)"/>
+            <property name="message" value="Consider using special escape sequence instead of octal value or Unicode escaped value."/>
+        </module>
+        <module name="AvoidEscapedUnicodeCharacters">
+            <property name="allowEscapesForControlCharacters" value="true"/>
+            <property name="allowByTailComment" value="true"/>
+            <property name="allowNonPrintableEscapes" value="true"/>
+        </module>
+        <module name="LineLength">
+            <property name="max" value="100"/>
+            <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
+        </module>
+        <module name="AvoidStarImport"/>
+        <module name="OneTopLevelClass"/>
+        <module name="NoLineWrap"/>
+        <module name="EmptyBlock">
+            <property name="option" value="TEXT"/>
+            <property name="tokens" value="LITERAL_TRY, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE, LITERAL_SWITCH"/>
+        </module>
+        <module name="NeedBraces"/>
+        <module name="LeftCurly"/>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlySame"/>
+            <property name="tokens" value="LITERAL_TRY, LITERAL_CATCH, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE,                     LITERAL_DO"/>
+        </module>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlyAlone"/>
+            <property name="option" value="alone"/>
+            <property name="tokens" value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT,                     INSTANCE_INIT"/>
+        </module>
+        <module name="WhitespaceAround">
+            <property name="allowEmptyConstructors" value="true"/>
+            <property name="allowEmptyMethods" value="true"/>
+            <property name="allowEmptyTypes" value="true"/>
+            <property name="allowEmptyLoops" value="true"/>
+            <message key="ws.notFollowed" value="WhitespaceAround: ''{0}'' is not followed by whitespace. Empty blocks may only be represented as '{}' when not part of a multi-block statement (4.1.3)"/>
+            <message key="ws.notPreceded" value="WhitespaceAround: ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="OneStatementPerLine"/>
+        <module name="MultipleVariableDeclarations"/>
+        <module name="ArrayTypeStyle"/>
+        <module name="MissingSwitchDefault"/>
+        <module name="FallThrough"/>
+        <module name="UpperEll"/>
+        <module name="ModifierOrder"/>
+        <module name="EmptyLineSeparator">
+            <property name="allowNoEmptyLineBetweenFields" value="true"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapDot"/>
+            <property name="tokens" value="DOT"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapComma"/>
+            <property name="tokens" value="COMMA"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ELLIPSIS is EOL until https://github.com/google/styleguide/issues/258 -->
+            <property name="id" value="SeparatorWrapEllipsis"/>
+            <property name="tokens" value="ELLIPSIS"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ARRAY_DECLARATOR is EOL until https://github.com/google/styleguide/issues/259 -->
+            <property name="id" value="SeparatorWrapArrayDeclarator"/>
+            <property name="tokens" value="ARRAY_DECLARATOR"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapMethodRef"/>
+            <property name="tokens" value="METHOD_REF"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="PackageName">
+            <property name="format" value="^[a-z]+(\.[a-z][a-z0-9]*)*$"/>
+            <message key="name.invalidPattern" value="Package name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="TypeName">
+            <message key="name.invalidPattern" value="Type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MemberName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9]*$"/>
+            <message key="name.invalidPattern" value="Member name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern" value="Parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LambdaParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern" value="Lambda parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="CatchParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern" value="Catch parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LocalVariableName">
+            <property name="tokens" value="VARIABLE_DEF"/>
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern" value="Local variable name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ClassTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern" value="Class type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MethodTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern" value="Method type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="InterfaceTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern" value="Interface type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="NoFinalizer"/>
+        <module name="GenericWhitespace">
+            <message key="ws.followed" value="GenericWhitespace ''{0}'' is followed by whitespace."/>
+            <message key="ws.preceded" value="GenericWhitespace ''{0}'' is preceded with whitespace."/>
+            <message key="ws.illegalFollow" value="GenericWhitespace ''{0}'' should followed by whitespace."/>
+            <message key="ws.notPreceded" value="GenericWhitespace ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="Indentation">
+            <property name="basicOffset" value="2"/>
+            <property name="braceAdjustment" value="0"/>
+            <property name="caseIndent" value="2"/>
+            <property name="throwsIndent" value="4"/>
+            <property name="lineWrappingIndentation" value="4"/>
+            <property name="arrayInitIndent" value="2"/>
+        </module>
+        <module name="AbbreviationAsWordInName">
+            <property name="ignoreFinal" value="false"/>
+            <property name="allowedAbbreviationLength" value="1"/>
+        </module>
+        <module name="OverloadMethodsDeclarationOrder"/>
+        <module name="VariableDeclarationUsageDistance"/>
+        <module name="CustomImportOrder">
+            <property name="sortImportsInGroupAlphabetically" value="true"/>
+            <property name="separateLineBetweenGroups" value="true"/>
+            <property name="customImportOrderRules" value="STATIC###THIRD_PARTY_PACKAGE"/>
+        </module>
+        <module name="MethodParamPad"/>
+        <module name="NoWhitespaceBefore">
+            <property name="tokens" value="COMMA, SEMI, POST_INC, POST_DEC, DOT, ELLIPSIS, METHOD_REF"/>
+            <property name="allowLineBreaks" value="true"/>
+        </module>
+        <module name="ParenPad"/>
+        <module name="OperatorWrap">
+            <property name="option" value="NL"/>
+            <property name="tokens" value="BAND, BOR, BSR, BXOR, DIV, EQUAL, GE, GT, LAND, LE, LITERAL_INSTANCEOF, LOR,                     LT, MINUS, MOD, NOT_EQUAL, PLUS, QUESTION, SL, SR, STAR, METHOD_REF "/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationMostCases"/>
+            <property name="tokens" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF"/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationVariables"/>
+            <property name="tokens" value="VARIABLE_DEF"/>
+            <property name="allowSamelineMultipleAnnotations" value="true"/>
+        </module>
+        <module name="NonEmptyAtclauseDescription"/>
+        <module name="JavadocTagContinuationIndentation"/>
+        <module name="SummaryJavadoc">
+            <property name="forbiddenSummaryFragments" value="^@return the *|^This method returns |^A [{]@code [a-zA-Z0-9]+[}]( is a )"/>
+        </module>
+        <module name="JavadocParagraph"/>
+        <module name="AtclauseOrder">
+            <property name="tagOrder" value="@param, @return, @throws, @deprecated"/>
+            <property name="target" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF, VARIABLE_DEF"/>
+        </module>
+        <module name="JavadocMethod">
+            <property name="scope" value="public"/>
+            <property name="allowMissingParamTags" value="true"/>
+            <property name="allowMissingThrowsTags" value="true"/>
+            <property name="allowMissingReturnTag" value="true"/>
+            <property name="minLineCount" value="2"/>
+            <property name="allowedAnnotations" value="Override, Test"/>
+            <property name="allowThrowsTagsForSubclasses" value="true"/>
+        </module>
+        <module name="MethodName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9_]*$"/>
+            <message key="name.invalidPattern" value="Method name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="SingleLineJavadoc">
+            <property name="ignoreInlineTags" value="false"/>
+        </module>
+        <module name="EmptyCatchBlock">
+            <property name="exceptionVariableName" value="expected"/>
+        </module>
+        <module name="CommentsIndentation"/>
+    </module>
+</module>
diff --git a/cluster/pom.xml b/cluster/pom.xml
new file mode 100644
index 0000000..4630f70
--- /dev/null
+++ b/cluster/pom.xml
@@ -0,0 +1,207 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>root</artifactId>
+        <groupId>org.apache.iotdb</groupId>
+        <version>0.8.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>iotdb-cluster</artifactId>
+    <name>IoTDB Cluster</name>
+    <properties>
+        <jraft.version>1.2.5</jraft.version>
+        <antlr3.version>3.5.2</antlr3.version>
+        <common.lang3.version>3.8.1</common.lang3.version>
+        <cluster.test.skip>false</cluster.test.skip>
+        <cluster.it.skip>${cluster.test.skip}</cluster.it.skip>
+        <cluster.ut.skip>${cluster.test.skip}</cluster.ut.skip>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.iotdb</groupId>
+            <artifactId>iotdb</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.iotdb</groupId>
+            <artifactId>iotdb-jdbc</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <!-- jraft -->
+        <dependency>
+            <groupId>com.alipay.sofa</groupId>
+            <artifactId>jraft-core</artifactId>
+            <version>${jraft.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-api</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-api</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-core</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-slf4j-impl</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-jcl</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>airline</artifactId>
+            <version>0.8</version>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-clean-plugin</artifactId>
+                <configuration>
+                    <filesets>
+                        <fileset>
+                            <directory>${project.basedir}/../iotdb/iotdb/lib_cluster</directory>
+                            <includes>
+                                <include>**/*.jar</include>
+                            </includes>
+                            <followSymlinks>false</followSymlinks>
+                        </fileset>
+                        <fileset>
+                            <directory>${project.basedir}/../iotdb/iotdb/data</directory>
+                            <includes>
+                                <include>**/*</include>
+                            </includes>
+                            <followSymlinks>false</followSymlinks>
+                        </fileset>
+                        <fileset>
+                            <directory>${project.basedir}/../iotdb/iotdb/logs</directory>
+                            <includes>
+                                <include>**/*</include>
+                            </includes>
+                            <followSymlinks>false</followSymlinks>
+                        </fileset>
+                    </filesets>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>${antlr3.version}</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>copy-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.basedir}/../iotdb/iotdb/lib_cluster</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <outputDirectory>${project.basedir}/../iotdb/iotdb/lib_cluster</outputDirectory>
+                </configuration>
+            </plugin>
+            <!--using `mvn test` to triggerAction UT, `mvn verify` to triggerAction ITs
+                        Reference: https://antoniogoncalves.org/2012/12/13/lets-turn-integration-tests-with-maven-to-a-first-class-citizen/-->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <configuration>
+                    <skipTests>${cluster.ut.skip}</skipTests>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-failsafe-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>run-integration-tests</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>integration-test</goal>
+                            <goal>verify</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <skipTests>${cluster.test.skip}</skipTests>
+                    <skipITs>${cluster.it.skip}</skipITs>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+    <profiles>
+        <profile>
+            <id>skipClusterTests</id>
+            <activation>
+                <property>
+                    <name>skipTests</name>
+                    <value>true</value>
+                </property>
+            </activation>
+            <properties>
+                <cluster.test.skip>true</cluster.test.skip>
+                <cluster.ut.skip>true</cluster.ut.skip>
+                <cluster.it.skip>true</cluster.it.skip>
+            </properties>
+        </profile>
+        <profile>
+            <id>skipUT_Cluster_Tests</id>
+            <activation>
+                <property>
+                    <name>skipUTs</name>
+                    <value>true</value>
+                </property>
+            </activation>
+            <properties>
+                <cluster.ut.skip>true</cluster.ut.skip>
+            </properties>
+        </profile>
+    </profiles>
+</project>
diff --git a/cluster/script/deploy.sh b/cluster/script/deploy.sh
new file mode 100755
index 0000000..b842c80
--- /dev/null
+++ b/cluster/script/deploy.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+## Only for test
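+## Usage sketch (arguments inferred from the code below; host list and remote paths are test-specific):
+##   deploy.sh <node-count: 1|3|5|7|9|10> <replication-factor> <path-to-cluster-module>
+## e.g.  ./deploy.sh 3 2 /workspace/incubator-iotdb/cluster   (the example path is hypothetical)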
+nodes=$1
+if [ $nodes == "1" ]
+then
+  iplist=('192.168.130.14')
+elif [ $nodes == "3" ]
+then
+  iplist=('192.168.130.12' '192.168.130.15' '192.168.130.14')
+elif [ $nodes == "5" ]
+then
+  iplist=('192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "7" ]
+then
+  iplist=('192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "9" ]
+then
+  iplist=('192.168.130.6' '192.168.130.7' '192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "10" ]
+then
+  iplist=('192.168.130.5' '192.168.130.6' '192.168.130.7' '192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+else
+  echo "node number error"
+  exit 1;
+fi
+
+replication=$2
+
+
+for ip in ${iplist[@]}
+do
+  idx="$(cut -d'.' -f4 <<<"$ip")"
+  cat $3/src/test/resources/conf/$nodes-$replication-$idx.properties
+  scp $3/src/test/resources/conf/$nodes-$replication-$idx.properties fit@$ip:/home/fit/xuyi/incubator-iotdb/iotdb/iotdb/conf/iotdb-cluster.properties
+done
diff --git a/cluster/script/stop.sh b/cluster/script/stop.sh
new file mode 100755
index 0000000..583445c
--- /dev/null
+++ b/cluster/script/stop.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+## Only for test
+nodes=$1
+if [ $nodes == "1" ]
+then
+  iplist=('192.168.130.14')
+elif [ $nodes == "3" ]
+then
+  iplist=('192.168.130.12' '192.168.130.15' '192.168.130.14')
+elif [ $nodes == "5" ]
+then
+  iplist=('192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "7" ]
+then
+  iplist=('192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "9" ]
+then
+  iplist=('192.168.130.6' '192.168.130.7' '192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+elif [ $nodes == "10" ]
+then
+  iplist=('192.168.130.5' '192.168.130.6' '192.168.130.7' '192.168.130.8' '192.168.130.12' '192.168.130.13' '192.168.130.14' '192.168.130.15' '192.168.130.16' '192.168.130.18')
+else
+  echo "node number error"
+  exit 1;
+fi
+
+for ip in ${iplist[@]}
+do
+  ssh fit@$ip "chmod a+x /home/fit/xuyi/incubator-iotdb/iotdb/iotdb/bin/stop-cluster.sh"
+  ssh fit@$ip "sh /home/fit/xuyi/incubator-iotdb/iotdb/iotdb/bin/stop-cluster.sh"
+  ssh fit@$ip "sh /home/fit/xuyi/incubator-iotdb/iotdb/iotdb/bin/stop-server.sh"
+  ssh fit@$ip "rm -rf /home/fit/xuyi/incubator-iotdb/iotdb/iotdb/data"
+  ssh fit@$ip "rm -rf /home/fit/xuyi/incubator-iotdb/iotdb/iotdb/logs"
+done
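
Usage note: stop.sh takes only the node count; for each matching host it stops the cluster and server processes and removes the data and logs directories. An illustrative invocation (argument assumed):

  sh cluster/script/stop.sh 3
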
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
new file mode 100644
index 0000000..2e4cef6
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/ThreadName.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent;
+
+public enum ThreadName {
+
+  /**
+   * Node as client thread
+   */
+  NODE_AS_CLIENT("Node-As-Client-Thread"),
+
+  /**
+   * QP Task thread
+   */
+  QP_TASK("QP-Task-Thread"),
+
+  /**
+   * Remote query timer
+   */
+  REMOTE_QUERY_TIMER("Remote-Query-Timer");
+
+  private String name;
+
+  ThreadName(String name) {
+    this.name = name;
+  }
+
+  public String getName() {
+    return name;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
new file mode 100644
index 0000000..3b93623
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/NodeAsClientThreadManager.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent.pool;
+
+import org.apache.iotdb.cluster.concurrent.ThreadName;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+
+/**
+ * Manage all node-as-client tasks in a thread pool.
+ */
+public class NodeAsClientThreadManager extends ThreadPoolManager {
+
+  private static final String MANAGER_NAME = "node as client thread manager";
+
+  private NodeAsClientThreadManager() {
+    init();
+  }
+
+  public static NodeAsClientThreadManager getInstance() {
+    return NodeAsClientThreadManager.InstanceHolder.instance;
+  }
+
+  /**
+   * Name of Pool Manager
+   */
+  @Override
+  public String getManagerName() {
+    return MANAGER_NAME;
+  }
+
+  @Override
+  public String getThreadName() {
+    return ThreadName.NODE_AS_CLIENT.getName();
+  }
+
+  @Override
+  public int getThreadPoolSize() {
+    return ClusterDescriptor.getInstance().getConfig().getConcurrentInnerRpcClientThread();
+  }
+
+  private static class InstanceHolder {
+
+    private InstanceHolder() {
+    }
+
+    private static NodeAsClientThreadManager instance = new NodeAsClientThreadManager();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
new file mode 100644
index 0000000..1e33b77
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QPTaskThreadManager.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent.pool;
+
+import org.apache.iotdb.cluster.concurrent.ThreadName;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+
+/**
+ * Manage all QP tasks in a thread pool.
+ */
+public class QPTaskThreadManager extends ThreadPoolManager {
+
+  private static final String MANAGER_NAME = "qp-task-thread-manager";
+
+  private QPTaskThreadManager() {
+    init();
+  }
+
+  public static QPTaskThreadManager getInstance() {
+    return QPTaskThreadManager.InstanceHolder.instance;
+  }
+
+  /**
+   * Name of Pool Manager
+   */
+  @Override
+  public String getManagerName() {
+    return MANAGER_NAME;
+  }
+
+  @Override
+  public String getThreadName() {
+    return ThreadName.QP_TASK.getName();
+  }
+
+  @Override
+  public int getThreadPoolSize() {
+    return ClusterDescriptor.getInstance().getConfig().getConcurrentQPSubTaskThread();
+  }
+
+  private static class InstanceHolder {
+
+    private InstanceHolder() {
+    }
+
+    private static QPTaskThreadManager instance = new QPTaskThreadManager();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
new file mode 100644
index 0000000..1362825
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/QueryTimerThreadManager.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent.pool;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import org.apache.iotdb.cluster.concurrent.ThreadName;
+import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
+
+/**
+ * Manage all query timers in a query node; when a timer expires, all query resources held for
+ * the remote coordinator node are released.
+ */
+public class QueryTimerThreadManager extends ThreadPoolManager {
+
+  private static final String MANAGER_NAME = "remote-query-timer-thread-manager";
+
+  private static final int CORE_POOL_SIZE = 1;
+
+  @Override
+  public void init() {
+    pool = IoTDBThreadPoolFactory.newScheduledThreadPool(getThreadPoolSize(), getThreadName());
+  }
+
+  public static QueryTimerThreadManager getInstance() {
+    return QueryTimerThreadManager.QueryTimerManagerHolder.INSTANCE;
+  }
+
+  @Override
+  public String getManagerName() {
+    return MANAGER_NAME;
+  }
+
+  @Override
+  public String getThreadName() {
+    return ThreadName.REMOTE_QUERY_TIMER.getName();
+  }
+
+  @Override
+  public int getThreadPoolSize() {
+    return CORE_POOL_SIZE;
+  }
+
+  public ScheduledFuture<?> execute(Runnable task, long delayMs) {
+    checkInit();
+    return ((ScheduledExecutorService) pool).schedule(task, delayMs, TimeUnit.MILLISECONDS);
+  }
+
+  private static class QueryTimerManagerHolder {
+
+    private static final QueryTimerThreadManager INSTANCE = new QueryTimerThreadManager();
+
+    private QueryTimerManagerHolder() {
+
+    }
+  }
+}
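
Usage note: a query node can arm one such timer per remote query and cancel it whenever the coordinator communicates again. A minimal sketch, assuming a hypothetical releaseQueryResource(taskId) cleanup callback and using the 30-minute constant defined in ClusterConstant:

  ScheduledFuture<?> timer = QueryTimerThreadManager.getInstance().execute(
      () -> releaseQueryResource(taskId),           // hypothetical cleanup for one remote query
      ClusterConstant.QUERY_TIMEOUT_IN_QUERY_NODE); // timeout in milliseconds
  // coordinator contacted this node again before the timeout: cancel and re-arm
  timer.cancel(false);
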
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
new file mode 100644
index 0000000..72bec94
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.concurrent.pool;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class ThreadPoolManager {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ThreadPoolManager.class);
+
+  ExecutorService pool;
+
+  public void checkInit() {
+    if (pool == null) {
+      init();
+    }
+  }
+
+  /**
+   * Init pool manager
+   */
+  public void init(){
+    pool = IoTDBThreadPoolFactory.newFixedThreadPool(getThreadPoolSize(), getThreadName());
+  }
+
+  /**
+   * Block new submissions and exit once all running threads and queued tasks have finished.
+   *
+   * @param block if true, wait up to {@code timeout} milliseconds for the pool to terminate; if
+   * false, return directly after shutdown is requested.
+   * @param timeout blocking timeout in milliseconds.
+   * @throws ProcessorException if interrupted while waiting for the pool to terminate.
+   */
+  public void close(boolean block, long timeout) throws ProcessorException {
+    if (pool != null) {
+      try {
+        pool.shutdown();
+        if (block) {
+          try {
+            if (!pool.awaitTermination(timeout, TimeUnit.MILLISECONDS)) {
+              LOGGER.debug(
+                  String
+                      .format("%s thread pool doesn't exit after %d ms", getManagerName(),
+                          timeout));
+            }
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new ProcessorException(
+                String
+                    .format("Interrupted while waiting %s thread pool to exit.", getManagerName()),
+                e);
+          }
+        }
+      } finally {
+        pool = null;
+      }
+    }
+  }
+
+  /**
+   * Name of Pool Manager
+   */
+  public abstract String getManagerName();
+
+  public abstract String getThreadName();
+
+  public abstract int getThreadPoolSize();
+
+  public void execute(Runnable task) {
+    checkInit();
+    pool.execute(task);
+  }
+
+  public Future<?> submit(Runnable task) {
+    checkInit();
+    return pool.submit(task);
+  }
+
+  public int getActiveCnt() {
+    return ((ThreadPoolExecutor) pool).getActiveCount();
+  }
+
+}
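
Usage note: callers go through one of the concrete singleton managers above; the pool is created lazily on first use and closed with the blocking variant during shutdown. A minimal sketch (the Runnable body and logger are placeholders):

  QPTaskThreadManager manager = QPTaskThreadManager.getInstance();
  manager.execute(() -> LOGGER.info("run one QP sub-task"));
  try {
    // block up to CLOSE_THREAD_POOL_BLOCK_TIMEOUT ms for queued tasks to finish
    manager.close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
  } catch (ProcessorException e) {
    LOGGER.error("Interrupted while closing the QP task thread pool", e);
  }
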
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
new file mode 100644
index 0000000..1ab6eda
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.config;
+
+import com.alipay.sofa.jraft.util.OnlyForTest;
+import java.io.File;
+import java.io.IOException;
+import org.apache.commons.io.FileUtils;
+import org.apache.iotdb.db.conf.IoTDBConfig;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.utils.FilePathUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ClusterConfig {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterConfig.class);
+  public static final String CONFIG_NAME = "iotdb-cluster.properties";
+  public static final String DEFAULT_NODE = "127.0.0.1:8888";
+  public static final String METADATA_GROUP_ID = "metadata";
+  private static final String DEFAULT_RAFT_DIR = "raft";
+  private static final String DEFAULT_RAFT_METADATA_DIR = "metadata";
+  private static final String DEFAULT_RAFT_LOG_DIR = "log";
+  private static final String DEFAULT_RAFT_SNAPSHOT_DIR = "snapshot";
+
+  /**
+   * Cluster node: {ip1:port,ip2:port,...,ipn:port}
+   */
+  private String[] nodes = {DEFAULT_NODE};
+
+  /**
+   * Replication number
+   */
+  private int replication = 1;
+
+  private String ip = "127.0.0.1";
+
+  private int port = 8888;
+
+  /**
+   * Path for holder to store raft log
+   */
+  private String raftLogPath;
+
+  /**
+   * Path for holder to store raft snapshot
+   */
+  private String raftSnapshotPath;
+
+  /**
+   * Path for holder to store raft metadata
+   */
+  private String raftMetadataPath;
+
+  /**
+   * A follower becomes a candidate if it doesn't receive any message from the leader within
+   * {@code electionTimeoutMs} milliseconds. Default: 1000 (1s).
+   */
+  private int electionTimeoutMs = 1000;
+
+  /**
+   * When the difference between the leader's and a follower's log is less than this value, the
+   * follower is considered to have caught up.
+   */
+  private int maxCatchUpLogNum = 100000;
+
+  /**
+   * Whether to enable the delayed snapshot mechanism or not
+   */
+  private boolean delaySnapshot = false;
+
+  /**
+   * Maximum allowed delay of a snapshot, in hours.
+   */
+  private int delayHours = 24;
+
+  /**
+   * When a node receives a request from a client and cannot process it itself, it forwards the
+   * request to other nodes in the cluster. This parameter is the maximum number of times the
+   * request is redone.
+   **/
+  private int qpTaskRedoCount = 10;
+
+  /**
+   * When a node receives a request from a client and cannot process it itself, it forwards the
+   * request to other nodes in the cluster. This parameter is the maximum timeout of such
+   * forwarded requests, in milliseconds.
+   **/
+  private int qpTaskTimeout = 5000;
+
+  /**
+   * Number of virtual nodes
+   */
+  private int numOfVirtualNodes = 2;
+
+  /**
+   * Maximum number of inner RPC client threads. When this value is <= 0, CPU core count * 10 is used.
+   */
+  private int concurrentInnerRpcClientThread = Runtime.getRuntime().availableProcessors() * 10;
+
+  /**
+   * Maximum queue length of QP tasks waiting to be executed. If the number of waiting QP tasks
+   * exceeds this value, new QP tasks are rejected.
+   */
+  private int maxQueueNumOfQPTask = 500;
+
+  /**
+   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private int readMetadataConsistencyLevel = 1;
+
+  /**
+   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private int readDataConsistencyLevel = 1;
+
+  /**
+   * Maximum number of threads that concurrently execute tasks generated by client requests. Each
+   * client request corresponds to a QP task, and a QP task may be divided into several sub-tasks,
+   * so this value bounds the total number of sub-tasks. When this value is <= 0, CPU core count * 10 is used.
+   */
+  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 10;
+
+  /**
+   * Batch size of data read from a remote query node in one request while reading; default value
+   * is 10000. The smaller the value, the more round trips are needed and the slower reading becomes.
+   */
+  private int batchReadSize = 10000;
+
+  /**
+   * Maximum number of cached batch-data lists per series on the coordinator node while reading;
+   * default value is 2. The coordinator node receives client requests, requests data from the
+   * query nodes, and collects the results.
+   */
+  private int maxCachedBatchDataListSize = 2;
+
+  public ClusterConfig() {
+    // empty constructor
+  }
+
+  public void setDefaultPath() {
+    IoTDBConfig conf = IoTDBDescriptor.getInstance().getConfig();
+    String iotdbDataDir = conf.getDataDir();
+    iotdbDataDir = FilePathUtils.regularizePath(iotdbDataDir);
+    String raftDir = iotdbDataDir + DEFAULT_RAFT_DIR;
+    this.raftSnapshotPath = raftDir + File.separatorChar + DEFAULT_RAFT_SNAPSHOT_DIR;
+    this.raftLogPath = raftDir + File.separatorChar + DEFAULT_RAFT_LOG_DIR;
+    this.raftMetadataPath = raftDir + File.separatorChar + DEFAULT_RAFT_METADATA_DIR;
+  }
+
+  public void createAllPath() {
+    createPath(this.raftSnapshotPath);
+    createPath(this.raftLogPath);
+    createPath(this.raftMetadataPath);
+  }
+
+  private void createPath(String path) {
+    try {
+      FileUtils.forceMkdir(new File(path));
+    } catch (IOException e) {
+      LOGGER.warn("Path {} already exists.", path);
+    }
+  }
+
+  @OnlyForTest
+  public void deleteAllPath() throws IOException {
+    FileUtils.deleteDirectory(new File(this.raftSnapshotPath));
+    FileUtils.deleteDirectory(new File(this.raftLogPath));
+    FileUtils.deleteDirectory(new File(this.raftMetadataPath));
+  }
+
+  public String[] getNodes() {
+    return nodes;
+  }
+
+  public void setNodes(String[] nodes) {
+    this.nodes = nodes;
+  }
+
+  public int getReplication() {
+    return replication;
+  }
+
+  public void setReplication(int replication) {
+    this.replication = replication;
+  }
+
+  public String getIp() {
+    return ip;
+  }
+
+  public void setIp(String ip) {
+    this.ip = ip;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  public String getRaftLogPath() {
+    return raftLogPath;
+  }
+
+  public void setRaftLogPath(String raftLogPath) {
+    this.raftLogPath = raftLogPath;
+  }
+
+  public String getRaftSnapshotPath() {
+    return raftSnapshotPath;
+  }
+
+  public void setRaftSnapshotPath(String raftSnapshotPath) {
+    this.raftSnapshotPath = raftSnapshotPath;
+  }
+
+  public String getRaftMetadataPath() {
+    return raftMetadataPath;
+  }
+
+  public void setRaftMetadataPath(String raftMetadataPath) {
+    this.raftMetadataPath = raftMetadataPath;
+  }
+
+  public int getElectionTimeoutMs() {
+    return electionTimeoutMs;
+  }
+
+  public void setElectionTimeoutMs(int electionTimeoutMs) {
+    this.electionTimeoutMs = electionTimeoutMs;
+  }
+
+  public int getMaxCatchUpLogNum() {
+    return maxCatchUpLogNum;
+  }
+
+  public void setMaxCatchUpLogNum(int maxCatchUpLogNum) {
+    this.maxCatchUpLogNum = maxCatchUpLogNum;
+  }
+
+  public boolean isDelaySnapshot() {
+    return delaySnapshot;
+  }
+
+  public void setDelaySnapshot(boolean delaySnapshot) {
+    this.delaySnapshot = delaySnapshot;
+  }
+
+  public int getDelayHours() {
+    return delayHours;
+  }
+
+  public void setDelayHours(int delayHours) {
+    this.delayHours = delayHours;
+  }
+
+  public int getQpTaskRedoCount() {
+    return qpTaskRedoCount;
+  }
+
+  public void setQpTaskRedoCount(int qpTaskRedoCount) {
+    this.qpTaskRedoCount = qpTaskRedoCount;
+  }
+
+  public int getQpTaskTimeout() {
+    return qpTaskTimeout;
+  }
+
+  public void setQpTaskTimeout(int qpTaskTimeout) {
+    this.qpTaskTimeout = qpTaskTimeout;
+  }
+
+  public int getNumOfVirtualNodes() {
+    return numOfVirtualNodes;
+  }
+
+  public void setNumOfVirtualNodes(int numOfVirtualNodes) {
+    this.numOfVirtualNodes = numOfVirtualNodes;
+  }
+
+  public int getConcurrentInnerRpcClientThread() {
+    return concurrentInnerRpcClientThread;
+  }
+
+  public void setConcurrentInnerRpcClientThread(int concurrentInnerRpcClientThread) {
+    this.concurrentInnerRpcClientThread = concurrentInnerRpcClientThread;
+  }
+
+  public int getMaxQueueNumOfQPTask() {
+    return maxQueueNumOfQPTask;
+  }
+
+  public void setMaxQueueNumOfQPTask(int maxQueueNumOfQPTask) {
+    this.maxQueueNumOfQPTask = maxQueueNumOfQPTask;
+  }
+
+  public int getReadMetadataConsistencyLevel() {
+    return readMetadataConsistencyLevel;
+  }
+
+  public void setReadMetadataConsistencyLevel(int readMetadataConsistencyLevel) {
+    this.readMetadataConsistencyLevel = readMetadataConsistencyLevel;
+  }
+
+  public int getReadDataConsistencyLevel() {
+    return readDataConsistencyLevel;
+  }
+
+  public void setReadDataConsistencyLevel(int readDataConsistencyLevel) {
+    this.readDataConsistencyLevel = readDataConsistencyLevel;
+  }
+
+  public int getConcurrentQPSubTaskThread() {
+    return concurrentQPSubTaskThread;
+  }
+
+  public void setConcurrentQPSubTaskThread(int concurrentQPSubTaskThread) {
+    this.concurrentQPSubTaskThread = concurrentQPSubTaskThread;
+  }
+
+  public int getBatchReadSize() {
+    return batchReadSize;
+  }
+
+  public void setBatchReadSize(int batchReadSize) {
+    this.batchReadSize = batchReadSize;
+  }
+
+  public int getMaxCachedBatchDataListSize() {
+    return maxCachedBatchDataListSize;
+  }
+
+  public void setMaxCachedBatchDataListSize(int maxCachedBatchDataListSize) {
+    this.maxCachedBatchDataListSize = maxCachedBatchDataListSize;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
new file mode 100644
index 0000000..77e2476
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.config;
+
+public class ClusterConstant {
+
+  private ClusterConstant() {
+  }
+
+  /**
+   * Set read metadata consistency level pattern
+   */
+  public static final String SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+metadata\\s+level\\s+to\\s+\\d+";
+  public static final String SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN = "set\\s+read\\s+data\\s+level\\s+to\\s+\\d+";
+  public static final int MAX_CONSISTENCY_LEVEL = 2;
+  public static final int STRONG_CONSISTENCY_LEVEL = 1;
+  public static final int WEAK_CONSISTENCY_LEVEL = 2;
+
+  /**
+   * Maximum time the main thread is blocked while waiting for all running task threads and queued
+   * tasks to finish. Each client request corresponds to a QP task, and a QP task may be divided
+   * into several sub-tasks. The unit is milliseconds.
+   */
+  public static final int CLOSE_THREAD_POOL_BLOCK_TIMEOUT = 1000;
+
+  /**
+   * Query timeout in a query node. If the time since the last communication with the coordinator
+   * node exceeds this value, the corresponding query resources are released. Each query in a query
+   * node has a <code>QueryRepeaterTimer</code>; the unit is milliseconds. Default value is 30 minutes.
+   */
+  public static final int QUERY_TIMEOUT_IN_QUERY_NODE = 30 * 60 * 1000;
+
+}
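
For reference, the two patterns above match statements such as (values illustrative):

  set read metadata level to 2
  set read data level to 1

where the trailing number is the consistency level: 1 for strong, 2 for weak, and MAX_CONSISTENCY_LEVEL (2) is the largest accepted value.
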
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
new file mode 100644
index 0000000..3251dbd
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.config;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl;
+import org.apache.iotdb.db.conf.IoTDBConfig;
+import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ClusterDescriptor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterDescriptor.class);
+
+  private IoTDBConfig ioTDBConf = IoTDBDescriptor.getInstance().getConfig();
+
+  private ClusterConfig conf = new ClusterConfig();
+
+  private ClusterDescriptor() {
+    loadProps();
+  }
+
+  public static ClusterDescriptor getInstance() {
+    return ClusterDescriptorHolder.INSTANCE;
+  }
+
+  public ClusterConfig getConfig() {
+    return conf;
+  }
+
+  /**
+   * Load a property file and set ClusterConfig variables. This method is public only for tests;
+   * in most cases you should not invoke it directly.
+   */
+  public void loadProps() {
+    // modify iotdb config
+    ioTDBConf.setRpcImplClassName(TSServiceClusterImpl.class.getName());
+    ioTDBConf.setEnableWal(false);
+
+    // cluster config
+    conf.setDefaultPath();
+    InputStream inputStream;
+    String url = System.getProperty(IoTDBConstant.IOTDB_CONF, null);
+    if (url == null) {
+      url = System.getProperty(IoTDBConstant.IOTDB_HOME, null);
+      if (url != null) {
+        url = url + File.separatorChar + "conf" + File.separatorChar + ClusterConfig.CONFIG_NAME;
+      } else {
+        LOGGER.warn(
+            "Cannot find IOTDB_HOME or CLUSTER_CONF environment variable when loading "
+                + "config file {}, use default configuration",
+            ClusterConfig.CONFIG_NAME);
+        conf.createAllPath();
+        return;
+      }
+    } else {
+      url += (File.separatorChar + ClusterConfig.CONFIG_NAME);
+    }
+
+    try {
+      inputStream = new FileInputStream(new File(url));
+    } catch (FileNotFoundException e) {
+      LOGGER.warn("Fail to find config file {}", url, e);
+      conf.createAllPath();
+      return;
+    }
+
+    LOGGER.info("Start to read config file {}", url);
+    Properties properties = new Properties();
+    try {
+      properties.load(inputStream);
+      conf.setNodes(properties.getProperty("nodes", ClusterConfig.DEFAULT_NODE)
+          .split(","));
+
+      conf.setReplication(Integer
+          .parseInt(properties.getProperty("replication",
+              Integer.toString(conf.getReplication()))));
+
+      conf.setIp(properties.getProperty("ip", conf.getIp()));
+
+      conf.setPort(Integer.parseInt(properties.getProperty("port",
+          Integer.toString(conf.getPort()))));
+
+      conf.setRaftLogPath(properties.getProperty("raft_log_path", conf.getRaftLogPath()));
+
+      conf.setRaftSnapshotPath(
+          properties.getProperty("raft_snapshot_path", conf.getRaftSnapshotPath()));
+
+      conf.setRaftMetadataPath(
+          properties.getProperty("raft_metadata_path", conf.getRaftMetadataPath()));
+
+      conf.setElectionTimeoutMs(Integer
+          .parseInt(properties.getProperty("election_timeout_ms",
+              Integer.toString(conf.getElectionTimeoutMs()))));
+
+      conf.setMaxCatchUpLogNum(Integer
+          .parseInt(properties.getProperty("max_catch_up_log_num",
+              Integer.toString(conf.getMaxCatchUpLogNum()))));
+
+      conf.setDelaySnapshot(Boolean
+          .parseBoolean(properties.getProperty("delay_snapshot",
+              Boolean.toString(conf.isDelaySnapshot()))));
+
+      conf.setDelayHours(Integer
+          .parseInt(properties.getProperty("delay_hours",
+              Integer.toString(conf.getDelayHours()))));
+
+      conf.setQpTaskRedoCount(Integer
+          .parseInt(properties.getProperty("qp_task_redo_count",
+              Integer.toString(conf.getQpTaskRedoCount()))));
+
+      conf.setQpTaskTimeout(Integer
+          .parseInt(properties.getProperty("qp_task_timeout_ms",
+              Integer.toString(conf.getQpTaskTimeout()))));
+
+      conf.setNumOfVirtualNodes(Integer
+          .parseInt(properties.getProperty("num_of_virtual_nodes",
+              Integer.toString(conf.getNumOfVirtualNodes()))));
+
+      conf.setConcurrentInnerRpcClientThread(Integer
+          .parseInt(properties.getProperty("concurrent_inner_rpc_client_thread",
+              Integer.toString(conf.getConcurrentInnerRpcClientThread()))));
+
+      conf.setMaxQueueNumOfQPTask(Integer
+          .parseInt(properties.getProperty("max_queue_num_of_inner_rpc_client",
+              Integer.toString(conf.getMaxQueueNumOfQPTask()))));
+
+      conf.setReadMetadataConsistencyLevel(Integer
+          .parseInt(properties.getProperty("read_metadata_consistency_level",
+              Integer.toString(conf.getReadMetadataConsistencyLevel()))));
+
+      conf.setReadDataConsistencyLevel(Integer
+          .parseInt(properties.getProperty("read_data_consistency_level",
+              Integer.toString(conf.getReadDataConsistencyLevel()))));
+
+      conf.setConcurrentQPSubTaskThread(Integer
+          .parseInt(properties.getProperty("concurrent_qp_sub_task_thread",
+              Integer.toString(conf.getConcurrentQPSubTaskThread()))));
+
+      conf.setBatchReadSize(Integer.parseInt(properties.getProperty("batch_read_size",
+          Integer.toString(conf.getBatchReadSize()))));
+
+      conf.setMaxCachedBatchDataListSize(Integer.parseInt(properties
+          .getProperty("max_cached_batch_data_list_size",
+              Integer.toString(conf.getMaxCachedBatchDataListSize()))));
+
+      if (conf.getConcurrentQPSubTaskThread() <= 0) {
+        conf.setConcurrentQPSubTaskThread(Runtime.getRuntime().availableProcessors() * 10);
+      }
+
+      if (conf.getConcurrentInnerRpcClientThread() <= 0) {
+        conf.setConcurrentInnerRpcClientThread(Runtime.getRuntime().availableProcessors() * 10);
+      }
+
+      if (conf.getMaxCachedBatchDataListSize() <= 0) {
+        conf.setMaxCachedBatchDataListSize(2);
+      }
+
+      if (conf.getBatchReadSize() <= 0) {
+        conf.setBatchReadSize(10000);
+      }
+
+    } catch (IOException e) {
+      LOGGER.warn("Cannot load config file because, use default configuration", e);
+    } catch (Exception e) {
+      LOGGER.warn("Incorrect format in config file, use default configuration", e);
+    } finally {
+      conf.createAllPath();
+      try {
+        inputStream.close();
+      } catch (IOException e) {
+        LOGGER.error("Fail to close config file input stream because ", e);
+      }
+    }
+  }
+
+  private static class ClusterDescriptorHolder {
+
+    private static final ClusterDescriptor INSTANCE = new ClusterDescriptor();
+  }
+}
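
For reference, the keys read by loadProps() above correspond to an iotdb-cluster.properties file along the following lines (values illustrative; any missing key falls back to the ClusterConfig default):

  nodes=192.168.130.12:8888,192.168.130.14:8888,192.168.130.15:8888
  replication=3
  ip=192.168.130.12
  port=8888
  election_timeout_ms=1000
  qp_task_redo_count=10
  qp_task_timeout_ms=5000
  num_of_virtual_nodes=2
  concurrent_inner_rpc_client_thread=0
  max_queue_num_of_inner_rpc_client=500
  read_metadata_consistency_level=1
  read_data_consistency_level=1
  concurrent_qp_sub_task_thread=0
  batch_read_size=10000
  max_cached_batch_data_list_size=2
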
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
new file mode 100644
index 0000000..85ab80d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity;
+
+import com.alipay.remoting.rpc.RpcServer;
+import com.alipay.sofa.jraft.entity.PeerId;
+import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.data.DataPartitionHolder;
+import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetricAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.DataGroupNonQueryAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.MetaGroupNonQueryAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataInStringAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryPathsAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QuerySeriesTypeAsyncProcessor;
+import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryTimeSeriesAsyncProcessor;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.service.IoTDB;
+import org.apache.iotdb.db.service.RegisterManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Each server represents a node in the physical world.
+ */
+public class Server {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(Server.class);
+
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
+  private static final RaftNodeAsClientManager CLIENT_MANAGER = RaftNodeAsClientManager
+      .getInstance();
+
+  /**
+   * Metadata Group Holder
+   */
+  private MetadataHolder metadataHolder;
+
+  /**
+   * Data group holder map. Key: group id.
+   */
+  private Map<String, DataPartitionHolder> dataPartitionHolderMap;
+
+  /**
+   * PeerId of this physical node
+   */
+  private PeerId serverId;
+
+  /**
+   * IoTDB stand-alone instance
+   */
+  private IoTDB iotdb;
+
+  private RegisterManager registerManager = new RegisterManager();
+
+  public static void main(String[] args) {
+    Server server = Server.getInstance();
+    server.start();
+  }
+
+  public void start() {
+    /** Start the stand-alone IoTDB; note that its internal JDBC server is replaced with the cluster version **/
+    iotdb = new IoTDB();
+    iotdb.active();
+    CLIENT_MANAGER.init();
+
+    /** Init raft groups **/
+    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(CLUSTER_CONF.getNodes());
+    serverId = new PeerId(CLUSTER_CONF.getIp(), CLUSTER_CONF.getPort());
+
+    // Rpc between raft groups
+    RpcServer rpcServer = new RpcServer(serverId.getPort());
+    RaftRpcServerFactory.addRaftRequestProcessors(rpcServer);
+
+    registerNonQueryProcessor(rpcServer);
+    registerQueryMetadataProcessor(rpcServer);
+    registerQueryMetricProcessor(rpcServer);
+
+    metadataHolder = new MetadataRaftHolder(peerIds, serverId, rpcServer, true);
+    metadataHolder.init();
+    metadataHolder.start();
+
+    LOGGER.info("Metadata group has started.");
+
+    dataPartitionHolderMap = new HashMap<>();
+    Router router = Router.getInstance();
+    PhysicalNode[][] groups = router.getGroupsNodes(serverId.getIp(), serverId.getPort());
+
+    for (int i = 0; i < groups.length; i++) {
+      PhysicalNode[] group = groups[i];
+      String groupId = router.getGroupID(group);
+      DataPartitionHolder dataPartitionHolder = new DataPartitionRaftHolder(groupId,
+          RaftUtils.getPeerIdArrayFrom(group), serverId, rpcServer, false);
+      dataPartitionHolder.init();
+      dataPartitionHolder.start();
+      dataPartitionHolderMap.put(groupId, dataPartitionHolder);
+      LOGGER.info("{} group has started", groupId);
+      Router.getInstance().showPhysicalNodes(groupId);
+    }
+  }
+
+  private void registerNonQueryProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new DataGroupNonQueryAsyncProcessor());
+    rpcServer.registerUserProcessor(new MetaGroupNonQueryAsyncProcessor());
+  }
+
+  private void registerQueryMetadataProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new QueryTimeSeriesAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryMetadataInStringAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryMetadataAsyncProcessor());
+    rpcServer.registerUserProcessor(new QuerySeriesTypeAsyncProcessor());
+    rpcServer.registerUserProcessor(new QueryPathsAsyncProcessor());
+  }
+
+  /**
+   * for nodetool
+   */
+  private void registerQueryMetricProcessor(RpcServer rpcServer) {
+    rpcServer.registerUserProcessor(new QueryMetricAsyncProcessor());
+  }
+
+  public void stop() throws ProcessorException {
+    QPTaskThreadManager.getInstance().close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
+    iotdb.deactivate();
+    CLIENT_MANAGER.shutdown();
+    metadataHolder.stop();
+    for (DataPartitionHolder dataPartitionHolder : dataPartitionHolderMap.values()) {
+      dataPartitionHolder.stop();
+    }
+
+    registerManager.deregisterAll();
+  }
+
+  public PeerId getServerId() {
+    return serverId;
+  }
+
+  public MetadataHolder getMetadataHolder() {
+    return metadataHolder;
+  }
+
+  public Map<String, DataPartitionHolder> getDataPartitionHolderMap() {
+    return dataPartitionHolderMap;
+  }
+
+  public DataPartitionHolder getDataPartitionHolder(String groupId) {
+    return dataPartitionHolderMap.get(groupId);
+  }
+
+  public static final Server getInstance() {
+    return ServerHolder.INSTANCE;
+  }
+
+  private static class ServerHolder {
+
+    private static final Server INSTANCE = new Server();
+
+    private ServerHolder() {
+
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/DataPartitionHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/DataPartitionHolder.java
new file mode 100644
index 0000000..7a9f293
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/DataPartitionHolder.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.data;
+
+import org.apache.iotdb.cluster.entity.service.IService;
+
+public abstract class DataPartitionHolder implements IPartitionHolder {
+
+  protected IService service;
+
+  @Override
+  public void init() {
+    service.init();
+  }
+
+  @Override
+  public void start() {
+    service.start();
+  }
+
+  @Override
+  public void stop() {
+    service.stop();
+  }
+
+  public IService getService() {
+    return service;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/IPartitionHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/IPartitionHolder.java
new file mode 100644
index 0000000..b658590
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/data/IPartitionHolder.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.data;
+
+public interface IPartitionHolder {
+  void init();
+  void start();
+  void stop();
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/IMetadataHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/IMetadataHolder.java
new file mode 100644
index 0000000..2d66119
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/IMetadataHolder.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.metadata;
+
+public interface IMetadataHolder {
+
+  void init();
+
+  void start();
+
+  void stop();
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/MetadataHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/MetadataHolder.java
new file mode 100644
index 0000000..10157c5
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/metadata/MetadataHolder.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.metadata;
+
+import org.apache.iotdb.cluster.entity.service.IService;
+
+public abstract class MetadataHolder implements IMetadataHolder {
+
+  protected IService service;
+
+  @Override
+  public void init() {
+    service.init();
+  }
+
+  @Override
+  public void start() {
+    service.start();
+  }
+
+  @Override
+  public void stop() {
+    service.stop();
+  }
+
+  public IService getService() {
+    return service;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataPartitionRaftHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataPartitionRaftHolder.java
new file mode 100644
index 0000000..88e69a7
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataPartitionRaftHolder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.raft;
+
+import com.alipay.remoting.rpc.RpcServer;
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.entity.data.DataPartitionHolder;
+
+public class DataPartitionRaftHolder extends DataPartitionHolder {
+
+  private String groupId;
+  private PeerId serverId;
+  private DataStateMachine fsm;
+
+  public DataPartitionRaftHolder(String groupId, PeerId[] peerIds, PeerId serverId, RpcServer rpcServer, boolean startRpcServer) {
+    this.groupId = groupId;
+    this.serverId = serverId;
+    fsm = new DataStateMachine(groupId, serverId);
+    service = new RaftService(groupId, peerIds, serverId, rpcServer, fsm, startRpcServer);
+  }
+
+  public DataStateMachine getFsm() {
+    return fsm;
+  }
+
+  public void setFsm(DataStateMachine fsm) {
+    this.fsm = fsm;
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(String groupId) {
+    this.groupId = groupId;
+  }
+
+  public PeerId getServerId() {
+    return serverId;
+  }
+
+  public void setServerId(PeerId serverId) {
+    this.serverId = serverId;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
new file mode 100644
index 0000000..eb9db25
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/DataStateMachine.java
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.raft;
+
+import com.alipay.remoting.exception.CodecException;
+import com.alipay.remoting.serialization.SerializerManager;
+import com.alipay.sofa.jraft.Closure;
+import com.alipay.sofa.jraft.Iterator;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.core.StateMachineAdapter;
+import com.alipay.sofa.jraft.entity.LeaderChangeContext;
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
+import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * StateMachine of data group node.
+ */
+public class DataStateMachine extends StateMachineAdapter {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(DataStateMachine.class);
+
+  /**
+   * QP executor to apply task
+   */
+  private OverflowQPExecutor qpExecutor = new OverflowQPExecutor();
+
+  private PeerId peerId;
+
+  private String groupId;
+
+  private AtomicLong leaderTerm = new AtomicLong(-1);
+
+  public DataStateMachine(String groupId, PeerId peerId) {
+    this.peerId = peerId;
+    this.groupId = groupId;
+  }
+
+  /**
+   * Only deals with non-query operations. The operations are executed by {@code qpExecutor}.
+   *
+   * @param iterator task iterator
+   */
+  @Override
+  public void onApply(Iterator iterator) {
+    while (iterator.hasNext()) {
+      final Closure closure = iterator.done();
+      final ByteBuffer data = iterator.getData();
+      applySingleTask(closure, data);
+      iterator.next();
+    }
+  }
+
+  /**
+   * Apply a single raft task and, if a closure is present, run it with the final status. The
+   * closure is only present when this node is the leader of the state machine's data group.
+   *
+   * @param closure if this node is leader, closure is not null.
+   * @param data Request data
+   */
+  private void applySingleTask(Closure closure, ByteBuffer data) {
+    /** If closure is not null, the node is leader **/
+    DataGroupNonQueryResponse response = (closure == null) ? null
+        : (DataGroupNonQueryResponse) ((ResponseClosure) closure).getResponse();
+    DataGroupNonQueryRequest request;
+    try {
+      request = SerializerManager.getSerializer(SerializerManager.Hessian2)
+          .deserialize(data.array(), DataGroupNonQueryRequest.class.getName());
+    } catch (final CodecException e) {
+      LOGGER.error("Fail to deserialize DataGroupNonQueryRequest", e);
+      if (closure != null) {
+        closure.run(RaftUtils.createErrorStatus(e.getMessage()));
+      }
+      return;
+    }
+
+    Status status = Status.OK();
+    List<byte[]> planBytes = request.getPhysicalPlanBytes();
+
+    LOGGER.debug("State machine batch size(): {}", planBytes.size());
+
+    /** Handle batch plans (planBytes.size() > 1) or a single plan (planBytes.size() == 1) **/
+    for (byte[] planByte : planBytes) {
+      try {
+        PhysicalPlan plan = PhysicalPlanLogTransfer.logToOperator(planByte);
+
+        LOGGER.debug("OperatorType :{}", plan.getOperatorType());
+        /** If the request creates a timeseries and the storage group of the path does not exist, a null-read must first be sent to the metadata group to avoid getting out of sync **/
+        if (plan.getOperatorType() == OperatorType.CREATE_TIMESERIES && !checkPathExistence(
+            ((MetadataPlan) plan).getPath().getFullPath())) {
+          RaftUtils.handleNullReadToMetaGroup(status);
+          if(!status.isOk()){
+            addResult(response, false);
+            addErrorMsg(response, status.getErrorMsg());
+            continue;
+          }
+        }
+        qpExecutor.processNonQuery(plan);
+        addResult(response, true);
+      } catch (ProcessorException | IOException | PathErrorException e) {
+        LOGGER.error("Execute physical plan error", e);
+        status = new Status(-1, e.getMessage());
+        addResult(response, false);
+        addErrorMsg(response, status.getErrorMsg());
+      }
+    }
+    if (closure != null) {
+      closure.run(status);
+    }
+  }
+
+  /**
+   * Add result to response
+   */
+  private void addResult(DataGroupNonQueryResponse response, boolean result) {
+    if (response != null) {
+      response.addResult(result);
+    }
+  }
+
+  /**
+   * Add error message to response
+   */
+  private void addErrorMsg(DataGroupNonQueryResponse response, String errorMsg) {
+    if (response != null) {
+      response.addErrorMsg(errorMsg);
+    }
+  }
+
+  /**
+   * Check the existence of a specific path
+   */
+  private boolean checkPathExistence(String path) throws PathErrorException {
+    return !MManager.getInstance().getAllFileNamesByPath(path).isEmpty();
+  }
+
+  @Override
+  public void onLeaderStart(final long term) {
+    RaftUtils.updateRaftGroupLeader(groupId, peerId);
+    LOGGER.info("On leader start, {} starts to be leader of {}", peerId, groupId);
+    this.leaderTerm.set(term);
+  }
+
+  @Override
+  public void onStartFollowing(LeaderChangeContext ctx) {
+    RaftUtils.updateRaftGroupLeader(groupId, ctx.getLeaderId());
+    this.leaderTerm.set(-1);
+    LOGGER.info("Start following, {} starts to be leader of {}", ctx.getLeaderId(), groupId);
+  }
+
+  @Override
+  public void onLeaderStop(final Status status) {
+    this.leaderTerm.set(-1);
+  }
+
+  public boolean isLeader() {
+    return this.leaderTerm.get() > 0;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataRaftHolder.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataRaftHolder.java
new file mode 100644
index 0000000..44fdd27
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataRaftHolder.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.raft;
+
+import com.alipay.remoting.rpc.RpcServer;
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
+
+public class MetadataRaftHolder extends MetadataHolder {
+
+  private MetadataStateManchine fsm;
+
+  public MetadataRaftHolder(PeerId[] peerIds, PeerId serverId, RpcServer rpcServer, boolean startRpcServer) {
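+    /** Bind the metadata state machine to raft group METADATA_GROUP_ID; the given RpcServer is only started here when startRpcServer is true **/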
+    fsm = new MetadataStateManchine(ClusterConfig.METADATA_GROUP_ID, serverId);
+    service = new RaftService(ClusterConfig.METADATA_GROUP_ID, peerIds, serverId, rpcServer, fsm, startRpcServer);
+  }
+
+  public MetadataStateManchine getFsm() {
+    return fsm;
+  }
+
+  public void setFsm(MetadataStateManchine fsm) {
+    this.fsm = fsm;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
new file mode 100644
index 0000000..78dd3e8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/MetadataStateManchine.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.raft;
+
+import com.alipay.remoting.exception.CodecException;
+import com.alipay.remoting.serialization.SerializerManager;
+import com.alipay.sofa.jraft.Closure;
+import com.alipay.sofa.jraft.Iterator;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.core.StateMachineAdapter;
+import com.alipay.sofa.jraft.entity.LeaderChangeContext;
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
+import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.AuthorPlan;
+import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * StateMachine of metadata group node.
+ */
+public class MetadataStateManchine extends StateMachineAdapter {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(MetadataStateManchine.class);
+
+  /**
+   * Manager of storage groups
+   **/
+  private MManager mManager = MManager.getInstance();
+
+  private OverflowQPExecutor qpExecutor = new OverflowQPExecutor();
+
+  private PeerId peerId;
+
+  private String groupId;
+
+  private AtomicLong leaderTerm = new AtomicLong(-1);
+
+  public MetadataStateManchine(String groupId, PeerId peerId) {
+    this.peerId = peerId;
+    this.groupId = groupId;
+  }
+
+  /**
+   * Update the storage group list and the user profile map based on the QPTask read from the raft
+   * log. Storage groups are handled by {@code mManager}, user info by {@code qpExecutor}.
+   *
+   * @param iterator task iterator
+   */
+  @Override
+  public void onApply(Iterator iterator) {
+    while (iterator.hasNext()) {
+      final ByteBuffer data = iterator.getData();
+      final Closure closure = iterator.done();
+      applySingleTask(closure, data);
+      iterator.next();
+    }
+  }
+
+  private void applySingleTask(Closure closure, ByteBuffer data) {
+    /** If closure is not null, the node is leader **/
+    BasicResponse response = (closure == null) ? null : ((ResponseClosure) closure).getResponse();
+    MetaGroupNonQueryRequest request;
+
+    try {
+      request = SerializerManager.getSerializer(SerializerManager.Hessian2)
+          .deserialize(data.array(), MetaGroupNonQueryRequest.class.getName());
+    } catch (final CodecException e) {
+      LOGGER.error("Fail to deserialize MetadataGroupNonQueryRequest", e);
+      if(closure != null){
+        closure.run(RaftUtils.createErrorStatus(e.getMessage()));
+      }
+      return;
+    }
+    Status status = new Status();
+
+    List<byte[]> planBytes = request.getPhysicalPlanBytes();
+
+    /** handle a batch of plans (planBytes.size() > 1) or a single plan (planBytes.size() == 1) **/
+    for (byte[] planByte : planBytes) {
+      try {
+        PhysicalPlan physicalPlan = PhysicalPlanLogTransfer
+            .logToOperator(planByte);
+        if (physicalPlan.getOperatorType() == OperatorType.SET_STORAGE_GROUP) {
+          MetadataPlan plan = (MetadataPlan) physicalPlan;
+          addStorageGroup(plan.getPath().getFullPath());
+        } else {
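+          /** All other plans routed to the metadata group are user/role management (author) plans **/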
+          AuthorPlan plan = (AuthorPlan) physicalPlan;
+          qpExecutor.processNonQuery(plan);
+        }
+        addResult(response, true);
+      } catch (IOException | PathErrorException e) {
+        LOGGER.error("Execute metadata plan error", e);
+        status = new Status(-1, e.getMessage());
+        addResult(response, false);
+      } catch (ProcessorException e) {
+        LOGGER.error("Execute author plan error", e);
+        status = new Status(-1, e.getMessage());
+        addResult(response, false);
+      }
+    }
+    if (closure != null) {
+      closure.run(status);
+    }
+  }
+
+  /**
+   * Add result to response
+   */
+  private void addResult(BasicResponse response, boolean result){
+    if(response != null){
+      response.addResult(result);
+    }
+  }
+
+  public void addStorageGroup(String sg) throws IOException, PathErrorException {
+    mManager.setStorageLevelToMTree(sg);
+  }
+
+  public Set<String> getAllStorageGroups() {
+    return mManager.getAllStorageGroup();
+  }
+
+  @Override
+  public void onLeaderStart(final long term) {
+    RaftUtils.updateRaftGroupLeader(groupId, peerId);
+    LOGGER.info("On leader start, {} starts to be leader of {}", peerId, groupId);
+    this.leaderTerm.set(term);
+  }
+
+  @Override
+  public void onStartFollowing(LeaderChangeContext ctx) {
+    RaftUtils.updateRaftGroupLeader(groupId, ctx.getLeaderId());
+    this.leaderTerm.set(-1);
+    LOGGER.info("Start following, {} starts to be leader of {}", ctx.getLeaderId(), groupId);
+  }
+
+  @Override
+  public void onLeaderStop(final Status status) {
+    this.leaderTerm.set(-1);
+  }
+
+  public boolean isLeader() {
+    return this.leaderTerm.get() > 0;
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
new file mode 100644
index 0000000..45d2221
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/raft/RaftService.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.raft;
+
+import com.alipay.remoting.rpc.RpcServer;
+import com.alipay.sofa.jraft.Node;
+import com.alipay.sofa.jraft.RaftGroupService;
+import com.alipay.sofa.jraft.StateMachine;
+import com.alipay.sofa.jraft.conf.Configuration;
+import com.alipay.sofa.jraft.entity.PeerId;
+import com.alipay.sofa.jraft.option.NodeOptions;
+import com.codahale.metrics.ConsoleReporter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.service.IService;
+import org.apache.iotdb.db.utils.FilePathUtils;
+
+public class RaftService implements IService {
+
+  private ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
+  private List<PeerId> peerIdList;
+  private Node node;
+  private StateMachine fsm;
+  private String groupId;
+  private RaftGroupService raftGroupService;
+  private boolean startRpcServer;
+
+  public RaftService(String groupId, PeerId[] peerIds, PeerId serverId, RpcServer rpcServer, StateMachine fsm, boolean startRpcServer) {
+    this.peerIdList = new ArrayList<>(peerIds.length);
+    peerIdList.addAll(Arrays.asList(peerIds));
+    this.fsm = fsm;
+    this.groupId = groupId;
+    this.startRpcServer = startRpcServer;
+    raftGroupService = new RaftGroupService(groupId, serverId, null, rpcServer);
+  }
+
+  @Override
+  public void init() {
+    NodeOptions nodeOptions = new NodeOptions();
+    nodeOptions.setDisableCli(false);
+    nodeOptions.setFsm(this.fsm);
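+    /** Each raft group keeps its raft log, raft metadata and snapshots in a groupId-specific directory **/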
+    nodeOptions.setLogUri(FilePathUtils.regularizePath(config.getRaftLogPath()) + groupId);
+    nodeOptions.setRaftMetaUri(FilePathUtils.regularizePath(config.getRaftMetadataPath()) + groupId);
+    nodeOptions.setSnapshotUri(FilePathUtils.regularizePath(config.getRaftSnapshotPath()) + groupId);
+    nodeOptions.setElectionTimeoutMs(config.getElectionTimeoutMs());
+    nodeOptions.setEnableMetrics(true);
+    final Configuration initConf = new Configuration();
+    initConf.setPeers(peerIdList);
+    nodeOptions.setInitialConf(initConf);
+    raftGroupService.setNodeOptions(nodeOptions);
+  }
+
+  @Override
+  public void start() {
+    this.node = raftGroupService.start(startRpcServer);
+
+//    ConsoleReporter reporter = ConsoleReporter.forRegistry(node.getNodeMetrics().getMetricRegistry())
+//        .convertRatesTo(TimeUnit.SECONDS)
+//        .convertDurationsTo(TimeUnit.MILLISECONDS)
+//        .build();
+//    reporter.start(30, TimeUnit.SECONDS);
+  }
+
+  @Override
+  public void stop() {
+    raftGroupService.shutdown();
+  }
+
+  public List<PeerId> getPeerIdList() {
+    return peerIdList;
+  }
+
+  public RaftGroupService getRaftGroupService() {
+    return raftGroupService;
+  }
+
+  public Node getNode() {
+    return node;
+  }
+
+  public void setNode(Node node) {
+    this.node = node;
+  }
+
+  public StateMachine getFsm() {
+    return fsm;
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/service/IService.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/service/IService.java
new file mode 100644
index 0000000..2bf336b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/service/IService.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.entity.service;
+
+public interface IService {
+
+  void init();
+
+  void start();
+
+  void stop();
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConsistencyLevelException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConsistencyLevelException.java
new file mode 100644
index 0000000..d45d4af
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConsistencyLevelException.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.exception;
+
+public class ConsistencyLevelException extends Exception {
+
+  private static final long serialVersionUID = -6909575387553857034L;
+
+  public ConsistencyLevelException() {
+    super();
+  }
+
+  public ConsistencyLevelException(Exception pathExcp) {
+    super(pathExcp.getMessage());
+  }
+
+  public ConsistencyLevelException(String msg) {
+    super(msg);
+  }
+
+  public ConsistencyLevelException(Throwable throwable) {
+    super(throwable.getMessage());
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ErrorConfigureExecption.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ErrorConfigureExecption.java
new file mode 100644
index 0000000..7bbfa4d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ErrorConfigureExecption.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.exception;
+
+public class ErrorConfigureExecption extends RuntimeException {
+
+  private static final long serialVersionUID = 5530077196040763508L;
+
+  public ErrorConfigureExecption() {
+    super();
+  }
+
+  public ErrorConfigureExecption(Exception pathExcp) {
+    super(pathExcp.getMessage());
+  }
+
+  public ErrorConfigureExecption(String msg) {
+    super(msg);
+  }
+
+  public ErrorConfigureExecption(Throwable throwable) {
+    super(throwable.getMessage());
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/RaftConnectionException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/RaftConnectionException.java
new file mode 100644
index 0000000..79ab812
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/exception/RaftConnectionException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.exception;
+
+public class RaftConnectionException extends Exception {
+
+  private static final long serialVersionUID = -8842105888155009520L;
+
+  public RaftConnectionException(String message) {
+    super(message);
+  }
+
+  public RaftConnectionException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public RaftConnectionException(Throwable cause) {
+    super(cause);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
new file mode 100644
index 0000000..96f150f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.metadata.MManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractQPExecutor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(AbstractQPExecutor.class);
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
+  protected Router router = Router.getInstance();
+
+  protected MManager mManager = MManager.getInstance();
+
+  protected final Server server = Server.getInstance();
+
+  /**
+   * The task in progress.
+   */
+  protected ThreadLocal<QPTask> currentTask = new ThreadLocal<>();
+
+  /**
+   * Maximum number of retries for a single task
+   */
+  private static final int TASK_MAX_RETRY = CLUSTER_CONFIG.getQpTaskRedoCount();
+
+  /**
+   * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private ThreadLocal<Integer> readMetadataConsistencyLevel = new ThreadLocal<>();
+
+  /**
+   * ReadDataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
+   */
+  private ThreadLocal<Integer> readDataConsistencyLevel = new ThreadLocal<>();
+
+  public AbstractQPExecutor() {
+  }
+
+  /**
+   * Initialize the consistency levels (<code>ThreadLocal</code>) if they have not been set yet
+   */
+  private void checkInitConsistencyLevel() {
+    if (readMetadataConsistencyLevel.get() == null) {
+      readMetadataConsistencyLevel.set(CLUSTER_CONFIG.getReadMetadataConsistencyLevel());
+    }
+    if (readDataConsistencyLevel.get() == null) {
+      readDataConsistencyLevel.set(CLUSTER_CONFIG.getReadDataConsistencyLevel());
+    }
+  }
+
+  /**
+   * Synchronously handle a non-query QPTask: send it asynchronously and wait for its response.
+   *
+   * @param task request QPTask
+   * @param taskRetryNum number of QPTask retries due to timeout or redirection
+   * @return basic response
+   */
+  protected BasicResponse syncHandleNonQuerySingleTaskGetRes(SingleQPTask task, int taskRetryNum)
+      throws InterruptedException, RaftConnectionException {
+    asyncSendNonQuerySingleTask(task, taskRetryNum);
+    return syncGetNonQueryRes(task, taskRetryNum);
+  }
+
+  /**
+   * Asynchronously send an RPC task via the client.
+   *
+   * @param task rpc task
+   * @param taskRetryNum retry count of the task
+   */
+  protected void asyncSendNonQuerySingleTask(SingleQPTask task, int taskRetryNum)
+      throws RaftConnectionException {
+    if (taskRetryNum >= TASK_MAX_RETRY) {
+      throw new RaftConnectionException(String.format("QPTask retries reach the upper bound %s",
+          TASK_MAX_RETRY));
+    }
+    RaftNodeAsClientManager.getInstance().produceQPTask(task);
+  }
+
+  /**
+   * Synchronously get the task response. If the task was redirected or ended in an exception state,
+   * it is resent. Note: an exception state means the error occurred while the task was being sent,
+   * not while it was being executed.
+   *
+   * @param task rpc task
+   * @param taskRetryNum retry count of the task
+   */
+  private BasicResponse syncGetNonQueryRes(SingleQPTask task, int taskRetryNum)
+      throws InterruptedException, RaftConnectionException {
+    task.await();
+    PeerId leader;
+    if (task.getTaskState() != TaskState.FINISH) {
+      if (task.getTaskState() == TaskState.REDIRECT) {
+        /** redirect to the right leader **/
+        leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
+        LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
+        RaftUtils.updateRaftGroupLeader(task.getRequest().getGroupID(), leader);
+      } else {
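+        /** The task failed while being sent: drop the cached leader of this group and look it up again before retrying **/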
+        String groupId = task.getRequest().getGroupID();
+        RaftUtils.removeCachedRaftGroupLeader(groupId);
+        LOGGER.debug("Remove cached raft group leader of {}", groupId);
+        leader = RaftUtils.getLeaderPeerID(groupId);
+      }
+      task.setTargetNode(leader);
+      task.resetTask();
+      return syncHandleNonQuerySingleTaskGetRes(task, taskRetryNum + 1);
+    }
+    return task.getResponse();
+  }
+
+  public void shutdown() {
+    if (currentTask.get() != null) {
+      currentTask.get().shutdown();
+    }
+  }
+
+  public void setReadMetadataConsistencyLevel(int level) throws ConsistencyLevelException {
+    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
+      readMetadataConsistencyLevel.set(level);
+    } else {
+      throw new ConsistencyLevelException(String.format("Consistency level %d is not supported", level));
+    }
+  }
+
+  public void setReadDataConsistencyLevel(int level) throws ConsistencyLevelException {
+    if (level <= ClusterConstant.MAX_CONSISTENCY_LEVEL) {
+      readDataConsistencyLevel.set(level);
+    } else {
+      throw new ConsistencyLevelException(String.format("Consistency level %d is not supported", level));
+    }
+  }
+
+  public int getReadMetadataConsistencyLevel() {
+    checkInitConsistencyLevel();
+    return readMetadataConsistencyLevel.get();
+  }
+
+  public int getReadDataConsistencyLevel() {
+    checkInitConsistencyLevel();
+    return readDataConsistencyLevel.get();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
new file mode 100644
index 0000000..8f5c513
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.qp.constant.SQLConstant;
+import org.apache.iotdb.db.qp.executor.IQueryProcessExecutor;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.utils.Pair;
+
+public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements IQueryProcessExecutor {
+
+  private ThreadLocal<Integer> fetchSize = new ThreadLocal<>();
+
+  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+
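+  /** Query execution is not implemented by this executor yet; the methods below simply return null (presumably completed in a later revision) **/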
+  @Override
+  public QueryDataSet processQuery(QueryPlan queryPlan, QueryContext context)
+      throws IOException, FileNodeManagerException, PathErrorException,
+      QueryFilterOptimizationException, ProcessorException {
+    return null;
+  }
+
+  @Override
+  public QueryDataSet aggregate(List<Path> paths, List<String> aggres, IExpression expression,
+      QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException, QueryFilterOptimizationException {
+    return null;
+  }
+
+  @Override
+  public QueryDataSet groupBy(List<Path> paths, List<String> aggres, IExpression expression,
+      long unit, long origin, List<Pair<Long, Long>> intervals, QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException, QueryFilterOptimizationException {
+    return null;
+  }
+
+  @Override
+  public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillTypes,
+      QueryContext context)
+      throws ProcessorException, IOException, PathErrorException, FileNodeManagerException {
+    return null;
+  }
+
+  @Override
+  public TSDataType getSeriesType(Path path) throws PathErrorException {
+    if (path.equals(SQLConstant.RESERVED_TIME)) {
+      return TSDataType.INT64;
+    }
+    if (path.equals(SQLConstant.RESERVED_FREQ)) {
+      return TSDataType.FLOAT;
+    }
+    try {
+      return queryMetadataExecutor.processSeriesTypeQuery(path.getFullPath());
+    } catch (InterruptedException | ProcessorException e) {
+      throw new PathErrorException(e.getMessage());
+    }
+  }
+
+  @Override
+  public List<String> getAllPaths(String originPath)
+      throws PathErrorException {
+    try {
+      return queryMetadataExecutor.processPathsQuery(originPath);
+    } catch (InterruptedException | ProcessorException e) {
+      throw new PathErrorException(e.getMessage());
+    }
+  }
+
+  @Override
+  public boolean judgePathExists(Path fullPath) {
+    try {
+      List<List<String>> results = queryMetadataExecutor.processTimeSeriesQuery(fullPath.toString());
+      return !results.isEmpty();
+    } catch (InterruptedException | PathErrorException | ProcessorException e) {
+      return false;
+    }
+  }
+
+  @Override
+  public int getFetchSize() {
+    return fetchSize.get();
+  }
+
+  @Override
+  public void setFetchSize(int fetchSize) {
+    this.fetchSize.set(fetchSize);
+  }
+
+  @Override
+  public boolean update(Path path, long startTime, long endTime, String value)
+      throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean delete(List<Path> paths, long deleteTime) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean delete(Path path, long deleteTime) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int insert(Path path, long insertTime, String value) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int multiInsert(String deviceId, long insertTime, String[] measurementList,
+      String[] insertValues) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean processNonQuery(PhysicalPlan plan) throws ProcessorException {
+    throw new UnsupportedOperationException();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
new file mode 100644
index 0000000..f62a83f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.BatchQPTask;
+import org.apache.iotdb.cluster.qp.task.QPTask;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.qp.logical.sys.MetadataOperator;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
+import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
+import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handle distributed non-query logic
+ */
+public class NonQueryExecutor extends AbstractQPExecutor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(NonQueryExecutor.class);
+
+  private static final String OPERATION_NOT_SUPPORTED = "Operation %s is not supported";
+
+  /**
+   * When executing a metadata plan, an empty read (null-read) of the metadata group is required:
+   * once for a single non-query request, or before the first metadata plan of a batch request
+   */
+  private boolean emptyTaskEnable = false;
+
+  public NonQueryExecutor() {
+    super();
+  }
+
+  /**
+   * Execute single non query request.
+   */
+  public boolean processNonQuery(PhysicalPlan plan) throws ProcessorException {
+    try {
+      emptyTaskEnable = true;
+      String groupId = getGroupIdFromPhysicalPlan(plan);
+      return handleNonQueryRequest(groupId, plan);
+    } catch (RaftConnectionException e) {
+      LOGGER.error(e.getMessage());
+      throw new ProcessorException("Raft connection occurs error.", e);
+    } catch (InterruptedException | PathErrorException | IOException e) {
+      throw new ProcessorException(e);
+    }
+  }
+
+  /**
+   * Execute batch statement by physical plans and update results.
+   *
+   * @param physicalPlans List of physical plan
+   * @param batchResult batch result
+   */
+  public void processBatch(PhysicalPlan[] physicalPlans, BatchResult batchResult)
+      throws InterruptedException, ProcessorException {
+
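+    /** First do a null-read on the metadata group so that storage-group information is up to date before plans are routed **/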
+    Status nullReadTaskStatus = Status.OK();
+    RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
+    if (!nullReadTaskStatus.isOk()) {
+      throw new ProcessorException("Null read while processing batch failed");
+    }
+    emptyTaskEnable = false;
+
+    /* 1. Classify physical plans by group id */
+    Map<String, List<PhysicalPlan>> physicalPlansMap = new HashMap<>();
+    Map<String, List<Integer>> planIndexMap = new HashMap<>();
+    classifyPhysicalPlanByGroupId(physicalPlans, batchResult, physicalPlansMap, planIndexMap);
+
+    /* 2. Construct Multiple Data Group Requests */
+    Map<String, SingleQPTask> subTaskMap = new HashMap<>();
+    constructMultipleRequests(physicalPlansMap, planIndexMap, subTaskMap, batchResult);
+
+    /* 3. Execute Multiple Sub Tasks */
+    BatchQPTask task = new BatchQPTask(subTaskMap.size(), batchResult, subTaskMap, planIndexMap);
+    currentTask.set(task);
+    task.executeBy(this);
+    task.await();
+  }
+
+  /**
+   * Classify batch physical plan by groupId
+   */
+  private void classifyPhysicalPlanByGroupId(PhysicalPlan[] physicalPlans, BatchResult batchResult,
+      Map<String, List<PhysicalPlan>> physicalPlansMap, Map<String, List<Integer>> planIndexMap) {
+    int[] result = batchResult.getResultArray();
+    for (int i = 0; i < result.length; i++) {
+      /** Check if the request has failed. If it has failed, ignore it. **/
+      if (result[i] != Statement.EXECUTE_FAILED) {
+        PhysicalPlan plan = physicalPlans[i];
+        try {
+          String groupId = getGroupIdFromPhysicalPlan(plan);
+          if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+            // This handles set-storage-group statements and role/user management statements.
+            LOGGER.debug("Execute metadata group task");
+            boolean executeResult = handleNonQueryRequest(groupId, plan);
+            emptyTaskEnable = true;
+            result[i] = executeResult ? Statement.SUCCESS_NO_INFO
+                : Statement.EXECUTE_FAILED;
+            batchResult.setAllSuccessful(executeResult);
+          } else {
+            physicalPlansMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(plan);
+            planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(i);
+          }
+        } catch (PathErrorException | ProcessorException | IOException | RaftConnectionException | InterruptedException e) {
+          result[i] = Statement.EXECUTE_FAILED;
+          batchResult.setAllSuccessful(false);
+          batchResult.addBatchErrorMessage(i, e.getMessage());
+          LOGGER.error(e.getMessage());
+        }
+      }
+    }
+  }
+
+  /**
+   * Construct multiple data group requests
+   */
+  private void constructMultipleRequests(Map<String, List<PhysicalPlan>> physicalPlansMap,
+      Map<String, List<Integer>> planIndexMap, Map<String, SingleQPTask> subTaskMap,
+      BatchResult batchResult) {
+    int[] result = batchResult.getResultArray();
+    for (Entry<String, List<PhysicalPlan>> entry : physicalPlansMap.entrySet()) {
+      String groupId = entry.getKey();
+      SingleQPTask singleQPTask;
+      BasicRequest request;
+      try {
+        LOGGER.debug("DATA_GROUP_ID Send batch size() : {}", entry.getValue().size());
+        request = new DataGroupNonQueryRequest(groupId, entry.getValue());
+        singleQPTask = new SingleQPTask(false, request);
+        subTaskMap.put(groupId, singleQPTask);
+      } catch (IOException e) {
+        batchResult.setAllSuccessful(false);
+        for (int index : planIndexMap.get(groupId)) {
+          batchResult.addBatchErrorMessage(index, e.getMessage());
+          result[index] = Statement.EXECUTE_FAILED;
+        }
+      }
+    }
+  }
+
+  /**
+   * Get group id from physical plan
+   */
+  private String getGroupIdFromPhysicalPlan(PhysicalPlan plan)
+      throws PathErrorException, ProcessorException {
+    String storageGroup;
+    String groupId;
+    switch (plan.getOperatorType()) {
+      case DELETE:
+        storageGroup = getStorageGroupFromDeletePlan((DeletePlan) plan);
+        groupId = router.getGroupIdBySG(storageGroup);
+        break;
+      case UPDATE:
+        Path path = ((UpdatePlan) plan).getPath();
+        storageGroup = QPExecutorUtils.getStroageGroupByDevice(path.getDevice());
+        groupId = router.getGroupIdBySG(storageGroup);
+        break;
+      case INSERT:
+        storageGroup = QPExecutorUtils.getStroageGroupByDevice(((InsertPlan) plan).getDeviceId());
+        groupId = router.getGroupIdBySG(storageGroup);
+        break;
+      case CREATE_ROLE:
+      case DELETE_ROLE:
+      case CREATE_USER:
+      case REVOKE_USER_ROLE:
+      case REVOKE_ROLE_PRIVILEGE:
+      case REVOKE_USER_PRIVILEGE:
+      case GRANT_ROLE_PRIVILEGE:
+      case GRANT_USER_PRIVILEGE:
+      case GRANT_USER_ROLE:
+      case MODIFY_PASSWORD:
+      case DELETE_USER:
+      case LIST_ROLE:
+      case LIST_USER:
+      case LIST_ROLE_PRIVILEGE:
+      case LIST_ROLE_USERS:
+      case LIST_USER_PRIVILEGE:
+      case LIST_USER_ROLES:
+        groupId = ClusterConfig.METADATA_GROUP_ID;
+        break;
+      case LOADDATA:
+        throw new UnsupportedOperationException(
+            String.format(OPERATION_NOT_SUPPORTED, plan.getOperatorType()));
+      case DELETE_TIMESERIES:
+      case CREATE_TIMESERIES:
+      case SET_STORAGE_GROUP:
+      case METADATA:
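+        /** Metadata-changing plans need a fresh view of storage groups, hence the null-read below (once per request or batch) **/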
+        if (emptyTaskEnable) {
+          Status nullReadTaskStatus = Status.OK();
+          RaftUtils.handleNullReadToMetaGroup(nullReadTaskStatus);
+          if (!nullReadTaskStatus.isOk()) {
+            throw new ProcessorException("Null read to metadata group failed");
+          }
+          emptyTaskEnable = false;
+        }
+        groupId = getGroupIdFromMetadataPlan((MetadataPlan) plan);
+        break;
+      case PROPERTY:
+        throw new UnsupportedOperationException(
+            String.format(OPERATION_NOT_SUPPORTED, plan.getOperatorType()));
+      default:
+        throw new UnsupportedOperationException(
+            String.format(OPERATION_NOT_SUPPORTED, plan.getOperatorType()));
+    }
+    return groupId;
+  }
+
+  /**
+   * Get storage group from delete plan
+   */
+  public String getStorageGroupFromDeletePlan(DeletePlan deletePlan)
+      throws PathErrorException, ProcessorException {
+    List<Path> paths = deletePlan.getPaths();
+    Set<String> sgSet = new HashSet<>();
+    for (Path path : paths) {
+      List<String> storageGroupList = mManager.getAllFileNamesByPath(path.getFullPath());
+      sgSet.addAll(storageGroupList);
+      if (sgSet.size() > 1) {
+        throw new ProcessorException(
+            "Delete function in distributed iotdb only supports single storage group");
+      }
+    }
+    List<String> sgList = new ArrayList<>(sgSet);
+    return sgList.get(0);
+  }
+
+  /**
+   * Get group id from metadata plan
+   */
+  public String getGroupIdFromMetadataPlan(MetadataPlan metadataPlan)
+      throws ProcessorException, PathErrorException {
+    MetadataOperator.NamespaceType namespaceType = metadataPlan.getNamespaceType();
+    Path path = metadataPlan.getPath();
+    String groupId;
+    switch (namespaceType) {
+      case ADD_PATH:
+      case DELETE_PATH:
+        String deviceId = path.getDevice();
+        String storageGroup = QPExecutorUtils.getStroageGroupByDevice(deviceId);
+        groupId = router.getGroupIdBySG(storageGroup);
+        break;
+      case SET_FILE_LEVEL:
+        boolean fileLevelExist = mManager.checkStorageLevelOfMTree(path.getFullPath());
+        if (fileLevelExist) {
+          throw new ProcessorException(
+              String.format("File level %s already exists.", path.getFullPath()));
+        } else {
+          groupId = ClusterConfig.METADATA_GROUP_ID;
+        }
+        break;
+      default:
+        throw new ProcessorException("unknown namespace type:" + namespaceType);
+    }
+    return groupId;
+  }
+
+  /**
+   * Handle non query single request by group id and physical plan
+   */
+  private boolean handleNonQueryRequest(String groupId, PhysicalPlan plan)
+      throws IOException, RaftConnectionException, InterruptedException {
+    List<PhysicalPlan> plans = Collections.singletonList(plan);
+    BasicRequest request;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      request = new MetaGroupNonQueryRequest(groupId, plans);
+    } else {
+      request = new DataGroupNonQueryRequest(groupId, plans);
+    }
+    SingleQPTask qpTask = new SingleQPTask(true, request);
+    currentTask.set(qpTask);
+
+    /** Check if the plan can be executed locally. **/
+    if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
+      return handleNonQueryRequestLocally(groupId, qpTask);
+    } else {
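+      /** Otherwise forward the task to the current leader of the target group **/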
+      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      qpTask.setTargetNode(leader);
+      return syncHandleNonQueryTask(qpTask);
+    }
+  }
+
+  /**
+   * Handle a non-query request locally (for either the metadata group or a data group).
+   */
+  public boolean handleNonQueryRequestLocally(String groupId, QPTask qpTask)
+      throws InterruptedException {
+    BasicResponse response;
+    RaftService service;
+    if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
+      response = MetaGroupNonQueryResponse.createEmptyResponse(groupId);
+      MetadataRaftHolder metadataRaftHolder = RaftUtils.getMetadataRaftHolder();
+      service = (RaftService) metadataRaftHolder.getService();
+    } else {
+      response = DataGroupNonQueryResponse.createEmptyResponse(groupId);
+      DataPartitionRaftHolder dataRaftHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+      service = (RaftService) dataRaftHolder.getService();
+    }
+
+    /** Apply qpTask to Raft Node **/
+    return RaftUtils.executeRaftTaskForLocalProcessor(service, qpTask, response);
+  }
+
+
+  /**
+   * Synchronously handle a non-query task and return whether it succeeded.
+   *
+   * @param task request QPTask
+   * @return request result
+   */
+  public boolean syncHandleNonQueryTask(SingleQPTask task)
+      throws RaftConnectionException, InterruptedException {
+    BasicResponse response = syncHandleNonQuerySingleTaskGetRes(task, 0);
+    return response != null && response.isSuccess();
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
new file mode 100644
index 0000000..a258d7f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@ -0,0 +1,378 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.executor;
+
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QuerySeriesTypeRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryStorageGroupRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryTimeSeriesRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataInStringResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryStorageGroupResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.metadata.Metadata;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handle {@code show timeseries <path>} logic
+ */
+public class QueryMetadataExecutor extends AbstractQPExecutor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(QueryMetadataExecutor.class);
+  private static final String DOUB_SEPARATOR = "\\.";
+  private static final char SINGLE_SEPARATOR = '.';
+  private static final String RAFT_CONNECTION_ERROR = "Raft connection occurs error.";
+
+  public QueryMetadataExecutor() {
+    super();
+  }
+
+  public Set<String> processStorageGroupQuery() throws InterruptedException {
+    return queryStorageGroupLocally();
+  }
+
+  /**
+   * Handle {@code show timeseries <path>} statement
+   */
+  public List<List<String>> processTimeSeriesQuery(String path)
+      throws InterruptedException, PathErrorException, ProcessorException {
+    List<List<String>> res = new ArrayList<>();
+    List<String> storageGroupList = mManager.getAllFileNamesByPath(path);
+    if (storageGroupList.isEmpty()) {
+      return new ArrayList<>();
+    } else {
+      Map<String, Set<String>> groupIdSGMap = QPExecutorUtils.classifySGByGroupId(storageGroupList);
+      for (Entry<String, Set<String>> entry : groupIdSGMap.entrySet()) {
+        List<String> paths = getSubQueryPaths(entry.getValue(), path);
+        String groupId = entry.getKey();
+        handleTimseriesQuery(groupId, paths, res);
+      }
+    }
+    return res;
+  }
+
+  /**
+   * Get all query paths in the given storage groups relative to the query path
+   */
+  private List<String> getSubQueryPaths(Set<String> storageGroupList, String queryPath) {
+    List<String> paths = new ArrayList<>();
+    for (String storageGroup : storageGroupList) {
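+      /** If the storage group is at least as long as the query path, use the storage group itself; otherwise extend the storage group with the remaining nodes of the query path **/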
+      if (storageGroup.length() >= queryPath.length()) {
+        paths.add(storageGroup);
+      } else {
+        StringBuilder path = new StringBuilder();
+        String[] storageGroupNodes = storageGroup.split(DOUB_SEPARATOR);
+        String[] queryPathNodes = queryPath.split(DOUB_SEPARATOR);
+        for (int i = 0; i < queryPathNodes.length; i++) {
+          if (i >= storageGroupNodes.length) {
+            path.append(queryPathNodes[i]).append(SINGLE_SEPARATOR);
+          } else {
+            path.append(storageGroupNodes[i]).append(SINGLE_SEPARATOR);
+          }
+        }
+        paths.add(path.deleteCharAt(path.length() - 1).toString());
+      }
+    }
+    return paths;
+  }
+  
+  /**
+   * Handle query timeseries in one data group
+   *
+   * @param groupId data group id
+   */
+  private void handleTimseriesQuery(String groupId, List<String> pathList, List<List<String>> res)
+      throws ProcessorException, InterruptedException {
+    QueryTimeSeriesRequest request = new QueryTimeSeriesRequest(groupId,
+        getReadMetadataConsistencyLevel(), pathList);
+    SingleQPTask task = new SingleQPTask(false, request);
+
+    LOGGER.debug("Execute show timeseries {} statement for group {}.", pathList, groupId);
+    PeerId holder;
+    /** Check if the plan can be executed locally. **/
+    if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+      LOGGER.debug("Execute show timeseries {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      holder = this.server.getServerId();
+    } else {
+      holder = RaftUtils.getRandomPeerID(groupId);
+    }
+    task.setTargetNode(holder);
+    try {
+      res.addAll(queryTimeSeries(task));
+    } catch (RaftConnectionException e) {
+      throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
+    }
+  }
+
+  public String processMetadataInStringQuery()
+      throws InterruptedException, ProcessorException {
+    Set<String> groupIdSet = router.getAllGroupId();
+
+    List<String> metadataList = new ArrayList<>(groupIdSet.size());
+    List<SingleQPTask> taskList = new ArrayList<>();
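+    /** Fan out one request per data group, then wait for all responses and merge them **/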
+    for (String groupId : groupIdSet) {
+      QueryMetadataInStringRequest request = new QueryMetadataInStringRequest(groupId,
+          getReadMetadataConsistencyLevel());
+      SingleQPTask task = new SingleQPTask(false, request);
+      taskList.add(task);
+
+      LOGGER.debug("Execute show metadata in string statement for group {}.", groupId);
+      PeerId holder;
+      /** Check if the plan can be executed locally. **/
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+        LOGGER.debug("Execute show metadata in string statement locally for group {} by sending request to local node.", groupId);
+        holder = this.server.getServerId();
+      } else {
+        holder = RaftUtils.getRandomPeerID(groupId);
+      }
+      task.setTargetNode(holder);
+      try {
+        asyncSendNonQuerySingleTask(task, 0);
+      } catch (RaftConnectionException e) {
+        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
+      }
+    }
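+    // All requests have been dispatched asynchronously; now collect every response before
+    // combining the metadata strings.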
+    for (int i = 0; i < taskList.size(); i++) {
+      SingleQPTask task = taskList.get(i);
+      task.await();
+      BasicResponse response = task.getResponse();
+      if (response == null || !response.isSuccess()) {
+        throw new ProcessorException("Execute show metadata in string statement failed.");
+      }
+      metadataList.add(((QueryMetadataInStringResponse)response).getMetadata());
+    }
+    return combineMetadataInStringList(metadataList);
+  }
+
+  public Metadata processMetadataQuery()
+      throws InterruptedException, ProcessorException {
+    Set<String> groupIdSet = router.getAllGroupId();
+
+    Metadata[] metadatas = new Metadata[groupIdSet.size()];
+    List<SingleQPTask> taskList = new ArrayList<>();
+    for (String groupId : groupIdSet) {
+      QueryMetadataRequest request = new QueryMetadataRequest(groupId,
+          getReadMetadataConsistencyLevel());
+      SingleQPTask task = new SingleQPTask(false, request);
+      taskList.add(task);
+
+      LOGGER.debug("Execute query metadata statement for group {}.", groupId);
+      PeerId holder;
+      /** Check if the plan can be executed locally. **/
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+        LOGGER.debug("Execute query metadata statement locally for group {} by sending request to local node.", groupId);
+        holder = this.server.getServerId();
+      } else {
+        holder = RaftUtils.getRandomPeerID(groupId);
+      }
+      task.setTargetNode(holder);
+      try {
+        asyncSendNonQuerySingleTask(task, 0);
+      } catch (RaftConnectionException e) {
+        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
+      }
+    }
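+    // All requests have been dispatched asynchronously; now collect every response before
+    // combining the metadata objects.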
+    for (int i = 0; i < taskList.size(); i++) {
+      SingleQPTask task = taskList.get(i);
+      task.await();
+      BasicResponse response = task.getResponse();
+      if (response == null || !response.isSuccess()) {
+        String errorMessage = "response is null";
+        if (response != null && response.getErrorMsg() != null) {
+          errorMessage = response.getErrorMsg();
+        }
+        throw new ProcessorException("Execute query metadata statement failed because " + errorMessage);
+      }
+      metadatas[i] = ((QueryMetadataResponse)response).getMetadata();
+    }
+    return Metadata.combineMetadatas(metadatas);
+  }
+
+  public TSDataType processSeriesTypeQuery(String path)
+      throws InterruptedException, ProcessorException, PathErrorException {
+    TSDataType dataType;
+    List<String> storageGroupList = mManager.getAllFileNamesByPath(path);
+    if (storageGroupList.size() != 1) {
+      throw new PathErrorException("path " + path + " is not valid.");
+    } else {
+      String groupId = router.getGroupIdBySG(storageGroupList.get(0));
+      QuerySeriesTypeRequest request = new QuerySeriesTypeRequest(groupId,
+          getReadMetadataConsistencyLevel(), path);
+      SingleQPTask task = new SingleQPTask(false, request);
+
+      LOGGER.debug("Execute get series type for {} statement for group {}.", path, groupId);
+      PeerId holder;
+      /** Check if the plan can be executed locally. **/
+      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+        LOGGER.debug("Execute get series type for {} statement locally for group {} by sending request to local node.", path, groupId);
+        holder = this.server.getServerId();
+      } else {
+        holder = RaftUtils.getRandomPeerID(groupId);
+      }
+      task.setTargetNode(holder);
+      try {
+        dataType = querySeriesType(task);
+      } catch (RaftConnectionException e) {
+        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
+      }
+    }
+    return dataType;
+  }
+
+  /**
+   * Handle get paths for <path> statement
+   */
+  public List<String> processPathsQuery(String path)
+      throws InterruptedException, PathErrorException, ProcessorException {
+    List<String> res = new ArrayList<>();
+    List<String> storageGroupList = mManager.getAllFileNamesByPath(path);
+    if (storageGroupList.isEmpty()) {
+      return new ArrayList<>();
+    } else {
+      Map<String, Set<String>> groupIdSGMap = QPExecutorUtils.classifySGByGroupId(storageGroupList);
+      for (Entry<String, Set<String>> entry : groupIdSGMap.entrySet()) {
+        List<String> paths = getSubQueryPaths(entry.getValue(), path);
+        String groupId = entry.getKey();
+        handlePathsQuery(groupId, paths, res);
+      }
+    }
+    return res;
+  }
+
+  /**
+   * Handle query paths in one data group
+   *
+   * @param groupId data group id
+   */
+  private void handlePathsQuery(String groupId, List<String> pathList, List<String> res)
+      throws ProcessorException, InterruptedException {
+    QueryPathsRequest request = new QueryPathsRequest(groupId,
+        getReadMetadataConsistencyLevel(), pathList);
+    SingleQPTask task = new SingleQPTask(false, request);
+
+    LOGGER.debug("Execute get paths for {} statement for group {}.", pathList, groupId);
+    PeerId holder;
+    /** Check if the plan can be executed locally. **/
+    if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+      LOGGER.debug("Execute get paths for {} statement locally for group {} by sending request to local node.", pathList, groupId);
+      holder = this.server.getServerId();
+    } else {
+      holder = RaftUtils.getRandomPeerID(groupId);
+    }
+    task.setTargetNode(holder);
+    try {
+      res.addAll(queryPaths(task));
+    } catch (RaftConnectionException e) {
+      throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
+    }
+  }
+
+  private List<List<String>> queryTimeSeries(SingleQPTask task)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleNonQuerySingleTaskGetRes(task, 0);
+    return response == null ? new ArrayList<>()
+        : ((QueryTimeSeriesResponse) response).getTimeSeries();
+  }
+
+  private TSDataType querySeriesType(SingleQPTask task)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleNonQuerySingleTaskGetRes(task, 0);
+    return response == null ? null
+        : ((QuerySeriesTypeResponse) response).getDataType();
+  }
+
+  /**
+   * Handle "show storage group" statement locally
+   *
+   * @return Set of storage group name
+   */
+  private Set<String> queryStorageGroupLocally() throws InterruptedException {
+    final byte[] reqContext = RaftUtils.createRaftRequestContext();
+    QueryStorageGroupRequest request = new QueryStorageGroupRequest(
+        ClusterConfig.METADATA_GROUP_ID, getReadMetadataConsistencyLevel());
+    SingleQPTask task = new SingleQPTask(false, request);
+    MetadataRaftHolder metadataHolder = (MetadataRaftHolder) server.getMetadataHolder();
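+    // Weak consistency reads the local state machine directly; otherwise go through raft
+    // readIndex so the read reflects the latest committed state.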
+    if (getReadMetadataConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QueryStorageGroupResponse response;
+      response = QueryStorageGroupResponse
+          .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
+      task.receive(response);
+    } else {
+      ((RaftService) metadataHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QueryStorageGroupResponse response;
+              if (status.isOk()) {
+                response = QueryStorageGroupResponse
+                    .createSuccessResponse(metadataHolder.getFsm().getAllStorageGroups());
+              } else {
+                response = QueryStorageGroupResponse.createErrorResponse(status.getErrorMsg());
+              }
+              task.receive(response);
+            }
+          });
+    }
+    task.await();
+    return ((QueryStorageGroupResponse) task.getResponse()).getStorageGroups();
+  }
+
+  private List<String> queryPaths(SingleQPTask task)
+      throws InterruptedException, RaftConnectionException {
+    BasicResponse response = syncHandleNonQuerySingleTaskGetRes(task, 0);
+    return response == null ? new ArrayList<>()
+        : ((QueryPathsResponse) response).getPaths();
+  }
+
+  /**
+   * Combine multiple metadata strings into a single String
+   *
+   * @return single String of all metadata
+   */
+  private String combineMetadataInStringList(List<String> metadataList) {
+    return MManager.combineMetadataInStrings(metadataList.toArray(new String[metadataList.size()]));
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
new file mode 100644
index 0000000..f06fa4b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.task;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Future;
+import java.util.concurrent.locks.ReentrantLock;
+import org.apache.iotdb.cluster.concurrent.pool.QPTaskThreadManager;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.service.TSServiceClusterImpl.BatchResult;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Execute batch statement tasks. It's thread-safe.
+ */
+public class BatchQPTask extends MultiQPTask {
+
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(BatchQPTask.class);
+
+  /**
+   * Record the indexes of the physical plans belonging to each data group. An index is the
+   * position of the plan in the batch result. Key: group id
+   */
+  private Map<String, List<Integer>> planIndexMap;
+
+  /**
+   * Batch result array; each element marks the result type of one statement (see BatchResult)
+   */
+  private int[] resultArray;
+
+  /**
+   * Batch result
+   */
+  private BatchResult batchResult;
+
+  /**
+   * Lock to update result
+   */
+  private ReentrantLock lock = new ReentrantLock();
+
+  private NonQueryExecutor executor;
+
+
+  public BatchQPTask(int taskNum, BatchResult result, Map<String, SingleQPTask> taskMap,
+      Map<String, List<Integer>> planIndexMap) {
+    super(false, taskNum, TaskType.BATCH);
+    this.resultArray = result.getResultArray();
+    this.batchResult = result;
+    this.taskMap = taskMap;
+    this.planIndexMap = planIndexMap;
+    this.taskThreadMap = new HashMap<>();
+  }
+
+  /**
+   * Process response
+   *
+   * @param basicResponse response from receiver
+   */
+  @Override
+  public void receive(BasicResponse basicResponse) {
+    lock.lock();
+    try {
+      String groupId = basicResponse.getGroupId();
+      List<Boolean> results = basicResponse.getResults();
+      List<Integer> indexList = planIndexMap.get(groupId);
+      List<String> errorMsgList = ((DataGroupNonQueryResponse) basicResponse).getErrorMsgList();
+      int errorMsgIndex = 0;
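+      // Map every result of this group back to the original position of its statement in the batch.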
+      for (int i = 0; i < indexList.size(); i++) {
+        if (i >= results.size()) {
+          resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+          batchResult.addBatchErrorMessage(indexList.get(i), basicResponse.getErrorMsg());
+        } else {
+          if (results.get(i)) {
+            resultArray[indexList.get(i)] = Statement.SUCCESS_NO_INFO;
+          } else {
+            resultArray[indexList.get(i)] = Statement.EXECUTE_FAILED;
+            batchResult.addBatchErrorMessage(indexList.get(i), errorMsgList.get(errorMsgIndex++));
+          }
+        }
+      }
+      if (!basicResponse.isSuccess()) {
+        batchResult.setAllSuccessful(false);
+      }
+    } finally {
+      lock.unlock();
+    }
+    taskCountDownLatch.countDown();
+  }
+
+  public void executeBy(NonQueryExecutor executor) {
+    this.executor = executor;
+
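+    // Dispatch each group's sub-task: run it locally if this node serves the group,
+    // otherwise send it to the group leader over rpc.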
+    for (Entry<String, SingleQPTask> entry : taskMap.entrySet()) {
+      String groupId = entry.getKey();
+      SingleQPTask subTask = entry.getValue();
+      Future<?> taskThread;
+      if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
+        taskThread = QPTaskThreadManager.getInstance()
+            .submit(() -> executeLocalSubTask(subTask, groupId));
+      } else {
+        PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+        subTask.setTargetNode(leader);
+        taskThread = QPTaskThreadManager.getInstance()
+            .submit(() -> executeRpcSubTask(subTask, groupId));
+      }
+      taskThreadMap.put(groupId, taskThread);
+    }
+  }
+
+  /**
+   * Execute local sub task
+   */
+  private void executeLocalSubTask(QPTask subTask, String groupId) {
+    try {
+      executor.handleNonQueryRequestLocally(groupId, subTask);
+      this.receive(subTask.getResponse());
+    } catch (InterruptedException e) {
+      LOGGER.error("Handle sub task locally failed.", e);
+      Thread.currentThread().interrupt();
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+    }
+  }
+
+  /**
+   * Execute RPC sub task
+   */
+  private void executeRpcSubTask(SingleQPTask subTask, String groupId) {
+    try {
+      executor.syncHandleNonQueryTask(subTask);
+      this.receive(subTask.getResponse());
+    } catch (RaftConnectionException | InterruptedException e) {
+      LOGGER.error("Handle rpc sub task failed.", e);
+      this.receive(DataGroupNonQueryResponse.createErrorResponse(groupId, e.getMessage()));
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
new file mode 100644
index 0000000..3b905d8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.task;
+
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
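+/**
+ * Holds the response and the task state of a single data query sub-task.
+ */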
+public class DataQueryTask {
+  private BasicResponse basicResponse;
+  private TaskState state;
+
+  public DataQueryTask(BasicResponse basicResponse,
+      TaskState state) {
+    this.basicResponse = basicResponse;
+    this.state = state;
+  }
+
+  public BasicResponse getBasicResponse() {
+    return basicResponse;
+  }
+
+  public void setBasicResponse(BasicResponse basicResponse) {
+    this.basicResponse = basicResponse;
+  }
+
+  public TaskState getState() {
+    return state;
+  }
+
+  public void setState(TaskState state) {
+    this.state = state;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java
new file mode 100644
index 0000000..e451f3e
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/MultiQPTask.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.task;
+
+import java.util.Map;
+import java.util.concurrent.Future;
+
+public abstract class MultiQPTask extends QPTask {
+
+  /**
+   * Each request corresponds to a group id. Key: group id
+   */
+  Map<String, SingleQPTask> taskMap;
+
+  /**
+   * Each future task handles a request in taskMap and corresponds to the same group id.
+   * Key: group id
+   */
+  Map<String, Future<?>> taskThreadMap;
+
+  public MultiQPTask(boolean isSyncTask, int taskNum, TaskType taskType) {
+    super(isSyncTask, taskNum, TaskState.INITIAL, taskType);
+  }
+
+  @Override
+  public void shutdown() {
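+    // Cancel the sub-task threads that are still running, then release every caller blocked in await().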
+    for (Future<?> task : taskThreadMap.values()) {
+      if (!task.isDone()) {
+        task.cancel(true);
+      }
+    }
+    while (taskCountDownLatch.getCount() != 0) {
+      this.taskCountDownLatch.countDown();
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
new file mode 100644
index 0000000..b86e92a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/QPTask.java
@@ -0,0 +1,187 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.task;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import java.util.concurrent.CountDownLatch;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public abstract class QPTask {
+
+  /**
+   * QPTask response
+   */
+  protected BasicResponse response;
+
+  /**
+   * QPTask request
+   */
+  protected BasicRequest request;
+
+  /**
+   * The target peer of this task
+   */
+  protected PeerId targetNode;
+
+  /**
+   * Whether it's a synchronization task or not.
+   */
+  boolean isSyncTask;
+
+  /**
+   * Count down latch for sub-tasks
+   */
+  CountDownLatch taskCountDownLatch;
+
+  /**
+   * Number of sub-tasks
+   */
+  private int taskNum;
+
+  /**
+   * Describe task type
+   */
+  TaskState taskState;
+
+  /**
+   * Describe task type
+   */
+  TaskType taskType;
+
+  /**
+   * Server instance
+   */
+  protected Server server = Server.getInstance();
+
+  public QPTask(boolean isSyncTask, int taskNum, TaskState taskState, TaskType taskType) {
+    this.isSyncTask = isSyncTask;
+    this.taskNum = taskNum;
+    this.taskCountDownLatch = new CountDownLatch(taskNum);
+    this.taskState = taskState;
+    this.taskType = taskType;
+  }
+
+  /**
+   * Process response
+   *
+   * @param basicResponse response from receiver
+   */
+  public abstract void receive(BasicResponse basicResponse);
+
+  public boolean isSyncTask() {
+    return isSyncTask;
+  }
+
+  public void setSyncTask(boolean syncTask) {
+    isSyncTask = syncTask;
+  }
+
+  public CountDownLatch getTaskCountDownLatch() {
+    return taskCountDownLatch;
+  }
+
+  public void resetTask() {
+    this.taskCountDownLatch = new CountDownLatch(taskNum);
+  }
+
+  public TaskState getTaskState() {
+    return taskState;
+  }
+
+  public void setTaskState(TaskState taskState) {
+    this.taskState = taskState;
+  }
+
+
+  public BasicResponse getResponse() {
+    return response;
+  }
+
+  public void setResponse(BasicResponse response) {
+    this.response = response;
+  }
+
+  public BasicRequest getRequest() {
+    return request;
+  }
+
+  public void setRequest(BasicRequest request) {
+    this.request = request;
+  }
+
+  public enum TaskState {
+
+    /**
+     * Initial state
+     */
+    INITIAL,
+
+    /**
+     * Redirect leader
+     */
+    REDIRECT,
+
+    /**
+     * Task finish
+     */
+    FINISH,
+
+    /**
+     * Occur exception in remote node
+     */
+    EXCEPTION,
+
+    /**
+     * Can not connect to remote node
+     */
+    RAFT_CONNECTION_EXCEPTION
+  }
+
+  public enum TaskType {
+
+    /**
+     * Single task
+     */
+    SINGLE,
+
+    /**
+     * Batch task
+     */
+    BATCH
+  }
+
+  /**
+   * Wait until task is finished.
+   */
+  public void await() throws InterruptedException {
+    this.taskCountDownLatch.await();
+  }
+
+  public abstract void shutdown();
+
+  public PeerId getTargetNode() {
+    return targetNode;
+  }
+
+  public void setTargetNode(PeerId targetNode) {
+    this.targetNode = targetNode;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
new file mode 100644
index 0000000..c684cf1
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/SingleQPTask.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.qp.task;
+
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Process a task for a single raft group. Used for all operations except data queries.
+ */
+public class SingleQPTask extends QPTask {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SingleQPTask.class);
+
+  private static final int TASK_NUM = 1;
+
+  public SingleQPTask(boolean isSyncTask, BasicRequest request) {
+    super(isSyncTask, TASK_NUM, TaskState.INITIAL, TaskType.SINGLE);
+    this.request = request;
+  }
+
+  /**
+   * Process the response. If the leader needs to be redirected, mark the task state so the caller can redo the task.
+   */
+  @Override
+  public void receive(BasicResponse response) {
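+    // If the rpc callback already recorded an exception, keep that state; otherwise update the
+    // state according to the response.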
+    if (taskState != TaskState.EXCEPTION) {
+      this.response = response;
+      if (response == null) {
+        LOGGER.error("Response is null");
+      } else if (response.isRedirected()) {
+        this.taskState = TaskState.REDIRECT;
+      } else {
+        this.taskState = TaskState.FINISH;
+      }
+    }
+    this.taskCountDownLatch.countDown();
+  }
+
+  @Override
+  public void shutdown() {
+    if (taskCountDownLatch.getCount() != 0) {
+      this.taskCountDownLatch.countDown();
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
new file mode 100644
index 0000000..6257fbd
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft;
+
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.qp.task.DataQueryTask;
+
+/**
+ * Use the current node as a client to send requests and process their results
+ */
+public interface NodeAsClient {
+
+  /**
+   * Asynchronously process a request
+   * @param qpTask single QPTask to be executed
+   */
+  void asyncHandleRequest(SingleQPTask qpTask) throws RaftConnectionException;
+
+//  /**
+//   * Synchronous processing requests
+//   * @param peerId leader node of the target group
+//   *
+//   */
+//  DataQueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
+//      throws RaftConnectionException;
+
+  /**
+   * Shut down client
+   */
+  void shutdown();
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/closure/ResponseClosure.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/closure/ResponseClosure.java
new file mode 100644
index 0000000..e2dbd53
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/closure/ResponseClosure.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.closure;
+
+import com.alipay.sofa.jraft.Closure;
+import com.alipay.sofa.jraft.Status;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public class ResponseClosure implements Closure {
+
+  private BasicResponse response;
+  private Closure closure;
+
+  public ResponseClosure(BasicResponse response, Closure closure) {
+    this.response = response;
+    this.closure = closure;
+  }
+
+  @Override
+  public void run(Status status) {
+    if (this.closure != null) {
+      closure.run(status);
+    }
+  }
+
+  public BasicResponse getResponse() {
+    return response;
+  }
+
+  public void setResponse(BasicResponse response) {
+    this.response = response;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
new file mode 100644
index 0000000..e96da99
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.impl;
+
+import com.alipay.remoting.InvokeCallback;
+import com.alipay.remoting.exception.RemotingException;
+import com.alipay.sofa.jraft.option.CliOptions;
+import com.alipay.sofa.jraft.rpc.impl.cli.BoltCliClientService;
+import java.util.LinkedList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import org.apache.iotdb.cluster.concurrent.pool.NodeAsClientThreadManager;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
+import org.apache.iotdb.cluster.qp.task.SingleQPTask;
+import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Manage resources of {@link NodeAsClient}
+ */
+public class RaftNodeAsClientManager {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(RaftNodeAsClientManager.class);
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  /**
+   * Timeout limit for a task, in milliseconds
+   */
+  private static final int TASK_TIMEOUT_MS = CLUSTER_CONFIG.getQpTaskTimeout();
+
+  /**
+   * Max request number in queue
+   */
+  private static final int MAX_QUEUE_TASK_NUM = CLUSTER_CONFIG.getMaxQueueNumOfQPTask();
+
+  /**
+   * Node as client thread pool manager
+   */
+  private static final NodeAsClientThreadManager THREAD_POOL_MANAGER = NodeAsClientThreadManager
+      .getInstance();
+
+  /**
+   * QPTask queue list
+   */
+  private final LinkedList<SingleQPTask> taskQueue = new LinkedList<>();
+
+  /**
+   * Lock to protect the task queue
+   */
+  private Lock resourceLock = new ReentrantLock();
+
+  /**
+   * Condition to wait on when the task queue is empty
+   */
+  private Condition resourceCondition = resourceLock.newCondition();
+
+  /**
+   * Mark whether system is shutting down
+   */
+  private volatile boolean isShuttingDown;
+
+  private RaftNodeAsClientManager() {
+
+  }
+
+  public void init() {
+    isShuttingDown = false;
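+    // Start a fixed number of consumer threads; each one owns a RaftNodeAsClient and keeps
+    // taking tasks from the queue until it is interrupted.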
+    for (int i = 0; i < CLUSTER_CONFIG.getConcurrentInnerRpcClientThread(); i++) {
+      THREAD_POOL_MANAGER.execute(() -> {
+        RaftNodeAsClient client = new RaftNodeAsClient();
+        while (true) {
+          consumeQPTask(client);
+          if (Thread.currentThread().isInterrupted()) {
+            break;
+          }
+        }
+        client.shutdown();
+      });
+    }
+  }
+
+  /**
+   * Produce qp task to be executed.
+   */
+  public void produceQPTask(SingleQPTask qpTask) throws RaftConnectionException {
+    resourceLock.lock();
+    try {
+      checkShuttingDown();
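+      // Reject the task when the queue is full instead of blocking the producer.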
+      if (taskQueue.size() >= MAX_QUEUE_TASK_NUM) {
+        throw new RaftConnectionException(String
+            .format("The raft inner rpc task queue is full, the max pending task number is %s",
+                CLUSTER_CONFIG.getConcurrentInnerRpcClientThread() + CLUSTER_CONFIG
+                    .getMaxQueueNumOfQPTask()));
+      }
+      taskQueue.addLast(qpTask);
+      resourceCondition.signal();
+    } finally {
+      resourceLock.unlock();
+    }
+  }
+
+  /**
+   * Consume qp task
+   */
+  private void consumeQPTask(RaftNodeAsClient client) {
+    resourceLock.lock();
+    try {
+      while (taskQueue.isEmpty()) {
+        if (Thread.currentThread().isInterrupted()) {
+          return;
+        }
+        resourceCondition.await();
+      }
+      client.asyncHandleRequest(taskQueue.removeFirst());
+    } catch (InterruptedException e) {
+      LOGGER.error("An error occurred while awaiting resourceCondition", e);
+      Thread.currentThread().interrupt();
+    } finally {
+      resourceLock.unlock();
+    }
+  }
+
+  private void checkShuttingDown() throws RaftConnectionException {
+    if (isShuttingDown) {
+      throw new RaftConnectionException(
+          "Reject to execute QPTask because cluster system is shutting down");
+    }
+  }
+
+  public void shutdown() throws ProcessorException {
+    isShuttingDown = true;
+    THREAD_POOL_MANAGER.close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
+  }
+
+  /**
+   * Get qp task number in queue
+   */
+  public int getQPTaskNumInQueue() {
+    return taskQueue.size();
+  }
+
+  public static RaftNodeAsClientManager getInstance() {
+    return RaftNodeAsClientManager.ClientManagerHolder.INSTANCE;
+  }
+
+  private static class ClientManagerHolder {
+
+    private static final RaftNodeAsClientManager INSTANCE = new RaftNodeAsClientManager();
+
+    private ClientManagerHolder() {
+
+    }
+  }
+
+  /**
+   * Implement NodeAsClient with Raft Service
+   *
+   * @see NodeAsClient
+   */
+  public class RaftNodeAsClient implements NodeAsClient {
+
+    /**
+     * Rpc Service Client
+     */
+    private BoltCliClientService boltClientService;
+
+    private RaftNodeAsClient() {
+      init();
+    }
+
+    private void init() {
+      boltClientService = new BoltCliClientService();
+      boltClientService.init(new CliOptions());
+    }
+
+    @Override
+    public void asyncHandleRequest(SingleQPTask qpTask) {
+      LOGGER.debug("Node as client to send request to leader: {}", qpTask.getTargetNode());
+      try {
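+        // Send the request to the target node and forward the response (or the error state)
+        // back to the QPTask through the callback.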
+        boltClientService.getRpcClient()
+            .invokeWithCallback(qpTask.getTargetNode().getEndpoint().toString(),
+                qpTask.getRequest(),
+                new InvokeCallback() {
+
+                  @Override
+                  public void onResponse(Object result) {
+                    BasicResponse response = (BasicResponse) result;
+                    qpTask.receive(response);
+                  }
+
+                  @Override
+                  public void onException(Throwable e) {
+                    LOGGER.error("Bolt rpc client occurs errors when handling Request", e);
+                    qpTask.setTaskState(TaskState.EXCEPTION);
+                    qpTask.receive(null);
+                  }
+
+                  @Override
+                  public Executor getExecutor() {
+                    return null;
+                  }
+                }, TASK_TIMEOUT_MS);
+      } catch (RemotingException | InterruptedException e) {
+        LOGGER.error(e.getMessage());
+        qpTask.setTaskState(TaskState.RAFT_CONNECTION_EXCEPTION);
+        qpTask.receive(null);
+      }
+    }
+
+    /**
+     * Shut down the rpc client service
+     */
+    @Override
+    public void shutdown() {
+      boltClientService.shutdown();
+    }
+
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicAsyncUserProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicAsyncUserProcessor.java
new file mode 100644
index 0000000..4f0003c
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicAsyncUserProcessor.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor;
+
+import com.alipay.remoting.rpc.protocol.AsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+
+public abstract class BasicAsyncUserProcessor<T extends BasicRequest> extends
+    AsyncUserProcessor<T> {
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicSyncUserProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicSyncUserProcessor.java
new file mode 100644
index 0000000..1b7920f
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/BasicSyncUserProcessor.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor;
+
+import com.alipay.remoting.rpc.protocol.SyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+
+public abstract class BasicSyncUserProcessor<T extends BasicRequest> extends SyncUserProcessor<T> {
+
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
new file mode 100644
index 0000000..a76d2a6
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/QueryMetricAsyncProcessor.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import org.apache.iotdb.cluster.rpc.raft.request.QueryMetricRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+
+public class QueryMetricAsyncProcessor extends BasicAsyncUserProcessor<QueryMetricRequest> {
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetricRequest request) {
+    String groupId = request.getGroupID();
+
+    QueryMetricResponse response = QueryMetricResponse.createSuccessResponse(groupId,
+        RaftUtils.getReplicaMetric(request.getGroupID(), request.getMetric()));
+    response.addResult(true);
+    asyncContext.sendResponse(response);
+  }
+
+  @Override
+  public String interest() {
+    return QueryMetricRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
new file mode 100644
index 0000000..44f42a7
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.nonquery;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Asynchronously handle requests that need to be applied in a data group.
+ */
+public class DataGroupNonQueryAsyncProcessor extends
+    BasicAsyncUserProcessor<DataGroupNonQueryRequest> {
+
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(DataGroupNonQueryAsyncProcessor.class);
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      DataGroupNonQueryRequest request) {
+    LOGGER.debug("Handle data non query request");
+
+    /* Check if it's the leader */
+    String groupId = request.getGroupID();
+    DataPartitionRaftHolder dataPartitionRaftHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+    if (!dataPartitionRaftHolder.getFsm().isLeader()) {
+      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
+
+      DataGroupNonQueryResponse response = DataGroupNonQueryResponse
+          .createRedirectedResponse(groupId, leader.toString());
+      asyncContext.sendResponse(response);
+    } else {
+      LOGGER.debug("Apply task to raft node");
+
+      /* Apply Task to Raft Node */
+      BasicResponse response = DataGroupNonQueryResponse.createEmptyResponse(groupId);
+      RaftService service = (RaftService) dataPartitionRaftHolder.getService();
+      RaftUtils.executeRaftTaskForRpcProcessor(service, asyncContext, request, response);
+    }
+  }
+
+  @Override
+  public String interest() {
+    return DataGroupNonQueryRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
new file mode 100644
index 0000000..9f09bbb
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/MetaGroupNonQueryAsyncProcessor.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.nonquery;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.entity.PeerId;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.nonquery.MetaGroupNonQueryRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Asynchronously handle requests that need to be applied in the metadata group.
+ */
+public class MetaGroupNonQueryAsyncProcessor extends
+    BasicAsyncUserProcessor<MetaGroupNonQueryRequest> {
+
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(MetaGroupNonQueryAsyncProcessor.class);
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      MetaGroupNonQueryRequest request) {
+    LOGGER.debug("Handle metadata non-query request.");
+
+    /** Check if it's the leader **/
+    String groupId = request.getGroupID();
+    MetadataRaftHolder metadataHolder = RaftUtils.getMetadataRaftHolder();
+    if (!metadataHolder.getFsm().isLeader()) {
+      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
+      LOGGER.debug("Request need to redirect leader: {}, groupId : {} ", leader, groupId);
+
+      MetaGroupNonQueryResponse response = MetaGroupNonQueryResponse
+          .createRedirectedResponse(groupId, leader.toString());
+      asyncContext.sendResponse(response);
+    } else {
+      LOGGER.debug("Apply task to metadata raft node");
+
+      /** Apply Task to Raft Node **/
+      BasicResponse response = MetaGroupNonQueryResponse.createEmptyResponse(groupId);
+      RaftService service = (RaftService) metadataHolder.getService();
+      RaftUtils.executeRaftTaskForRpcProcessor(service, asyncContext, request, response);
+    }
+  }
+
+  @Override
+  public String interest() {
+    return MetaGroupNonQueryRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
new file mode 100644
index 0000000..3073df9
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataAsyncProcessor.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+public class QueryMetadataAsyncProcessor extends
+    BasicAsyncUserProcessor<QueryMetadataRequest> {
+
+  private MManager mManager = MManager.getInstance();
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetadataRequest request) {
+    String groupId = request.getGroupID();
+
+    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QueryMetadataResponse response;
+      try {
+        response = QueryMetadataResponse
+            .createSuccessResponse(groupId, mManager.getMetadata());
+        response.addResult(true);
+      } catch (PathErrorException e) {
+        response = QueryMetadataResponse.createErrorResponse(groupId, e.getMessage());
+        response.addResult(false);
+      }
+      asyncContext.sendResponse(response);
+    } else {
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+
+      ((RaftService) dataPartitionHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QueryMetadataResponse response;
+              if (status.isOk()) {
+                try {
+                  response = QueryMetadataResponse
+                      .createSuccessResponse(groupId, mManager.getMetadata());
+                  response.addResult(true);
+                } catch (PathErrorException e) {
+                  response = QueryMetadataResponse.createErrorResponse(groupId, e.getMessage());
+                  response.addResult(false);
+                }
+              } else {
+                response = QueryMetadataResponse
+                    .createErrorResponse(groupId, status.getErrorMsg());
+                response.addResult(false);
+              }
+              asyncContext.sendResponse(response);
+            }
+          });
+    }
+  }
+
+  @Override
+  public String interest() {
+    return QueryMetadataRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
new file mode 100644
index 0000000..8771eea
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryMetadataInStringAsyncProcessor.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryMetadataInStringResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.metadata.MManager;
+
+public class QueryMetadataInStringAsyncProcessor extends
+    BasicAsyncUserProcessor<QueryMetadataInStringRequest> {
+
+  private MManager mManager = MManager.getInstance();
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryMetadataInStringRequest request) {
+    String groupId = request.getGroupID();
+
+    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QueryMetadataInStringResponse response = QueryMetadataInStringResponse
+          .createSuccessResponse(groupId, mManager.getMetadataInString());
+      response.addResult(true);
+      asyncContext.sendResponse(response);
+    } else {
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+
+      ((RaftService) dataPartitionHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QueryMetadataInStringResponse response;
+              if (status.isOk()) {
+                response = QueryMetadataInStringResponse
+                    .createSuccessResponse(groupId, mManager.getMetadataInString());
+                response.addResult(true);
+              } else {
+                response = QueryMetadataInStringResponse
+                    .createErrorResponse(groupId, status.getErrorMsg());
+                response.addResult(false);
+              }
+              asyncContext.sendResponse(response);
+            }
+          });
+    }
+  }
+
+  @Override
+  public String interest() {
+    return QueryMetadataInStringRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
new file mode 100644
index 0000000..8e1e47b
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryPathsResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+public class QueryPathsAsyncProcessor extends BasicAsyncUserProcessor<QueryPathsRequest> {
+
+  private MManager mManager = MManager.getInstance();
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryPathsRequest request) {
+    String groupId = request.getGroupID();
+
+    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QueryPathsResponse response = QueryPathsResponse
+          .createEmptyResponse(groupId);
+      try {
+        queryPaths(request, response);
+        response.addResult(true);
+      } catch (final PathErrorException e) {
+        response = QueryPathsResponse.createErrorResponse(groupId, e.getMessage());
+        response.addResult(false);
+      }
+      asyncContext.sendResponse(response);
+    } else {
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+
+      ((RaftService) dataPartitionHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QueryPathsResponse response = QueryPathsResponse
+                  .createEmptyResponse(groupId);
+              if (status.isOk()) {
+                try {
+                  queryPaths(request, response);
+                  response.addResult(true);
+                } catch (final PathErrorException e) {
+                  response = QueryPathsResponse.createErrorResponse(groupId, e.getMessage());
+                  response.addResult(false);
+                }
+              } else {
+                response = QueryPathsResponse
+                    .createErrorResponse(groupId, status.getErrorMsg());
+                response.addResult(false);
+              }
+              asyncContext.sendResponse(response);
+            }
+          });
+    }
+  }
+
+  /**
+   * Query all concrete paths matching each path pattern in the request
+   */
+  private void queryPaths(QueryPathsRequest request,
+      QueryPathsResponse response) throws PathErrorException {
+    for (String path : request.getPath()) {
+      response.addPaths(mManager.getPaths(path));
+    }
+  }
+
+  @Override
+  public String interest() {
+    return QueryPathsRequest.class.getName();
+  }
+}
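
For illustration only (this snippet is not part of the patch), the weak-consistency branch above is reached when the coordinator builds the request with the weak read level; the group id and path pattern below are placeholders:

    import java.util.Collections;
    import org.apache.iotdb.cluster.config.ClusterConstant;
    import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;

    // Answered straight from the local MManager; any other level goes through readIndex first.
    QueryPathsRequest request = new QueryPathsRequest(
        "data-group-0", ClusterConstant.WEAK_CONSISTENCY_LEVEL,
        Collections.singletonList("root.vehicle.*"));
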
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
new file mode 100644
index 0000000..9e4b1c7
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QuerySeriesTypeAsyncProcessor.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QuerySeriesTypeRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QuerySeriesTypeResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+public class QuerySeriesTypeAsyncProcessor extends BasicAsyncUserProcessor<QuerySeriesTypeRequest> {
+
+  private MManager mManager = MManager.getInstance();
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QuerySeriesTypeRequest request) {
+    String groupId = request.getGroupID();
+
+    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QuerySeriesTypeResponse response;
+      try {
+        response = QuerySeriesTypeResponse.createSuccessResponse(groupId, mManager.getSeriesType(request.getPath()));
+        response.addResult(true);
+      } catch (final PathErrorException e) {
+        response = QuerySeriesTypeResponse.createErrorResponse(groupId, e.getMessage());
+        response.addResult(false);
+      }
+      asyncContext.sendResponse(response);
+    } else {
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+
+      ((RaftService) dataPartitionHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QuerySeriesTypeResponse response;
+              if (status.isOk()) {
+                try {
+                  response = QuerySeriesTypeResponse.createSuccessResponse(groupId, mManager.getSeriesType(request.getPath()));
+                  response.addResult(true);
+                } catch (final PathErrorException e) {
+                  response = QuerySeriesTypeResponse.createErrorResponse(groupId, e.getMessage());
+                  response.addResult(false);
+                }
+              } else {
+                response = QuerySeriesTypeResponse
+                    .createErrorResponse(groupId, status.getErrorMsg());
+                response.addResult(false);
+              }
+              asyncContext.sendResponse(response);
+            }
+          });
+    }
+  }
+
+  @Override
+  public String interest() {
+    return QuerySeriesTypeRequest.class.getName();
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
new file mode 100644
index 0000000..593f99d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryTimeSeriesAsyncProcessor.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.processor.querymetadata;
+
+import com.alipay.remoting.AsyncContext;
+import com.alipay.remoting.BizContext;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.closure.ReadIndexClosure;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
+import org.apache.iotdb.cluster.entity.raft.RaftService;
+import org.apache.iotdb.cluster.rpc.raft.processor.BasicAsyncUserProcessor;
+import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryTimeSeriesRequest;
+import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;
+import org.apache.iotdb.cluster.utils.RaftUtils;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+
+public class QueryTimeSeriesAsyncProcessor extends BasicAsyncUserProcessor<QueryTimeSeriesRequest> {
+
+  private MManager mManager = MManager.getInstance();
+
+  @Override
+  public void handleRequest(BizContext bizContext, AsyncContext asyncContext,
+      QueryTimeSeriesRequest request) {
+    String groupId = request.getGroupID();
+
+    if (request.getReadConsistencyLevel() == ClusterConstant.WEAK_CONSISTENCY_LEVEL) {
+      QueryTimeSeriesResponse response = QueryTimeSeriesResponse
+          .createEmptyResponse(groupId);
+      try {
+        queryTimeSeries(request, response);
+        response.addResult(true);
+      } catch (final PathErrorException e) {
+        response = QueryTimeSeriesResponse.createErrorResponse(groupId, e.getMessage());
+        response.addResult(false);
+      }
+      asyncContext.sendResponse(response);
+    } else {
+      final byte[] reqContext = RaftUtils.createRaftRequestContext();
+      DataPartitionRaftHolder dataPartitionHolder = RaftUtils.getDataPartitonRaftHolder(groupId);
+
+      ((RaftService) dataPartitionHolder.getService()).getNode()
+          .readIndex(reqContext, new ReadIndexClosure() {
+
+            @Override
+            public void run(Status status, long index, byte[] reqCtx) {
+              QueryTimeSeriesResponse response = QueryTimeSeriesResponse
+                  .createEmptyResponse(groupId);
+              if (status.isOk()) {
+                try {
+                  queryTimeSeries(request, response);
+                  response.addResult(true);
+                } catch (final PathErrorException e) {
+                  response = QueryTimeSeriesResponse.createErrorResponse(groupId, e.getMessage());
+                  response.addResult(false);
+                }
+              } else {
+                response = QueryTimeSeriesResponse
+                    .createErrorResponse(groupId, status.getErrorMsg());
+                response.addResult(false);
+              }
+              asyncContext.sendResponse(response);
+            }
+          });
+    }
+  }
+
+  /**
+   * Query timeseries schema for each path pattern in the request
+   */
+  private void queryTimeSeries(QueryTimeSeriesRequest request,
+      QueryTimeSeriesResponse response) throws PathErrorException {
+    for (String path : request.getPath()) {
+      response.addTimeSeries(mManager.getShowTimeseriesPath(path));
+    }
+  }
+
+  @Override
+  public String interest() {
+    return QueryTimeSeriesRequest.class.getName();
+  }
+}
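
The show-timeseries processor follows the same weak/strong split. A minimal, illustrative caller (not part of the patch; the group id and paths are placeholders) would be:

    import java.util.Arrays;
    import org.apache.iotdb.cluster.config.ClusterConstant;
    import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryTimeSeriesRequest;

    QueryTimeSeriesRequest request = new QueryTimeSeriesRequest(
        "data-group-0", ClusterConstant.WEAK_CONSISTENCY_LEVEL,
        Arrays.asList("root.vehicle.d0", "root.vehicle.d1"));
    // The resulting QueryTimeSeriesResponse is checked like any BasicResponse:
    // isSuccess() is true only if no error message is set and every addResult(...) was true.
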
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
new file mode 100644
index 0000000..33a4d8e
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicNonQueryRequest.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
+
+public abstract class BasicNonQueryRequest extends BasicRequest {
+
+  private static final long serialVersionUID = -3082772186451384202L;
+  /**
+   * Serialized physical plans
+   */
+  private List<byte[]> physicalPlanBytes;
+
+  public BasicNonQueryRequest(String groupID) {
+    super(groupID);
+  }
+
+  protected void init(List<PhysicalPlan> plans) throws IOException {
+    this.physicalPlanBytes = new ArrayList<>(plans.size());
+    for (PhysicalPlan plan : plans) {
+      this.physicalPlanBytes.add(PhysicalPlanLogTransfer.operatorToLog(plan));
+    }
+  }
+
+  public List<byte[]> getPhysicalPlanBytes() {
+    return physicalPlanBytes;
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
new file mode 100644
index 0000000..3ceddaf
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicQueryRequest.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request;
+
+public abstract class BasicQueryRequest extends BasicRequest {
+
+  private static final long serialVersionUID = 2993000692822502110L;
+  /**
+   * Read Consistency Level
+   */
+  private int readConsistencyLevel;
+
+  public BasicQueryRequest(String groupID, int readConsistencyLevel) {
+    super(groupID);
+    this.readConsistencyLevel = readConsistencyLevel;
+  }
+
+  public BasicQueryRequest(String groupID) {
+    super(groupID);
+  }
+
+  public int getReadConsistencyLevel() {
+    return readConsistencyLevel;
+  }
+
+  public void setReadConsistencyLevel(int readConsistencyLevel) {
+    this.readConsistencyLevel = readConsistencyLevel;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
new file mode 100644
index 0000000..dd4758a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/BasicRequest.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request;
+
+import java.io.Serializable;
+
+public abstract class BasicRequest implements Serializable {
+
+  private static final long serialVersionUID = 8434915845259380829L;
+
+  /**
+   * Group ID
+   */
+  private String groupID;
+
+  public BasicRequest(String groupID) {
+    this.groupID = groupID;
+  }
+
+  public String getGroupID() {
+    return groupID;
+  }
+
+  public void setGroupID(String groupID) {
+    this.groupID = groupID;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
new file mode 100644
index 0000000..eb81769
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/QueryMetricRequest.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request;
+
+import java.io.Serializable;
+
+public class QueryMetricRequest extends BasicQueryRequest implements Serializable {
+
+  private String metric;
+
+  public QueryMetricRequest(String groupID, int readConsistencyLevel, String metric) {
+    super(groupID, readConsistencyLevel);
+    this.metric = metric;
+  }
+
+  public String getMetric() {
+    return metric;
+  }
+}
\ No newline at end of file
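
Illustrative use only (not part of the patch): paired with the QueryMetricResponse added below, a caller would ask one group for a named metric roughly as follows; the group id and metric name are placeholders:

    import java.util.Map;
    import org.apache.iotdb.cluster.config.ClusterConstant;
    import org.apache.iotdb.cluster.rpc.raft.request.QueryMetricRequest;
    import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;

    QueryMetricRequest request =
        new QueryMetricRequest("data-group-0", ClusterConstant.WEAK_CONSISTENCY_LEVEL, "some-metric");
    // After the remote call (elided here) returns a QueryMetricResponse:
    // if (response.isSuccess()) {
    //   Map<String, Long> metrics = response.getValue();
    // }
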
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java
new file mode 100644
index 0000000..8413373
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/DataGroupNonQueryRequest.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.nonquery;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+
+/**
+ * Non-query request sent to a data group
+ */
+public class DataGroupNonQueryRequest extends BasicNonQueryRequest {
+
+  private static final long serialVersionUID = -2442407985738324604L;
+
+  public DataGroupNonQueryRequest(String groupID, List<PhysicalPlan> plans)
+      throws IOException {
+    super(groupID);
+    init(plans);
+  }
+
+}
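
A rough sketch (not part of the patch) of how a batch of already-built physical plans is wrapped for one data group; building the plans themselves is elided, and the enclosing method is assumed to declare IOException:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.iotdb.cluster.rpc.raft.request.nonquery.DataGroupNonQueryRequest;
    import org.apache.iotdb.db.qp.physical.PhysicalPlan;

    List<PhysicalPlan> plans = new ArrayList<>();  // plans routed to this group (construction elided)
    DataGroupNonQueryRequest request = new DataGroupNonQueryRequest("data-group-0", plans);
    // init(...) serializes each plan via PhysicalPlanLogTransfer.operatorToLog(...),
    // so the wire payload is request.getPhysicalPlanBytes().
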
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java
new file mode 100644
index 0000000..b29609a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/nonquery/MetaGroupNonQueryRequest.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.nonquery;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+
+/**
+ * Non-query request sent to the metadata group leader
+ */
+public class MetaGroupNonQueryRequest extends BasicNonQueryRequest {
+
+  private static final long serialVersionUID = 312899249719243646L;
+
+  public MetaGroupNonQueryRequest(String groupID, List<PhysicalPlan> plans)
+      throws IOException {
+    super(groupID);
+    this.init(plans);
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java
new file mode 100644
index 0000000..c90cf80
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataInStringRequest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QueryMetadataInStringRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -7037884610669129082L;
+
+  public QueryMetadataInStringRequest(String groupID, int readConsistencyLevel) {
+    super(groupID, readConsistencyLevel);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java
new file mode 100644
index 0000000..75ae438
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryMetadataRequest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QueryMetadataRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -1976805423799324348L;
+
+  public QueryMetadataRequest(String groupID, int readConsistencyLevel) {
+    super(groupID, readConsistencyLevel);
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java
new file mode 100644
index 0000000..b92a0e6
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryPathsRequest.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QueryPathsRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -4334131357874435256L;
+  private List<String> path;
+
+  public QueryPathsRequest(String groupID, int readConsistencyLevel, List<String> path) {
+    super(groupID, readConsistencyLevel);
+    this.path = path;
+  }
+
+  public List<String> getPath() {
+    return path;
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java
new file mode 100644
index 0000000..e46fe66
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QuerySeriesTypeRequest.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QuerySeriesTypeRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -7917403708996214075L;
+  private String path;
+
+  public QuerySeriesTypeRequest(String groupID, int readConsistencyLevel, String path) {
+    super(groupID, readConsistencyLevel);
+    this.path = path;
+  }
+
+  public String getPath() {
+    return path;
+  }
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java
new file mode 100644
index 0000000..bb3d847
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryStorageGroupRequest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QueryStorageGroupRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -1260362721166408556L;
+
+  public QueryStorageGroupRequest(String groupID, int readConsistencyLevel) {
+    super(groupID, readConsistencyLevel);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java
new file mode 100644
index 0000000..92d2f8a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querymetadata/QueryTimeSeriesRequest.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.request.querymetadata;
+
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
+
+public class QueryTimeSeriesRequest extends BasicQueryRequest {
+
+  private static final long serialVersionUID = -1902657459558399385L;
+  private List<String> path;
+
+  public QueryTimeSeriesRequest(String groupID, int readConsistencyLevel, List<String> path) {
+    super(groupID, readConsistencyLevel);
+    this.path = path;
+  }
+
+  public List<String> getPath() {
+    return path;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java
new file mode 100644
index 0000000..53e7923
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicQueryDataResponse.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+public abstract class BasicQueryDataResponse extends BasicResponse {
+
+
+  private List<BatchData> seriesBatchData = new ArrayList<>();
+
+  public BasicQueryDataResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public List<BatchData> getSeriesBatchData() {
+    return seriesBatchData;
+  }
+
+  public void setSeriesBatchData(
+      List<BatchData> seriesBatchData) {
+    this.seriesBatchData = seriesBatchData;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicResponse.java
new file mode 100644
index 0000000..bc356a5
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/BasicResponse.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+public abstract class BasicResponse implements Serializable {
+
+  private static final long serialVersionUID = 7509860476962493127L;
+
+  /**
+   * Group ID
+   */
+  private String groupId;
+  /**
+   * Marks whether the request needs to be redirected to the correct leader
+   */
+  private boolean redirected;
+  /**
+   * Marks whether each item of the request succeeded
+   */
+  private List<Boolean> results;
+  /**
+   * Leader to redirect to
+   */
+  private String leaderStr;
+  /**
+   * Error message
+   */
+  private String errorMsg;
+
+  public BasicResponse(String groupId, boolean redirected, String leaderStr, String errorMsg) {
+    this.groupId = groupId;
+    this.redirected = redirected;
+    this.results = new ArrayList<>();
+    this.errorMsg = errorMsg;
+    this.leaderStr = leaderStr;
+  }
+
+  public String getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(String groupId) {
+    this.groupId = groupId;
+  }
+
+  public boolean isRedirected() {
+    return redirected;
+  }
+
+  public void setRedirected(boolean redirected) {
+    this.redirected = redirected;
+  }
+
+  public String getLeaderStr() {
+    return leaderStr;
+  }
+
+  public void setLeaderStr(String leaderStr) {
+    this.leaderStr = leaderStr;
+  }
+
+  public String getErrorMsg() {
+    return errorMsg;
+  }
+
+  public void setErrorMsg(String errorMsg) {
+    this.errorMsg = errorMsg;
+  }
+
+  public void addResult(boolean success) {
+    results.add(success);
+  }
+
+  public List<Boolean> getResults() {
+    return results;
+  }
+
+  public boolean isSuccess() {
+    if (errorMsg != null) {
+      return false;
+    }
+    for (boolean result : results) {
+      if (!result) {
+        return false;
+      }
+    }
+    return true;
+  }
+}
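
isSuccess() above requires both that no error message was set and that every per-item result added with addResult(...) is true. A small illustration (not part of the patch), using the DataGroupNonQueryResponse factory added below:

    DataGroupNonQueryResponse response = DataGroupNonQueryResponse.createEmptyResponse("data-group-0");
    response.addResult(true);
    response.addResult(false);   // one failed sub-plan
    // response.isSuccess() -> false, even though no error message was set
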
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
new file mode 100644
index 0000000..9c77792
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/QueryMetricResponse.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response;
+
+import java.util.Map;
+
+public class QueryMetricResponse extends BasicResponse {
+
+  private Map<String, Long> value;
+
+  private QueryMetricResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public static QueryMetricResponse createSuccessResponse(String groupId, Map<String, Long> value) {
+    QueryMetricResponse response = new QueryMetricResponse(groupId, false, null,
+        null);
+    response.value = value;
+    return response;
+  }
+
+  public static QueryMetricResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetricResponse(groupId, false, null, errorMsg);
+  }
+
+  public Map<String, Long> getValue() {
+    return value;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
new file mode 100644
index 0000000..e9e858d
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/DataGroupNonQueryResponse.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+/**
+ * Response to a non-query request, returned by a data group leader
+ */
+public class DataGroupNonQueryResponse extends BasicResponse {
+
+  private static final long serialVersionUID = -8288044965888956717L;
+
+  private List<String> errorMsgList;
+
+  private DataGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+    errorMsgList = new ArrayList<>();
+  }
+
+  public static DataGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
+    return new DataGroupNonQueryResponse(groupId, true, leaderStr, null);
+  }
+
+  public static DataGroupNonQueryResponse createEmptyResponse(String groupId) {
+    return new DataGroupNonQueryResponse(groupId, false, null, null);
+  }
+
+  public static DataGroupNonQueryResponse createErrorResponse(String groupId, String errorMsg) {
+    return new DataGroupNonQueryResponse(groupId, false, null, errorMsg);
+  }
+
+  public List<String> getErrorMsgList() {
+    return errorMsgList;
+  }
+
+  public void addErrorMsg(String errorMsg) {
+    this.errorMsgList.add(errorMsg);
+  }
+}
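
One plausible way (illustrative only, not necessarily how the processor does it) to use the per-plan error list together with the per-item results:

    DataGroupNonQueryResponse response = DataGroupNonQueryResponse.createEmptyResponse("data-group-0");
    try {
      // execute one physical plan (elided)
      response.addResult(true);
    } catch (Exception e) {
      response.addResult(false);
      response.addErrorMsg(e.getMessage());   // one entry in getErrorMsgList() per failed plan
    }
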
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java
new file mode 100644
index 0000000..653958a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/nonquery/MetaGroupNonQueryResponse.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.nonquery;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+/**
+ * Response to a non-query request, returned by the metadata group leader
+ */
+public class MetaGroupNonQueryResponse extends BasicResponse {
+
+  private static final long serialVersionUID = -7444143717755803056L;
+
+  private MetaGroupNonQueryResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public static MetaGroupNonQueryResponse createRedirectedResponse(String groupId, String leaderStr) {
+    return new MetaGroupNonQueryResponse(groupId, true, leaderStr, null);
+  }
+
+  public static MetaGroupNonQueryResponse createEmptyResponse(String groupId) {
+    return new MetaGroupNonQueryResponse(groupId, false, null, null);
+  }
+
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java
new file mode 100644
index 0000000..98b8201
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataInStringResponse.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public class QueryMetadataInStringResponse extends BasicResponse {
+
+  private static final long serialVersionUID = 5704333006127833921L;
+  private String metadata;
+
+  private QueryMetadataInStringResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public static QueryMetadataInStringResponse createSuccessResponse(String groupId,
+      String metadata) {
+    QueryMetadataInStringResponse response = new QueryMetadataInStringResponse(groupId, false, null,
+        null);
+    response.metadata = metadata;
+    return response;
+  }
+
+  public static QueryMetadataInStringResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetadataInStringResponse(groupId, false, null, errorMsg);
+  }
+
+  public String getMetadata() {
+    return metadata;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java
new file mode 100644
index 0000000..20e09f2
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryMetadataResponse.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.db.metadata.Metadata;
+
+public class QueryMetadataResponse extends BasicResponse {
+
+  private static final long serialVersionUID = -3969749781116510054L;
+  private Metadata metadata;
+
+  private QueryMetadataResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public static QueryMetadataResponse createSuccessResponse(String groupId,
+      Metadata metadata) {
+    QueryMetadataResponse response = new QueryMetadataResponse(groupId, false, null,
+        null);
+    response.metadata = metadata;
+    return response;
+  }
+
+  public static QueryMetadataResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryMetadataResponse(groupId, false, null, errorMsg);
+  }
+
+  public Metadata getMetadata() {
+    return metadata;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java
new file mode 100644
index 0000000..171563a
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryPathsResponse.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public class QueryPathsResponse extends BasicResponse {
+
+  private static final long serialVersionUID = -8255822509893237195L;
+  private List<String> paths;
+
+  private QueryPathsResponse(String groupId, boolean redirected, boolean success, String leaderStr, String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+    this.addResult(success);
+    paths = new ArrayList<>();
+  }
+
+  public static QueryPathsResponse createEmptyResponse(String groupId) {
+    return new QueryPathsResponse(groupId, false, true, null, null);
+  }
+
+  public static QueryPathsResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryPathsResponse(groupId, false, false, null, errorMsg);
+  }
+
+  public List<String> getPaths() {
+    return paths;
+  }
+
+  public void addPaths(List<String> paths) {
+    this.paths.addAll(paths);
+  }
+
+}
\ No newline at end of file
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java
new file mode 100644
index 0000000..eee45c6
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QuerySeriesTypeResponse.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+
+public class QuerySeriesTypeResponse extends BasicResponse {
+
+  private static final long serialVersionUID = 7977583965911799165L;
+  private TSDataType dataType;
+
+  private QuerySeriesTypeResponse(String groupId, boolean redirected, String leaderStr,
+      String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+  }
+
+  public static QuerySeriesTypeResponse createSuccessResponse(String groupId, TSDataType dataType) {
+    QuerySeriesTypeResponse response = new QuerySeriesTypeResponse(groupId, false, null,
+        null);
+    response.dataType = dataType;
+    return response;
+  }
+
+  public static QuerySeriesTypeResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QuerySeriesTypeResponse(groupId, false, null, errorMsg);
+  }
+
+  public TSDataType getDataType() {
+    return dataType;
+  }
+
+  public void setDataType(TSDataType dataType) {
+    this.dataType = dataType;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java
new file mode 100644
index 0000000..8a3bb11
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryStorageGroupResponse.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import java.util.Set;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public class QueryStorageGroupResponse extends BasicResponse {
+
+  private static final long serialVersionUID = 248840631619860233L;
+  private Set<String> storageGroups;
+
+  private QueryStorageGroupResponse(boolean success, String leaderStr, String errorMsg) {
+    super(null, false, leaderStr, errorMsg);
+    this.addResult(success);
+  }
+
+  public static QueryStorageGroupResponse createSuccessResponse(Set<String> storageGroups) {
+    QueryStorageGroupResponse response = new QueryStorageGroupResponse(true, null, null);
+    response.setStorageGroups(storageGroups);
+    return response;
+  }
+
+  public static QueryStorageGroupResponse createErrorResponse(String errorMsg) {
+    return new QueryStorageGroupResponse(false, null, errorMsg);
+  }
+
+  public Set<String> getStorageGroups() {
+    return storageGroups;
+  }
+
+  public void setStorageGroups(Set<String> storageGroups) {
+    this.storageGroups = storageGroups;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java
new file mode 100644
index 0000000..1e029e8
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/response/querymetadata/QueryTimeSeriesResponse.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.rpc.raft.response.querymetadata;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
+
+public class QueryTimeSeriesResponse extends BasicResponse {
+
+  private static final long serialVersionUID = 8313150788331085964L;
+  private List<List<String>> timeSeries;
+
+  private QueryTimeSeriesResponse(String groupId, boolean redirected, boolean success, String leaderStr, String errorMsg) {
+    super(groupId, redirected, leaderStr, errorMsg);
+    this.addResult(success);
+    timeSeries = new ArrayList<>();
+  }
+
+  public static QueryTimeSeriesResponse createEmptyResponse(String groupId){
+    return new QueryTimeSeriesResponse(groupId, false, true, null, null);
+  }
+
+  public static QueryTimeSeriesResponse createErrorResponse(String groupId, String errorMsg) {
+    return new QueryTimeSeriesResponse(groupId, false, false, null, errorMsg);
+  }
+
+  public List<List<String>> getTimeSeries() {
+    return timeSeries;
+  }
+
+  public void addTimeSeries(List<List<String>> timeSeries){
+    this.timeSeries.addAll(timeSeries);
+  }
+
+}
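
Note on the querymetadata response classes above: they all share the same shape, a private constructor plus createEmptyResponse/createErrorResponse factories, with the payload accumulated afterwards. A minimal usage sketch, assuming the cluster module is on the classpath; the group id and row values below are made up:

import java.util.Arrays;
import org.apache.iotdb.cluster.rpc.raft.response.querymetadata.QueryTimeSeriesResponse;

public class QueryTimeSeriesResponseSketch {
  public static void main(String[] args) {
    // Start from an empty, successful response for one data group (the id is made up).
    QueryTimeSeriesResponse response = QueryTimeSeriesResponse.createEmptyResponse("group-0");
    // Each timeseries is a row of columns, e.g. path, storage group, data type, encoding.
    response.addTimeSeries(Arrays.asList(
        Arrays.asList("root.vehicle.d0.s0", "root.vehicle", "INT32", "RLE")));
    System.out.println(response.getTimeSeries());
  }
}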
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
new file mode 100644
index 0000000..498fdd1
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.service;
+
+import java.io.IOException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.iotdb.cluster.config.ClusterConstant;
+import org.apache.iotdb.cluster.exception.ConsistencyLevelException;
+import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
+import org.apache.iotdb.cluster.qp.executor.NonQueryExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
+import org.apache.iotdb.db.auth.AuthException;
+import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.Metadata;
+import org.apache.iotdb.db.qp.QueryProcessor;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.service.TSServiceImpl;
+import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementReq;
+import org.apache.iotdb.service.rpc.thrift.TSExecuteBatchStatementResp;
+import org.apache.iotdb.service.rpc.thrift.TS_StatusCode;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Distributed version of RPC implementation
+ */
+public class TSServiceClusterImpl extends TSServiceImpl {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceClusterImpl.class);
+
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
+  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
+  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+
+  public TSServiceClusterImpl() throws IOException {
+    super();
+    processor = new QueryProcessor(queryDataExecutor);
+  }
+
+
+  @Override
+  protected Set<String> getAllStorageGroups() throws InterruptedException {
+    return queryMetadataExecutor.processStorageGroupQuery();
+  }
+
+  @Override
+  protected List<List<String>> getTimeSeriesForPath(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processTimeSeriesQuery(path);
+  }
+
+  @Override
+  protected String getMetadataInString()
+      throws InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processMetadataInStringQuery();
+  }
+
+  @Override
+  protected Metadata getMetadata()
+      throws InterruptedException, ProcessorException, PathErrorException {
+    return queryMetadataExecutor.processMetadataQuery();
+  }
+
+  @Override
+  protected TSDataType getSeriesType(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processSeriesTypeQuery(path);
+  }
+
+  @Override
+  protected List<String> getPaths(String path)
+      throws PathErrorException, InterruptedException, ProcessorException {
+    return queryMetadataExecutor.processPathsQuery(path);
+  }
+
+  @Override
+  public TSExecuteBatchStatementResp executeBatchStatement(TSExecuteBatchStatementReq req)
+      throws TException {
+    try {
+      if (!checkLogin()) {
+        LOGGER.info(INFO_NOT_LOGIN, IoTDBConstant.GLOBAL_DB_NAME);
+        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS, ERROR_NOT_LOGIN, null);
+      }
+      List<String> statements = req.getStatements();
+      PhysicalPlan[] physicalPlans = new PhysicalPlan[statements.size()];
+      int[] result = new int[statements.size()];
+      StringBuilder batchErrorMessage = new StringBuilder();
+      boolean isAllSuccessful = true;
+
+      /* find all valid physical plans */
+      for (int i = 0; i < statements.size(); i++) {
+        try {
+          PhysicalPlan plan = processor
+              .parseSQLToPhysicalPlan(statements.get(i), zoneIds.get());
+          plan.setProposer(username.get());
+
+          /* if a query is met, execute the statements before it and reject the batch. */
+          if (plan.isQuery()) {
+            int[] resultTemp = new int[i];
+            PhysicalPlan[] physicalPlansTemp = new PhysicalPlan[i];
+            System.arraycopy(result, 0, resultTemp, 0, i);
+            System.arraycopy(physicalPlans, 0, physicalPlansTemp, 0, i);
+            result = resultTemp;
+            physicalPlans = physicalPlansTemp;
+            BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
+            nonQueryExecutor.processBatch(physicalPlans, batchResult);
+            batchErrorMessage.append(String
+                .format(ERROR_MESSAGE_FORMAT_IN_BATCH, i,
+                    "statement is query :" + statements.get(i)));
+            return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
+                statements.get(i), Arrays.stream(result).boxed().collect(
+                    Collectors.toList()));
+          }
+
+          // check permissions
+          List<Path> paths = plan.getPaths();
+          if (!checkAuthorization(paths, plan)) {
+            String errMessage = String.format("No permissions for this operation %s",
+                plan.getOperatorType());
+            result[i] = Statement.EXECUTE_FAILED;
+            isAllSuccessful = false;
+            batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
+          } else {
+            physicalPlans[i] = plan;
+          }
+        } catch (AuthException e) {
+          LOGGER.error("meet error while checking authorization.", e);
+          String errMessage = String.format("Uninitialized authorizer" + " beacuse %s",
+              e.getMessage());
+          result[i] = Statement.EXECUTE_FAILED;
+          isAllSuccessful = false;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
+        } catch (Exception e) {
+          String errMessage = String.format("Fail to generate physcial plan" + "%s beacuse %s",
+              statements.get(i), e.getMessage());
+          result[i] = Statement.EXECUTE_FAILED;
+          isAllSuccessful = false;
+          batchErrorMessage.append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, i, errMessage));
+        }
+      }
+
+      BatchResult batchResult = new BatchResult(isAllSuccessful, batchErrorMessage, result);
+      nonQueryExecutor.processBatch(physicalPlans, batchResult);
+      batchErrorMessage.append(batchResult.batchErrorMessage);
+      isAllSuccessful = batchResult.isAllSuccessful;
+
+      if (isAllSuccessful) {
+        return getTSBathExecuteStatementResp(TS_StatusCode.SUCCESS_STATUS,
+            "Execute batch statements successfully", Arrays.stream(result).boxed().collect(
+                Collectors.toList()));
+      } else {
+        return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS,
+            batchErrorMessage.toString(),
+            Arrays.stream(result).boxed().collect(
+                Collectors.toList()));
+      }
+    } catch (Exception e) {
+      LOGGER.error("{}: error occurs when executing statements", IoTDBConstant.GLOBAL_DB_NAME, e);
+      return getTSBathExecuteStatementResp(TS_StatusCode.ERROR_STATUS, e.getMessage(), null);
+    }
+  }
+
+  /**
+   * Represents the results of a batch execution.
+   */
+  public class BatchResult {
+
+    private boolean isAllSuccessful;
+    private StringBuilder batchErrorMessage;
+    private int[] resultArray;
+
+    private BatchResult(boolean isAllSuccessful, StringBuilder batchErrorMessage,
+        int[] resultArray) {
+      this.isAllSuccessful = isAllSuccessful;
+      this.batchErrorMessage = batchErrorMessage;
+      this.resultArray = resultArray;
+    }
+
+    public boolean isAllSuccessful() {
+      return isAllSuccessful;
+    }
+
+    public void setAllSuccessful(boolean allSuccessful) {
+      isAllSuccessful = allSuccessful;
+    }
+
+    public StringBuilder getBatchErrorMessage() {
+      return batchErrorMessage;
+    }
+
+    public void addBatchErrorMessage(int index, String batchErrorMessage) {
+      this.batchErrorMessage
+          .append(String.format(ERROR_MESSAGE_FORMAT_IN_BATCH, index, batchErrorMessage));
+    }
+
+    public int[] getResultArray() {
+      return resultArray;
+    }
+
+    public void setResultArray(int[] resultArray) {
+      this.resultArray = resultArray;
+    }
+  }
+
+  @Override
+  public boolean execSetConsistencyLevel(String statement) throws Exception {
+    if (statement == null) {
+      return false;
+    }
+    statement = statement.toLowerCase().trim();
+    try {
+      if (Pattern.matches(ClusterConstant.SET_READ_METADATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
+        String[] splits = statement.split("\\s+");
+        int level = Integer.parseInt(splits[splits.length - 1]);
+        queryMetadataExecutor.setReadMetadataConsistencyLevel(level);
+        return true;
+      } else if (Pattern
+          .matches(ClusterConstant.SET_READ_DATA_CONSISTENCY_LEVEL_PATTERN, statement)) {
+        String[] splits = statement.split("\\s+");
+        int level = Integer.parseInt(splits[splits.length - 1]);
+        queryDataExecutor.setReadDataConsistencyLevel(level);
+        return true;
+      } else {
+        return false;
+      }
+    } catch (ConsistencyLevelException e) {
+      throw new Exception(e.getMessage());
+    }
+  }
+
+  @Override
+  protected boolean executeNonQuery(PhysicalPlan plan) throws ProcessorException {
+    return nonQueryExecutor.processNonQuery(plan);
+  }
+
+  @Override
+  protected void checkFileLevelSet(List<Path> paths) throws PathErrorException {
+    // It's unnecessary to do this check; it has been checked when transforming the query physical plan.
+  }
+
+  @Override
+  public void handleClientExit() throws TException {
+    closeClusterService();
+    closeOperation(null);
+    closeSession(null);
+  }
+
+  /**
+   * Close cluster service
+   */
+  public void closeClusterService() {
+    nonQueryExecutor.shutdown();
+    queryMetadataExecutor.shutdown();
+  }
+
+  public ClusterQueryProcessExecutor getQueryDataExecutor() {
+    return queryDataExecutor;
+  }
+
+  public void setQueryDataExecutor(
+      ClusterQueryProcessExecutor queryDataExecutor) {
+    this.queryDataExecutor = queryDataExecutor;
+  }
+
+  public QueryMetadataExecutor getQueryMetadataExecutor() {
+    return queryMetadataExecutor;
+  }
+
+  public void setNonQueryExecutor(NonQueryExecutor nonQueryExecutor) {
+    this.nonQueryExecutor = nonQueryExecutor;
+  }
+}
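
Note on execSetConsistencyLevel above: the statement is lower-cased, matched against the patterns in ClusterConstant, and the last whitespace-separated token is parsed as the level. A self-contained sketch of just that parsing step; the example statement text is an assumption, the accepted syntax is whatever the ClusterConstant patterns define:

public class ConsistencyLevelParseSketch {
  public static void main(String[] args) {
    // Hypothetical input; the real syntax is defined by the regexes in ClusterConstant.
    String statement = "set read metadata consistency level 2";
    statement = statement.toLowerCase().trim();
    // Same extraction as execSetConsistencyLevel: take the trailing integer token.
    String[] splits = statement.split("\\s+");
    int level = Integer.parseInt(splits[splits.length - 1]);
    System.out.println("read metadata consistency level = " + level); // prints 2
  }
}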
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
new file mode 100644
index 0000000..5a60351
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/QPExecutorUtils.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.utils;
+
+import com.alipay.sofa.jraft.util.OnlyForTest;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
+
+/**
+ * Utils for QP executor
+ */
+public class QPExecutorUtils {
+
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+
+  private static final Router router = Router.getInstance();
+
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final MManager mManager = MManager.getInstance();
+
+  private static final Server server = Server.getInstance();
+
+  private QPExecutorUtils() {
+  }
+
+  /**
+   * Get Storage Group Name by device name
+   */
+  public static String getStroageGroupByDevice(String device) throws PathErrorException {
+    String storageGroup;
+    try {
+      storageGroup = MManager.getInstance().getFileNameByPath(device);
+    } catch (PathErrorException e) {
+      throw new PathErrorException(String.format("File level of %s doesn't exist.", device));
+    }
+    return storageGroup;
+  }
+
+  /**
+   * Get all Storage Group Names by path
+   */
+  public static List<String> getAllStroageGroupsByPath(String path) throws PathErrorException {
+    List<String> storageGroupList;
+    try {
+      storageGroupList = mManager.getAllFileNamesByPath(path);
+    } catch (PathErrorException e) {
+      throw new PathErrorException(String.format("File level of %s doesn't exist.", path));
+    }
+    return storageGroupList;
+  }
+
+  /**
+   * Classify the input storage group list by the data group each storage group belongs to.
+   *
+   * @return key is groupId, value is the set of SGs belonging to that data group
+   */
+  public static Map<String, Set<String>> classifySGByGroupId(List<String> sgList) {
+    Map<String, Set<String>> map = new HashMap<>();
+    for (int i = 0; i < sgList.size(); i++) {
+      String sg = sgList.get(i);
+      String groupId = router.getGroupIdBySG(sg);
+      if (map.containsKey(groupId)) {
+        map.get(groupId).add(sg);
+      } else {
+        Set<String> set = new HashSet<>();
+        set.add(sg);
+        map.put(groupId, set);
+      }
+    }
+    return map;
+  }
... 201758 lines suppressed ...
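
One note on QPExecutorUtils.classifySGByGroupId above: the containsKey branch can be collapsed with Map.computeIfAbsent, the idiom this same merge adopts in NonQueryExecutor below. A behavior-preserving sketch of the method body, relying on the same imports and the Router instance already used in the class:

  public static Map<String, Set<String>> classifySGByGroupId(List<String> sgList) {
    Map<String, Set<String>> map = new HashMap<>();
    for (String sg : sgList) {
      // Create the per-group set on first use, then add the storage group to it.
      map.computeIfAbsent(Router.getInstance().getGroupIdBySG(sg), k -> new HashSet<>()).add(sg);
    }
    return map;
  }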


[incubator-iotdb] 02/02: fix lots of conflicts with cluster_framework

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 51f91b6b99a8b86c68790bf7af308ee9380b31e6
Merge: 411ea77 41a49ad
Author: lta <li...@163.com>
AuthorDate: Thu May 23 15:27:05 2019 +0800

    fix lots of conflicts with cluster_framework

 .../iotdb/cluster/concurrent/ThreadName.java       |   5 +
 .../concurrent/pool/NodeAsClientThreadManager.java |  64 ++++++++
 .../concurrent/pool/QPTaskThreadManager.java       |  64 ++++++++
 .../concurrent/pool/QueryTimerThreadManager.java   |  74 ++++++++++
 .../cluster/concurrent/pool/ThreadPoolManager.java |  15 +-
 .../apache/iotdb/cluster/config/ClusterConfig.java |  34 ++---
 .../iotdb/cluster/config/ClusterConstant.java      |   2 +-
 .../iotdb/cluster/config/ClusterDescriptor.java    |  17 ++-
 .../org/apache/iotdb/cluster/entity/Server.java    |  23 ++-
 .../cluster/entity/raft/DataStateMachine.java      |  18 ++-
 .../cluster/qp/executor/AbstractQPExecutor.java    |  23 ++-
 .../cluster/qp/executor/NonQueryExecutor.java      |  72 ++++-----
 .../cluster/qp/executor/QueryMetadataExecutor.java | 142 +++++++++++-------
 .../apache/iotdb/cluster/qp/task/BatchQPTask.java  |  72 ++++-----
 .../iotdb/cluster/qp/task/DataQueryTask.java       |  30 +---
 .../org/apache/iotdb/cluster/qp/task/QPTask.java   |  51 ++++++-
 .../apache/iotdb/cluster/qp/task/SingleQPTask.java |   2 +-
 .../coordinatornode/ClusterRpcQueryManager.java    |  12 ++
 .../coordinatornode/IClusterRpcQueryManager.java   |   5 +
 .../querynode/ClusterLocalQueryManager.java        |  13 ++
 .../querynode/IClusterLocalQueryManager.java       |   5 +
 .../cluster/query/utils/ClusterRpcReaderUtils.java |  22 ++-
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  14 +-
 .../rpc/raft/impl/RaftNodeAsClientManager.java     | 161 ++++++++-------------
 .../raft/processor/QueryMetricAsyncProcessor.java  |  44 ++++++
 .../nonquery/DataGroupNonQueryAsyncProcessor.java  |   5 +-
 .../querymetadata/QueryMetadataAsyncProcessor.java |   2 +-
 .../raft/request/QueryMetricRequest.java}          |  27 ++--
 .../raft/response/QueryMetricResponse.java}        |  35 ++---
 .../nonquery/DataGroupNonQueryResponse.java        |  12 ++
 .../cluster/service/TSServiceClusterImpl.java      |  51 ++++---
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |  54 +++----
 .../iotdb/cluster/utils/hash/PhysicalNode.java     |   5 +
 .../apache/iotdb/cluster/utils/hash/Router.java    |   8 +-
 .../iotdb/cluster/utils/hash/VirtualNode.java      |  18 +--
 .../concurrent/pool/QPTaskThreadManagerTest.java   |  85 +++++++++++
 .../cluster/config/ClusterDescriptorTest.java      |  14 +-
 .../integration/IoTDBMetadataFetchAbstract.java    |  63 ++++----
 .../integration/IoTDBMetadataFetchLocallyIT.java   |   1 +
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  19 +--
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   1 -
 .../iotdb/cluster/utils/hash/MD5HashTest.java      |   8 +-
 .../iotdb/cluster/utils/hash/PhysicalNodeTest.java |   4 +-
 .../iotdb/cluster/utils/hash/RouterTest.java       |  11 +-
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            |   2 +-
 iotdb/iotdb/conf/iotdb-cluster.properties          |  15 +-
 .../iotdb/db/qp/executor/QueryProcessExecutor.java |   2 +-
 .../db/query/executor/AggregateEngineExecutor.java |   2 -
 .../iotdb/db/query/executor/EngineQueryRouter.java |   1 -
 .../db/query/executor/IEngineQueryRouter.java      |  78 ++++++++++
 .../org/apache/iotdb/db/service/TSServiceImpl.java |  19 ++-
 service-rpc/src/main/thrift/rpc.thrift             |   2 +-
 52 files changed, 1015 insertions(+), 513 deletions(-)

diff --cc cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
index 828cc1a,72bec94..60e8a75
--- a/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/concurrent/pool/ThreadPoolManager.java
@@@ -38,7 -42,7 +42,7 @@@ public abstract class ThreadPoolManage
    /**
     * Init pool manager
     */
--  public void init(){
++  public void init() {
      pool = IoTDBThreadPoolFactory.newFixedThreadPool(getThreadPoolSize(), getThreadName());
    }
  
@@@ -53,14 -57,14 +57,13 @@@
    public void close(boolean block, long timeout) throws ProcessorException {
      if (pool != null) {
        try {
--        pool.shutdown();
++        pool.shutdownNow();
          if (block) {
            try {
              if (!pool.awaitTermination(timeout, TimeUnit.MILLISECONDS)) {
-               throw new ProcessorException(
 -              LOGGER.debug(
--                  String
--                      .format("%s thread pool doesn't exit after %d ms", getManagerName(),
--                          timeout));
++              LOGGER
++                  .debug(String.format("%s thread pool doesn't exit after %d ms", getManagerName(),
++                      timeout));
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
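
Note on the ThreadPoolManager hunk above: close() now calls shutdownNow() and, when blocking, only logs if the pool fails to terminate within the timeout instead of throwing a ProcessorException. A stand-alone sketch of that shutdown pattern, with plain Executors standing in for IoTDBThreadPoolFactory:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PoolCloseSketch {

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    close(pool, true, 1000);
  }

  static void close(ExecutorService pool, boolean block, long timeoutMs) {
    pool.shutdownNow();                                  // cancel queued and running tasks
    if (block) {
      try {
        if (!pool.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)) {
          System.err.println("thread pool doesn't exit after " + timeoutMs + " ms");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();              // preserve the interrupt flag
      }
    }
  }
}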
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index 627f561,1ab6eda..230afcb
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@@ -110,15 -110,15 +110,15 @@@ public class ClusterConfig 
    private int numOfVirtualNodes = 2;
  
    /**
-    * Maximum number of @NodeAsClient usage
 -   * Maximum number of inner rpc client thread. When this value <= 0, use CPU core number * 10
++   * Maximum number of inner rpc client thread. When this value <= 0, use CPU core number * 5
     */
-   private int maxNumOfInnerRpcClient = 500;
 -  private int concurrentInnerRpcClientThread = Runtime.getRuntime().availableProcessors() * 10;
++  private int concurrentInnerRpcClientThread = Runtime.getRuntime().availableProcessors() * 5;
  
    /**
-    * Maximum number of queue length to use @NodeAsClient, the request which exceed to this number
-    * will be rejected.
+    * Maximum queue length of qp tasks waiting to be executed. If the number of waiting qp
+    * tasks exceeds this limit, new qp tasks will be rejected.
     */
-   private int maxQueueNumOfInnerRpcClient = 500;
+   private int maxQueueNumOfQPTask = 500;
  
    /**
     * ReadMetadataConsistencyLevel: 1 Strong consistency, 2 Weak consistency
@@@ -135,11 -135,11 +135,11 @@@
     * client request corresponds to a QP Task. A QP task may be divided into several sub-tasks. So
     * this value is the sum of all sub-tasks. When this value <= 0, use CPU core number * 10
     */
--  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 10;
++  private int concurrentQPSubTaskThread = Runtime.getRuntime().availableProcessors() * 5;
  
    /**
-    * Batch data size read from remote query node once while reading, default value is 10000.
-    * The smaller the parameter, the more communication times and the more time-consuming it is.
+    * Batch data size read from a remote query node in one request, default value is 10000. The
+    * smaller this parameter, the more round trips are needed and the more time-consuming reading is.
     */
    private int batchReadSize = 10000;
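
Note on the thread-count options above: both comments describe a fallback to a multiple of the CPU core count when the configured value is <= 0. Where that fallback is applied is not part of this hunk, so the helper below only illustrates the convention; the multiplier matches the new defaults in this hunk:

  // Illustration only: resolve a configured thread count, falling back to
  // CPU cores * 5 when the configured value is <= 0.
  private static int resolveThreadCount(int configured) {
    return configured > 0 ? configured : Runtime.getRuntime().availableProcessors() * 5;
  }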
  
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
index a84e389,85ab80d..d12f78f
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@@ -31,28 -31,19 +31,32 @@@ import org.apache.iotdb.cluster.entity.
  import org.apache.iotdb.cluster.entity.metadata.MetadataHolder;
  import org.apache.iotdb.cluster.entity.raft.DataPartitionRaftHolder;
  import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
++import org.apache.iotdb.cluster.exception.RaftConnectionException;
++import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
++import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
  import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
+ import org.apache.iotdb.cluster.rpc.raft.processor.QueryMetricAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.DataGroupNonQueryAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.nonquery.MetaGroupNonQueryAsyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querydata.CloseSeriesReaderSyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querydata.InitSeriesReaderSyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querydata.QuerySeriesDataByTimestampSyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querydata.QuerySeriesDataSyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryMetadataInStringAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryPathsAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QuerySeriesTypeAsyncProcessor;
  import org.apache.iotdb.cluster.rpc.raft.processor.querymetadata.QueryTimeSeriesAsyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryJobNumAsyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryLeaderAsyncProcessor;
- import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryMetricAsyncProcessor;
 +import org.apache.iotdb.cluster.rpc.raft.processor.querymetric.QueryStatusAsyncProcessor;
 +import org.apache.iotdb.cluster.service.ClusterMonitor;
  import org.apache.iotdb.cluster.utils.RaftUtils;
  import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
  import org.apache.iotdb.cluster.utils.hash.Router;
++import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.ProcessorException;
 +import org.apache.iotdb.db.exception.StartupException;
  import org.apache.iotdb.db.service.IoTDB;
  import org.apache.iotdb.db.service.RegisterManager;
  import org.slf4j.Logger;
@@@ -92,12 -83,12 +96,15 @@@ public class Server 
  
    private RegisterManager registerManager = new RegisterManager();
  
-   public static void main(String[] args) throws ProcessorException, InterruptedException {
 -  public static void main(String[] args) {
++  public static void main(String[] args)
++      throws ProcessorException, InterruptedException, RaftConnectionException, FileNodeManagerException {
      Server server = Server.getInstance();
      server.start();
    }
  
-   public void start() throws ProcessorException, InterruptedException {
 -  public void start() {
++  public void start()
++      throws ProcessorException, InterruptedException, RaftConnectionException, FileNodeManagerException {
++
      /** Stand-alone version of IoTDB, be careful to replace the internal JDBC Server with a cluster version **/
      iotdb = new IoTDB();
      iotdb.active();
@@@ -157,24 -142,17 +166,26 @@@
      rpcServer.registerUserProcessor(new QueryPathsAsyncProcessor());
    }
  
 -  /**
 -   * for nodetool
 -   */
 +  private void registerQueryDataProcessor(RpcServer rpcServer) {
 +    rpcServer.registerUserProcessor(new InitSeriesReaderSyncProcessor());
 +    rpcServer.registerUserProcessor(new QuerySeriesDataSyncProcessor());
 +    rpcServer.registerUserProcessor(new QuerySeriesDataByTimestampSyncProcessor());
 +    rpcServer.registerUserProcessor(new CloseSeriesReaderSyncProcessor());
 +  }
 +
    private void registerQueryMetricProcessor(RpcServer rpcServer) {
      rpcServer.registerUserProcessor(new QueryMetricAsyncProcessor());
 +    rpcServer.registerUserProcessor(new QueryJobNumAsyncProcessor());
 +    rpcServer.registerUserProcessor(new QueryStatusAsyncProcessor());
 +    rpcServer.registerUserProcessor(new QueryLeaderAsyncProcessor());
    }
  
-   public void stop() throws ProcessorException, InterruptedException {
-     QPTaskManager.getInstance().close(true, ClusterConstant.CLOSE_QP_SUB_TASK_BLOCK_TIMEOUT);
 -  public void stop() throws ProcessorException {
 -    QPTaskThreadManager.getInstance().close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
--    iotdb.deactivate();
++  public void stop() throws ProcessorException, RaftConnectionException, FileNodeManagerException {
++    QPTaskManager.getInstance().close(true, ClusterConstant.CLOSE_THREAD_POOL_BLOCK_TIMEOUT);
++    ClusterRpcQueryManager.getInstance().close();
++    ClusterLocalQueryManager.getInstance().close();
      CLIENT_MANAGER.shutdown();
++    iotdb.deactivate();
      metadataHolder.stop();
      for (DataPartitionHolder dataPartitionHolder : dataPartitionHolderMap.values()) {
        dataPartitionHolder.stop();
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
index 9841781,96f150f..faacfff
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/AbstractQPExecutor.java
@@@ -87,7 -87,7 +87,6 @@@ public abstract class AbstractQPExecuto
     * Async handle QPTask by QPTask and leader id
     *
     * @param task request QPTask
--   * @param leader leader of the target raft group
     * @param taskRetryNum Number of QPTask retries due to timeout and redirected.
     * @return basic response
     */
@@@ -127,7 -123,7 +122,10 @@@
      task.await();
      PeerId leader;
      if (task.getTaskState() != TaskState.FINISH) {
--      if (task.getTaskState() == TaskState.REDIRECT) {
++      if (task.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
++        throw new RaftConnectionException(
++            String.format("Can not connect to remote node : %s", task.getTargetNode()));
++      } else if (task.getTaskState() == TaskState.REDIRECT) {
          /** redirect to the right leader **/
          leader = PeerId.parsePeer(task.getResponse().getLeaderStr());
          LOGGER.debug("Redirect leader: {}, group id = {}", leader, task.getRequest().getGroupID());
@@@ -136,10 -132,11 +134,11 @@@
          String groupId = task.getRequest().getGroupID();
          RaftUtils.removeCachedRaftGroupLeader(groupId);
          LOGGER.debug("Remove cached raft group leader of {}", groupId);
 -        leader = RaftUtils.getLeaderPeerID(groupId);
 +        leader = RaftUtils.getLocalLeaderPeerID(groupId);
        }
+       task.setTargetNode(leader);
        task.resetTask();
-       return asyncHandleNonQuerySingleTaskGetRes(task, leader, taskRetryNum + 1);
+       return syncHandleNonQuerySingleTaskGetRes(task, taskRetryNum + 1);
      }
      return task.getResponse();
    }
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
index 515fb6a,f62a83f..7ba9ef7
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/NonQueryExecutor.java
@@@ -133,7 -131,7 +132,8 @@@ public class NonQueryExecutor extends A
     */
    private void classifyPhysicalPlanByGroupId(PhysicalPlan[] physicalPlans, BatchResult batchResult,
        Map<String, List<PhysicalPlan>> physicalPlansMap, Map<String, List<Integer>> planIndexMap) {
-     int[] result = batchResult.getResult();
++
+     int[] result = batchResult.getResultArray();
      for (int i = 0; i < result.length; i++) {
        /** Check if the request has failed. If it has failed, ignore it. **/
        if (result[i] != Statement.EXECUTE_FAILED) {
@@@ -141,19 -139,16 +141,17 @@@
          try {
            String groupId = getGroupIdFromPhysicalPlan(plan);
            if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
++
+             // This handles set storage group statements and role/user management statements.
              LOGGER.debug("Execute metadata group task");
              boolean executeResult = handleNonQueryRequest(groupId, plan);
-             nullReaderEnable = true;
-             result[i] =  executeResult ? Statement.SUCCESS_NO_INFO
+             emptyTaskEnable = true;
+             result[i] = executeResult ? Statement.SUCCESS_NO_INFO
                  : Statement.EXECUTE_FAILED;
              batchResult.setAllSuccessful(executeResult);
-           }else {
-             if (!physicalPlansMap.containsKey(groupId)) {
-               physicalPlansMap.put(groupId, new ArrayList<>());
-               planIndexMap.put(groupId, new ArrayList<>());
-             }
-             physicalPlansMap.get(groupId).add(plan);
-             planIndexMap.get(groupId).add(i);
+           } else {
+             physicalPlansMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(plan);
+             planIndexMap.computeIfAbsent(groupId, l -> new ArrayList<>()).add(i);
            }
          } catch (PathErrorException | ProcessorException | IOException | RaftConnectionException | InterruptedException e) {
            result[i] = Statement.EXECUTE_FAILED;
@@@ -327,42 -323,9 +326,44 @@@
      if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
        return handleNonQueryRequestLocally(groupId, qpTask);
      } else {
 -      PeerId leader = RaftUtils.getLeaderPeerID(groupId);
 +      PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
 +      boolean res = false;
+       qpTask.setTargetNode(leader);
 -      return syncHandleNonQueryTask(qpTask);
 +      try {
-          res = asyncHandleNonQueryTask(qpTask, leader);
++         res = syncHandleNonQueryTask(qpTask);
 +      } catch (RaftConnectionException ex) {
 +        boolean success = false;
 +        PeerId nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +        PeerId firstNode = nextNode;
 +        boolean first = true;
 +        while (!success) {
 +          try {
 +            if (!first) {
 +              nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +              if (firstNode.equals(nextNode)) {
 +                break;
 +              }
 +            }
 +            first = false;
 +            LOGGER.debug("Previous task fail, then send non-query task for group {} to node {}.", groupId, nextNode);
 +            qpTask.resetTask();
++            qpTask.setTargetNode(nextNode);
 +            qpTask.setTaskState(TaskState.INITIAL);
 +            currentTask.set(qpTask);
-             res = asyncHandleNonQueryTask(qpTask, nextNode);
++            res = syncHandleNonQueryTask(qpTask);
 +            LOGGER.debug("Non-query task for group {} to node {} succeed.", groupId, nextNode);
 +            success = true;
 +            RaftUtils.updateRaftGroupLeader(groupId, nextNode);
 +          } catch (RaftConnectionException e1) {
 +            LOGGER.debug("Non-query task for group {} to node {} fail.", groupId, nextNode);
 +          }
 +        }
 +        LOGGER.debug("The final result for non-query task is {}", success);
 +        if (!success) {
 +          throw ex;
 +        }
 +      }
 +      return res;
      }
    }
  
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
index 6043ce6,a258d7f..625269e
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@@ -26,13 -26,12 +26,13 @@@ import java.util.List
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.Set;
- import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
--import org.apache.iotdb.cluster.qp.task.SingleQPTask;
  import org.apache.iotdb.cluster.config.ClusterConfig;
  import org.apache.iotdb.cluster.config.ClusterConstant;
  import org.apache.iotdb.cluster.entity.raft.MetadataRaftHolder;
  import org.apache.iotdb.cluster.entity.raft.RaftService;
  import org.apache.iotdb.cluster.exception.RaftConnectionException;
++import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
++import org.apache.iotdb.cluster.qp.task.SingleQPTask;
  import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataInStringRequest;
  import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryMetadataRequest;
  import org.apache.iotdb.cluster.rpc.raft.request.querymetadata.QueryPathsRequest;
@@@ -106,19 -105,19 +106,19 @@@ public class QueryMetadataExecutor exte
          StringBuilder path = new StringBuilder();
          String[] storageGroupNodes = storageGroup.split(DOUB_SEPARATOR);
          String[] queryPathNodes = queryPath.split(DOUB_SEPARATOR);
--        for(int  i = 0 ; i < queryPathNodes.length ; i++){
--          if(i >= storageGroupNodes.length){
++        for (int i = 0; i < queryPathNodes.length; i++) {
++          if (i >= storageGroupNodes.length) {
              path.append(queryPathNodes[i]).append(SINGLE_SEPARATOR);
            } else {
              path.append(storageGroupNodes[i]).append(SINGLE_SEPARATOR);
            }
          }
--        paths.add(path.deleteCharAt(path.length()-1).toString());
++        paths.add(path.deleteCharAt(path.length() - 1).toString());
        }
      }
      return paths;
    }
--  
++
    /**
     * Handle query timeseries in one data group
     *
@@@ -134,38 -133,16 +134,47 @@@
      PeerId holder;
      /** Check if the plan can be executed locally. **/
      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
--      LOGGER.debug("Execute show timeseries {} statement locally for group {} by sending request to local node.", pathList, groupId);
++      LOGGER.debug(
++          "Execute show timeseries {} statement locally for group {} by sending request to local node.",
++          pathList, groupId);
        holder = this.server.getServerId();
      } else {
 -      holder = RaftUtils.getRandomPeerID(groupId);
 +      holder = RaftUtils.getPeerIDInOrder(groupId);
      }
+     task.setTargetNode(holder);
      try {
-       LOGGER.debug("Send show timeseries {} task for group {} to node {}.", pathList, groupId, holder);
-       res.addAll(queryTimeSeries(task, holder));
++      LOGGER.debug("Send show timeseries {} task for group {} to node {}.", pathList, groupId,
++          holder);
+       res.addAll(queryTimeSeries(task));
      } catch (RaftConnectionException e) {
 -      throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +      boolean success = false;
 +      while (!success) {
 +        PeerId nextNode = null;
 +        try {
 +          nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +          if (holder.equals(nextNode)) {
 +            break;
 +          }
-           LOGGER.debug("Previous task fail, then send show timeseries {} task for group {} to node {}.", pathList, groupId, nextNode);
++          LOGGER.debug(
++              "Previous task fail, then send show timeseries {} task for group {} to node {}.",
++              pathList, groupId, nextNode);
 +          task.resetTask();
++          task.setTargetNode(nextNode);
 +          task.setTaskState(TaskState.INITIAL);
-           res.addAll(queryTimeSeries(task, nextNode));
-           LOGGER.debug("Show timeseries {} task for group {} to node {} succeed.", pathList, groupId, nextNode);
++          res.addAll(queryTimeSeries(task));
++          LOGGER
++              .debug("Show timeseries {} task for group {} to node {} succeed.", pathList, groupId,
++                  nextNode);
 +          success = true;
 +        } catch (RaftConnectionException e1) {
-           LOGGER.debug("Show timeseries {} task for group {} to node {} fail.", pathList, groupId, nextNode);
-           continue;
++          LOGGER.debug("Show timeseries {} task for group {} to node {} fail.", pathList, groupId,
++              nextNode);
 +        }
 +      }
 +      LOGGER.debug("The final result for show timeseries {} task is {}", pathList, success);
 +      if (!success) {
 +        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +      }
      }
    }
  
@@@ -185,38 -162,16 +194,45 @@@
        PeerId holder;
        /** Check if the plan can be executed locally. **/
        if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
--        LOGGER.debug("Execute show metadata in string statement locally for group {} by sending request to local node.", groupId);
++        LOGGER.debug(
++            "Execute show metadata in string statement locally for group {} by sending request to local node.",
++            groupId);
          holder = this.server.getServerId();
        } else {
 -        holder = RaftUtils.getRandomPeerID(groupId);
 +        holder = RaftUtils.getPeerIDInOrder(groupId);
        }
+       task.setTargetNode(holder);
        try {
 +        LOGGER.debug("Send show metadata in string task for group {} to node {}.", groupId, holder);
-         asyncSendNonQuerySingleTask(task, holder, 0);
+         asyncSendNonQuerySingleTask(task, 0);
        } catch (RaftConnectionException e) {
 -        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        boolean success = false;
 +        while (!success) {
 +          PeerId nextNode = null;
 +          try {
 +            nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +            if (holder.equals(nextNode)) {
 +              break;
 +            }
-             LOGGER.debug("Previous task fail, then send show metadata in string task for group {} to node {}.", groupId, nextNode);
++            LOGGER.debug(
++                "Previous task fail, then send show metadata in string task for group {} to node {}.",
++                groupId, nextNode);
 +            task.resetTask();
++            task.setTargetNode(nextNode);
 +            task.setTaskState(TaskState.INITIAL);
-             asyncSendNonQuerySingleTask(task, nextNode, 0);
-             LOGGER.debug("Show metadata in string task for group {} to node {} succeed.", groupId, nextNode);
++            asyncSendNonQuerySingleTask(task, 0);
++            LOGGER.debug("Show metadata in string task for group {} to node {} succeed.", groupId,
++                nextNode);
 +            success = true;
 +          } catch (RaftConnectionException e1) {
-             LOGGER.debug("Show metadata in string task for group {} to node {} fail.", groupId, nextNode);
-             continue;
++            LOGGER.debug("Show metadata in string task for group {} to node {} fail.", groupId,
++                nextNode);
 +          }
 +        }
 +        LOGGER.debug("The final result for show metadata in string task is {}", success);
 +        if (!success) {
 +          throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        }
        }
      }
      for (int i = 0; i < taskList.size(); i++) {
@@@ -224,13 -179,9 +240,14 @@@
        task.await();
        BasicResponse response = task.getResponse();
        if (response == null || !response.isSuccess()) {
 -        throw new ProcessorException();
 +        String errorMessage = "response is null";
 +        if (response != null && response.getErrorMsg() != null) {
 +          errorMessage = response.getErrorMsg();
 +        }
-         throw new ProcessorException("Execute show metadata in string statement fail because " + errorMessage);
++        throw new ProcessorException(
++            "Execute show metadata in string statement fail because " + errorMessage);
        }
--      metadataList.add(((QueryMetadataInStringResponse)response).getMetadata());
++      metadataList.add(((QueryMetadataInStringResponse) response).getMetadata());
      }
      return combineMetadataInStringList(metadataList);
    }
@@@ -251,38 -202,16 +268,44 @@@
        PeerId holder;
        /** Check if the plan can be executed locally. **/
        if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
--        LOGGER.debug("Execute query metadata statement locally for group {} by sending request to local node.", groupId);
++        LOGGER.debug(
++            "Execute query metadata statement locally for group {} by sending request to local node.",
++            groupId);
          holder = this.server.getServerId();
        } else {
 -        holder = RaftUtils.getRandomPeerID(groupId);
 +        holder = RaftUtils.getPeerIDInOrder(groupId);
        }
+       task.setTargetNode(holder);
        try {
 +        LOGGER.debug("Send query metadata task for group {} to node {}.", groupId, holder);
-         asyncSendNonQuerySingleTask(task, holder, 0);
+         asyncSendNonQuerySingleTask(task, 0);
        } catch (RaftConnectionException e) {
 -        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        boolean success = false;
 +        while (!success) {
 +          PeerId nextNode = null;
 +          try {
 +            nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +            if (holder.equals(nextNode)) {
 +              break;
 +            }
-             LOGGER.debug("Previous task fail, then send query metadata task for group {} to node {}.", groupId, nextNode);
++            LOGGER
++                .debug("Previous task fail, then send query metadata task for group {} to node {}.",
++                    groupId, nextNode);
 +            task.resetTask();
++            task.setTargetNode(nextNode);
 +            task.setTaskState(TaskState.INITIAL);
-             asyncSendNonQuerySingleTask(task, nextNode, 0);
++            asyncSendNonQuerySingleTask(task, 0);
 +            LOGGER.debug("Query metadata task for group {} to node {} succeed.", groupId, nextNode);
 +            success = true;
 +          } catch (RaftConnectionException e1) {
 +            LOGGER.debug("Query metadata task for group {} to node {} fail.", groupId, nextNode);
 +            continue;
 +          }
 +        }
 +        LOGGER.debug("The final result for query metadata task is {}", success);
 +        if (!success) {
 +          throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        }
        }
      }
      for (int i = 0; i < taskList.size(); i++) {
@@@ -294,9 -223,9 +317,10 @@@
          if (response != null && response.getErrorMsg() != null) {
            errorMessage = response.getErrorMsg();
          }
-         throw new ProcessorException("Execute query metadata statement fail because " + errorMessage);
 -        throw new ProcessorException("Execute query metadata statement false because " + errorMessage);
++        throw new ProcessorException(
++            "Execute query metadata statement fail because " + errorMessage);
        }
--      metadatas[i] = ((QueryMetadataResponse)response).getMetadata();
++      metadatas[i] = ((QueryMetadataResponse) response).getMetadata();
      }
      return Metadata.combineMetadatas(metadatas);
    }
@@@ -317,38 -246,16 +341,47 @@@
        PeerId holder;
        /** Check if the plan can be executed locally. **/
        if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
--        LOGGER.debug("Execute get series type for {} statement locally for group {} by sending request to local node.", path, groupId);
++        LOGGER.debug(
++            "Execute get series type for {} statement locally for group {} by sending request to local node.",
++            path, groupId);
          holder = this.server.getServerId();
        } else {
 -        holder = RaftUtils.getRandomPeerID(groupId);
 +        holder = RaftUtils.getPeerIDInOrder(groupId);
        }
+       task.setTargetNode(holder);
        try {
-         LOGGER.debug("Send get series type for {} task for group {} to node {}.", path, groupId, holder);
-         dataType = querySeriesType(task, holder);
++        LOGGER.debug("Send get series type for {} task for group {} to node {}.", path, groupId,
++            holder);
+         dataType = querySeriesType(task);
        } catch (RaftConnectionException e) {
 -        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        boolean success = false;
 +        while (!success) {
 +          PeerId nextNode = null;
 +          try {
 +            nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +            if (holder.equals(nextNode)) {
 +              break;
 +            }
-             LOGGER.debug("Previous task fail, then send get series type for {} task for group {} to node {}.", path, groupId, nextNode);
++            LOGGER.debug(
++                "Previous task fail, then send get series type for {} task for group {} to node {}.",
++                path, groupId, nextNode);
 +            task.resetTask();
++            task.setTargetNode(nextNode);
 +            task.setTaskState(TaskState.INITIAL);
-             dataType = querySeriesType(task, nextNode);
-             LOGGER.debug("Get series type for {} task for group {} to node {} succeed.", path, groupId, nextNode);
++            dataType = querySeriesType(task);
++            LOGGER.debug("Get series type for {} task for group {} to node {} succeed.", path,
++                groupId, nextNode);
 +            success = true;
 +          } catch (RaftConnectionException e1) {
-             LOGGER.debug("Get series type for {} task for group {} to node {} fail.", path, groupId, nextNode);
++            LOGGER.debug("Get series type for {} task for group {} to node {} fail.", path, groupId,
++                nextNode);
 +            continue;
 +          }
 +        }
 +        LOGGER.debug("The final result for get series type for {} task is {}", path, success);
 +        if (!success) {
 +          throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +        }
        }
      }
      return dataType;
@@@ -389,37 -296,16 +422,46 @@@
      PeerId holder;
      /** Check if the plan can be executed locally. **/
      if (QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
--      LOGGER.debug("Execute get paths for {} statement locally for group {} by sending request to local node.", pathList, groupId);
++      LOGGER.debug(
++          "Execute get paths for {} statement locally for group {} by sending request to local node.",
++          pathList, groupId);
        holder = this.server.getServerId();
      } else {
 -      holder = RaftUtils.getRandomPeerID(groupId);
 +      holder = RaftUtils.getPeerIDInOrder(groupId);
      }
+     task.setTargetNode(holder);
      try {
-       LOGGER.debug("Send get paths for {} task for group {} to node {}.", pathList, groupId, holder);
-       res.addAll(queryPaths(task, holder));
++      LOGGER
++          .debug("Send get paths for {} task for group {} to node {}.", pathList, groupId, holder);
+       res.addAll(queryPaths(task));
      } catch (RaftConnectionException e) {
 -      throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +      boolean success = false;
 +      while (!success) {
 +        PeerId nextNode = null;
 +        try {
 +          nextNode = RaftUtils.getPeerIDInOrder(groupId);
 +          if (holder.equals(nextNode)) {
 +            break;
 +          }
-           LOGGER.debug("Previous task fail, then send get paths for {} task for group {} to node {}.", pathList, groupId, nextNode);
++          LOGGER
++              .debug("Previous task fail, then send get paths for {} task for group {} to node {}.",
++                  pathList, groupId, nextNode);
++          task.setTargetNode(nextNode);
 +          task.resetTask();
 +          task.setTaskState(TaskState.INITIAL);
-           res.addAll(queryPaths(task, nextNode));
-           LOGGER.debug("Get paths for {} task for group {} to node {} succeed.", pathList, groupId, nextNode);
++          res.addAll(queryPaths(task));
++          LOGGER.debug("Get paths for {} task for group {} to node {} succeed.", pathList, groupId,
++              nextNode);
 +          success = true;
 +        } catch (RaftConnectionException e1) {
-           LOGGER.debug("Get paths for {} task for group {} to node {} fail.", pathList, groupId, nextNode);
++          LOGGER.debug("Get paths for {} task for group {} to node {} fail.", pathList, groupId,
++              nextNode);
 +        }
 +      }
 +      LOGGER.debug("The final result for get paths for {} task is {}", pathList, success);
 +      if (!success) {
 +        throw new ProcessorException(RAFT_CONNECTION_ERROR, e);
 +      }
      }
    }
  
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
index 4562d77,f06fa4b..b881549
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/BatchQPTask.java
@@@ -73,13 -68,12 +68,11 @@@ public class BatchQPTask extends MultiQ
  
    private NonQueryExecutor executor;
  
--
-   public BatchQPTask(int taskNum, BatchResult batchResult, Map<String, SingleQPTask> taskMap,
+   public BatchQPTask(int taskNum, BatchResult result, Map<String, SingleQPTask> taskMap,
        Map<String, List<Integer>> planIndexMap) {
      super(false, taskNum, TaskType.BATCH);
-     this.batchResult = batchResult.getResult();
-     this.isAllSuccessful = batchResult.isAllSuccessful();
-     this.batchErrorMessage = batchResult.getBatchErrorMessage();
+     this.resultArray = result.getResultArray();
+     this.batchResult = result;
      this.taskMap = taskMap;
      this.planIndexMap = planIndexMap;
      this.taskThreadMap = new HashMap<>();
@@@ -123,12 -123,13 +122,13 @@@
        SingleQPTask subTask = entry.getValue();
        Future<?> taskThread;
        if (QPExecutorUtils.canHandleNonQueryByGroupId(groupId)) {
-         taskThread = QPTaskManager.getInstance()
+         taskThread = QPTaskThreadManager.getInstance()
              .submit(() -> executeLocalSubTask(subTask, groupId));
        } else {
 -        PeerId leader = RaftUtils.getLeaderPeerID(groupId);
 +        PeerId leader = RaftUtils.getLocalLeaderPeerID(groupId);
-         taskThread = QPTaskManager.getInstance()
-             .submit(() -> executeRpcSubTask(subTask, leader, groupId));
+         subTask.setTargetNode(leader);
+         taskThread = QPTaskThreadManager.getInstance()
+             .submit(() -> executeRpcSubTask(subTask, groupId));
        }
        taskThreadMap.put(groupId, taskThread);
      }
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
index 3b905d8,3b905d8..f861f55
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/task/DataQueryTask.java
@@@ -18,32 -18,32 +18,12 @@@
   */
  package org.apache.iotdb.cluster.qp.task;
  
--import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
--import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
++import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
  
--public class DataQueryTask {
--  private BasicResponse basicResponse;
--  private TaskState state;
++public class DataQueryTask extends SingleQPTask {
  
--  public DataQueryTask(BasicResponse basicResponse,
--      TaskState state) {
--    this.basicResponse = basicResponse;
--    this.state = state;
--  }
--
--  public BasicResponse getBasicResponse() {
--    return basicResponse;
--  }
--
--  public void setBasicResponse(BasicResponse basicResponse) {
--    this.basicResponse = basicResponse;
--  }
--
--  public TaskState getState() {
--    return state;
--  }
--
--  public void setState(TaskState state) {
--    this.state = state;
++  public DataQueryTask(boolean isSyncTask,
++      BasicRequest request) {
++    super(isSyncTask, request);
    }
  }
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
index 6653601,0000000..f57c538
mode 100644,000000..100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcQueryManager.java
@@@ -1,111 -1,0 +1,123 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.iotdb.cluster.query.manager.coordinatornode;
 +
 +import com.alipay.sofa.jraft.util.OnlyForTest;
 +import java.util.HashMap;
++import java.util.Iterator;
 +import java.util.Map;
++import java.util.Map.Entry;
 +import java.util.concurrent.ConcurrentHashMap;
 +import org.apache.iotdb.cluster.config.ClusterConfig;
 +import org.apache.iotdb.cluster.config.ClusterDescriptor;
 +import org.apache.iotdb.cluster.exception.RaftConnectionException;
 +import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 +
 +public class ClusterRpcQueryManager implements IClusterRpcQueryManager {
 +
 +  /**
 +   * Key is job id, value is task id.
 +   */
 +  private static final ConcurrentHashMap<Long, String> JOB_ID_MAP_TASK_ID = new ConcurrentHashMap<>();
 +
 +  /**
 +   * Key is task id, value is manager of a client query.
 +   */
 +  private static final ConcurrentHashMap<String, ClusterRpcSingleQueryManager> SINGLE_QUERY_MANAGER_MAP = new ConcurrentHashMap<>();
 +
 +  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
 +
 +  /**
 +   * Local address
 +   */
 +  private static final String LOCAL_ADDR = String
 +      .format("%s:%d", CLUSTER_CONFIG.getIp(), CLUSTER_CONFIG.getPort());
 +
 +  @Override
 +  public void addSingleQuery(long jobId, QueryPlan physicalPlan) {
 +    String taskId = createTaskId(jobId);
 +    JOB_ID_MAP_TASK_ID.put(jobId, taskId);
 +    SINGLE_QUERY_MANAGER_MAP.put(taskId, new ClusterRpcSingleQueryManager(taskId, physicalPlan));
 +  }
 +
 +  @Override
 +  public String createTaskId(long jobId) {
 +    return String.format("%s:%d", LOCAL_ADDR, jobId);
 +  }
 +
 +  @Override
 +  public ClusterRpcSingleQueryManager getSingleQuery(long jobId) {
 +    return SINGLE_QUERY_MANAGER_MAP.get(JOB_ID_MAP_TASK_ID.get(jobId));
 +  }
 +
 +  @Override
 +  public ClusterRpcSingleQueryManager getSingleQuery(String taskId) {
 +    return SINGLE_QUERY_MANAGER_MAP.get(taskId);
 +  }
 +
 +  @Override
 +  public void releaseQueryResource(long jobId) throws RaftConnectionException {
 +    if (JOB_ID_MAP_TASK_ID.containsKey(jobId)) {
 +      SINGLE_QUERY_MANAGER_MAP.remove(JOB_ID_MAP_TASK_ID.remove(jobId)).releaseQueryResource();
 +    }
 +  }
 +
 +  @Override
 +  public Map<String, Integer> getAllReadUsage() {
 +    Map<String, Integer> readerUsageMap = new HashMap<>();
 +    SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
 +      for (String groupId : singleQueryManager.getDataGroupUsage()) {
 +        readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
 +      }
 +    });
 +    return readerUsageMap;
 +  }
 +
++  @Override
++  public void close() throws RaftConnectionException {
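++    // Release the query resources held by every registered single-query manager, then drop it from the map.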
++    Iterator<Map.Entry<String, ClusterRpcSingleQueryManager>> iterator = SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
++    while(iterator.hasNext()){
++      Entry<String, ClusterRpcSingleQueryManager> entry = iterator.next();
++      entry.getValue().releaseQueryResource();
++      iterator.remove();
++    }
++  }
++
 +  @OnlyForTest
 +  public static ConcurrentHashMap<Long, String> getJobIdMapTaskId() {
 +    return JOB_ID_MAP_TASK_ID;
 +  }
 +
 +  private ClusterRpcQueryManager() {
 +  }
 +
 +  public static final ClusterRpcQueryManager getInstance() {
 +    return ClusterRpcQueryManagerHolder.INSTANCE;
 +  }
 +
 +  private static class ClusterRpcQueryManagerHolder {
 +
 +    private static final ClusterRpcQueryManager INSTANCE = new ClusterRpcQueryManager();
 +
 +    private ClusterRpcQueryManagerHolder() {
 +
 +    }
 +  }
 +
 +}
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
index b8e4f5d,0000000..0917631
mode 100644,000000..100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcQueryManager.java
@@@ -1,69 -1,0 +1,74 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.iotdb.cluster.query.manager.coordinatornode;
 +
 +import java.util.Map;
 +import org.apache.iotdb.cluster.exception.RaftConnectionException;
 +import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 +
 +/**
 + * Manage all query series reader resources which fetch data from remote query nodes in coordinator
 + * node
 + */
 +public interface IClusterRpcQueryManager {
 +
 +  /**
 +   * Add a query
 +   *
 +   * @param jobId job id assigned by QueryResourceManager
 +   * @param physicalPlan physical plan
 +   */
 +  void addSingleQuery(long jobId, QueryPlan physicalPlan);
 +
 +  /**
 +   * Get full task id (local address + job id)
 +   */
 +  String createTaskId(long jobId);
 +
 +  /**
 +   * Get query manager by jobId
 +   *
 +   * @param jobId job id assigned by QueryResourceManager
 +   */
 +  ClusterRpcSingleQueryManager getSingleQuery(long jobId);
 +
 +  /**
 +   * Get query manager by taskId
 +   *
 +   * @param taskId task id assigned by the createTaskId() method
 +   */
 +  ClusterRpcSingleQueryManager getSingleQuery(String taskId);
 +
 +  /**
 +   * Release query resource
 +   *
 +   * @param jobId job id
 +   */
 +  void releaseQueryResource(long jobId) throws RaftConnectionException;
 +
 +  /**
 +   * Get all read usage count group by data group id, key is group id, value is usage count
 +   */
 +  Map<String, Integer> getAllReadUsage();
++
++  /**
++   * Close manager
++   */
++  void close() throws RaftConnectionException;
 +}
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
index 4e09af8,0000000..a602c84
mode 100644,000000..100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
@@@ -1,125 -1,0 +1,138 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.iotdb.cluster.query.manager.querynode;
 +
 +import com.alipay.sofa.jraft.util.OnlyForTest;
 +import java.io.IOException;
 +import java.util.HashMap;
++import java.util.Iterator;
 +import java.util.Map;
++import java.util.Map.Entry;
 +import java.util.concurrent.ConcurrentHashMap;
++import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 +import org.apache.iotdb.db.exception.FileNodeManagerException;
 +import org.apache.iotdb.db.exception.PathErrorException;
 +import org.apache.iotdb.db.exception.ProcessorException;
 +import org.apache.iotdb.db.query.control.QueryResourceManager;
 +import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 +
 +public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
 +
 +  /**
 +   * Key is task id which is assigned by coordinator node, value is job id which is assigned by
 +   * query node(local).
 +   */
 +  private static final ConcurrentHashMap<String, Long> TASK_ID_MAP_JOB_ID = new ConcurrentHashMap<>();
 +
 +  /**
 +   * Key is job id, value is manager of a client query.
 +   */
 +  private static final ConcurrentHashMap<Long, ClusterLocalSingleQueryManager> SINGLE_QUERY_MANAGER_MAP = new ConcurrentHashMap<>();
 +
 +  private ClusterLocalQueryManager() {
 +  }
 +
 +  @Override
 +  public InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
 +      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
 +    long jobId = QueryResourceManager.getInstance().assignJobId();
 +    String taskId = request.getTaskId();
 +    TASK_ID_MAP_JOB_ID.put(taskId, jobId);
 +    ClusterLocalSingleQueryManager localQueryManager = new ClusterLocalSingleQueryManager(jobId);
 +    SINGLE_QUERY_MANAGER_MAP.put(jobId, localQueryManager);
 +    return localQueryManager.createSeriesReader(request);
 +  }
 +
 +  @Override
 +  public QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
 +      throws IOException {
 +    long jobId = TASK_ID_MAP_JOB_ID.get(request.getTaskId());
 +    return SINGLE_QUERY_MANAGER_MAP.get(jobId).readBatchData(request);
 +  }
 +
 +  @Override
 +  public QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
 +      QuerySeriesDataByTimestampRequest request)
 +      throws IOException {
 +    long jobId = TASK_ID_MAP_JOB_ID.get(request.getTaskId());
 +    return SINGLE_QUERY_MANAGER_MAP.get(jobId).readBatchDataByTimestamp(request);
 +  }
 +
 +  @Override
 +  public void close(String taskId) throws FileNodeManagerException {
 +    if (TASK_ID_MAP_JOB_ID.containsKey(taskId)) {
 +      SINGLE_QUERY_MANAGER_MAP.remove(TASK_ID_MAP_JOB_ID.remove(taskId)).close();
 +    }
 +  }
 +
 +  @Override
 +  public ClusterLocalSingleQueryManager getSingleQuery(String taskId) {
 +    long jobId = TASK_ID_MAP_JOB_ID.get(taskId);
 +    return SINGLE_QUERY_MANAGER_MAP.get(jobId);
 +  }
 +
 +  public static final ClusterLocalQueryManager getInstance() {
 +    return ClusterLocalQueryManager.ClusterLocalQueryManagerHolder.INSTANCE;
 +  }
 +
 +  private static class ClusterLocalQueryManagerHolder {
 +
 +    private static final ClusterLocalQueryManager INSTANCE = new ClusterLocalQueryManager();
 +
 +    private ClusterLocalQueryManagerHolder() {
 +
 +    }
 +  }
 +
 +  @Override
 +  public Map<String, Integer> getAllReadUsage() {
 +    Map<String, Integer> readerUsageMap = new HashMap<>();
 +    SINGLE_QUERY_MANAGER_MAP.values().forEach(singleQueryManager -> {
 +      String groupId = singleQueryManager.getGroupId();
 +      readerUsageMap.put(groupId, readerUsageMap.getOrDefault(groupId, 0) + 1);
 +    });
 +    return readerUsageMap;
 +  }
 +
++  @Override
++  public void close() throws FileNodeManagerException {
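++    // Close every local single-query manager and remove its entry so no stale query job remains.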
++    Iterator<Entry<Long, ClusterLocalSingleQueryManager>> iterator = SINGLE_QUERY_MANAGER_MAP.entrySet().iterator();
++    while(iterator.hasNext()){
++      Entry<Long, ClusterLocalSingleQueryManager> entry = iterator.next();
++      entry.getValue().close();
++      iterator.remove();
++    }
++  }
++
 +  @OnlyForTest
 +  public static ConcurrentHashMap<String, Long> getTaskIdMapJobId() {
 +    return TASK_ID_MAP_JOB_ID;
 +  }
 +
 +  @OnlyForTest
 +  public static ConcurrentHashMap<Long, ClusterLocalSingleQueryManager> getSingleQueryManagerMap() {
 +    return SINGLE_QUERY_MANAGER_MAP;
 +  }
 +}
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
index 1105bb2,0000000..42374d5
mode 100644,000000..100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
@@@ -1,82 -1,0 +1,87 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.iotdb.cluster.query.manager.querynode;
 +
 +import java.io.IOException;
 +import java.util.Map;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 +import org.apache.iotdb.db.exception.FileNodeManagerException;
 +import org.apache.iotdb.db.exception.PathErrorException;
 +import org.apache.iotdb.db.exception.ProcessorException;
 +import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 +
 +/**
 + * Manage all local query resources which provide data for coordinator node in cluster query node.
 + */
 +public interface IClusterLocalQueryManager {
 +
 +  /**
 +   * Initially create query data set for coordinator node.
 +   *
 +   * @param request request for query data from coordinator node
 +   */
 +  InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
 +      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 +
 +  /**
 +   * Read batch data of all querying series in request and set response.
 +   *
 +   * @param request request of querying series
 +   */
 +  QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
 +      throws IOException;
 +
 +  /**
 +   * Read batch data of select series by batch timestamp which is used in query with value filter
 +   *
 +   * @param request request of querying select paths
 +   */
 +  QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
 +      QuerySeriesDataByTimestampRequest request) throws IOException;
 +
 +  /**
 +   * Close query resource of a task
 +   *
 +   * @param taskId task id of local single query manager
 +   */
 +  void close(String taskId) throws FileNodeManagerException;
 +
 +
 +  /**
 +   * Get query manager by taskId
 +   *
 +   * @param taskId task id assigned by ClusterRpcQueryManager
 +   */
 +  ClusterLocalSingleQueryManager getSingleQuery(String taskId);
 +
 +  /**
 +   * Get all read usage count group by data group id, key is group id, value is usage count
 +   */
 +  Map<String, Integer> getAllReadUsage();
++
++  /**
++   * Close manager
++   */
++  void close() throws FileNodeManagerException;
 +}
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index 75c2381,0000000..bd61375
mode 100644,000000..100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@@ -1,103 -1,0 +1,113 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.iotdb.cluster.query.utils;
 +
 +import com.alipay.sofa.jraft.entity.PeerId;
 +import java.util.List;
 +import org.apache.iotdb.cluster.config.ClusterDescriptor;
 +import org.apache.iotdb.cluster.entity.Server;
 +import org.apache.iotdb.cluster.exception.RaftConnectionException;
- import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 +import org.apache.iotdb.cluster.qp.task.DataQueryTask;
++import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 +import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
- import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
++import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
 +import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 +import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 +import org.apache.iotdb.cluster.utils.RaftUtils;
 +import org.apache.iotdb.cluster.utils.hash.Router;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * Utils for cluster reader which needs to acquire data from remote query node.
 + */
 +public class ClusterRpcReaderUtils {
 +
 +  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcReaderUtils.class);
 +
 +  /**
 +   * Count limit to redo a task
 +   */
 +  private static final int TASK_MAX_RETRY = ClusterDescriptor.getInstance().getConfig()
 +      .getQpTaskRedoCount();
 +
 +  private ClusterRpcReaderUtils() {
 +  }
 +
 +  /**
 +   * Create cluster series reader
 +   */
 +  public static BasicResponse createClusterSeriesReader(String groupId, BasicRequest request,
 +      ClusterRpcSingleQueryManager manager)
 +      throws RaftConnectionException {
 +
 +    List<PeerId> peerIdList = RaftUtils
 +        .getPeerIDList(groupId, Server.getInstance(), Router.getInstance());
 +    int randomPeerIndex = RaftUtils.getRandomInt(peerIdList.size());
 +    BasicResponse response;
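++    // Try each peer of the group, starting from a random offset, until one of them initializes the series reader.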
 +    for (int i = 0; i < peerIdList.size(); i++) {
 +      PeerId peerId = peerIdList.get((i + randomPeerIndex) % peerIdList.size());
 +      try {
 +        response = handleQueryRequest(request, peerId, 0);
 +        manager.setQueryNode(groupId, peerId);
 +        LOGGER.debug("Init series reader in Node<{}> of group<{}> success.", peerId, groupId);
 +        return response;
 +      } catch (RaftConnectionException e) {
 +        LOGGER.debug("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
 +      }
 +    }
 +    throw new RaftConnectionException(
 +        String.format("Can not init series reader in all nodes of group<%s>.", groupId));
 +  }
 +
 +  /**
 +   * Send query request to remote node and return response
 +   *
 +   * @param request query request
 +   * @param peerId target remote query node
 +   * @param taskRetryNum retry num of the request
 +   * @return Response from remote query node
 +   */
 +  public static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
 +      int taskRetryNum)
 +      throws RaftConnectionException {
 +    if (taskRetryNum > TASK_MAX_RETRY) {
 +      throw new RaftConnectionException(
 +          String.format("Query request retries reach the upper bound %s",
 +              TASK_MAX_RETRY));
 +    }
-     NodeAsClient nodeAsClient = RaftUtils.getRaftNodeAsClient();
-     DataQueryTask dataQueryTask = nodeAsClient.syncHandleRequest(request, peerId);
-     if (dataQueryTask.getState() == TaskState.FINISH) {
-       return dataQueryTask.getBasicResponse();
++    DataQueryTask dataQueryTask = new DataQueryTask(true, request);
++    dataQueryTask.setTargetNode(peerId);
++    RaftNodeAsClientManager.getInstance().produceQPTask(dataQueryTask);
++    try {
++      dataQueryTask.await();
++    } catch (InterruptedException e) {
++      throw new RaftConnectionException(
++          String.format("Can not connect to remote node {%s} for query", peerId));
++    }
++    if (dataQueryTask.getTaskState() == TaskState.RAFT_CONNECTION_EXCEPTION) {
++      throw new RaftConnectionException(
++          String.format("Can not connect to remote node {%s} for query", peerId));
++    } else if (dataQueryTask.getTaskState() == TaskState.FINISH) {
++      return dataQueryTask.getResponse();
 +    } else {
 +      return handleQueryRequest(request, peerId, taskRetryNum + 1);
 +    }
 +  }
 +}
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
index 994fc07,6257fbd..d0690cd
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@@ -31,20 -31,18 +31,10 @@@ public interface NodeAsClient 
  
    /**
     * Asynchronous processing requests
-    *
-    * @param leader leader node of the target group
     * @param qpTask single QPTask to be executed
     */
-   void asyncHandleRequest(BasicRequest request, PeerId leader,
-       SingleQPTask qpTask) throws RaftConnectionException;
- 
-   /**
-    * Synchronous processing requests
-    *
-    * @param peerId leader node of the target group
-    */
-   DataQueryTask syncHandleRequest(BasicRequest request, PeerId peerId);
+   void asyncHandleRequest(SingleQPTask qpTask) throws RaftConnectionException;
  
 -//  /**
 -//   * Synchronous processing requests
 -//   * @param peerId leader node of the target group
 -//   *
 -//   */
 -//  DataQueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
 -//      throws RaftConnectionException;
 -
    /**
     * Shut down client
     */
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
index 1d32fd5,e96da99..351eece
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/impl/RaftNodeAsClientManager.java
@@@ -101,6 -90,18 +90,19 @@@ public class RaftNodeAsClientManager 
  
    public void init() {
      isShuttingDown = false;
++    taskQueue.clear();
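++    // Start one long-lived consumer thread per configured inner rpc client; each thread owns its
++    // own RaftNodeAsClient and keeps pulling tasks from the queue until it is interrupted.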
+     for (int i = 0; i < CLUSTER_CONFIG.getConcurrentInnerRpcClientThread(); i++) {
+       THREAD_POOL_MANAGER.execute(() -> {
+         RaftNodeAsClient client = new RaftNodeAsClient();
+         while (true) {
+           consumeQPTask(client);
+           if (Thread.currentThread().isInterrupted()) {
+             break;
+           }
+         }
+         client.shutdown();
+       });
+     }
    }
  
    /**
@@@ -135,46 -124,30 +125,33 @@@
      }
    }
  
-   private void checkShuttingDown() throws RaftConnectionException {
-     if (isShuttingDown) {
-       throw new RaftConnectionException(
-           "Reject to provide RaftNodeAsClient client because cluster system is shutting down");
-     }
-   }
- 
-   /**
-    * No-safe method, get client
-    */
-   private RaftNodeAsClient getClient() {
-     if (clientList.isEmpty()) {
-       return new RaftNodeAsClient();
-     } else {
-       return clientList.removeFirst();
-     }
-   }
 +
    /**
-    * Release usage of a client
+    * Consume qp task
     */
-   public void releaseClient(RaftNodeAsClient client) {
+   private void consumeQPTask(RaftNodeAsClient client) {
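++    // Block on the resource condition until a task is queued, then let this thread's client handle it asynchronously.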
      resourceLock.lock();
      try {
-       clientNumInUse.decrementAndGet();
-       resourceCondition.signalAll();
-       clientList.addLast(client);
+       while (taskQueue.isEmpty()) {
+         if (Thread.currentThread().isInterrupted()) {
+           return;
+         }
+         resourceCondition.await();
+       }
+       client.asyncHandleRequest(taskQueue.removeFirst());
+     } catch (InterruptedException e) {
 -      LOGGER.error("An error occurred when await for ResourceContidion", e);
++      Thread.currentThread().interrupt();
++      LOGGER.debug("Occur interruption when await for ResourceContidion", e);
      } finally {
        resourceLock.unlock();
      }
    }
  
-   public void shutdown() throws InterruptedException {
-     isShuttingDown = true;
-     while (clientNumInUse.get() != 0 && queueClientNum != 0) {
-       // wait until releasing all usage of clients.
-       resourceCondition.await();
-     }
-     while (!clientList.isEmpty()) {
-       clientList.removeFirst().shutdown();
++
+   private void checkShuttingDown() throws RaftConnectionException {
+     if (isShuttingDown) {
+       throw new RaftConnectionException(
+           "Reject to execute QPTask because cluster system is shutting down");
      }
    }
  
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
index c331714,44f42a7..291da32
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/nonquery/DataGroupNonQueryAsyncProcessor.java
@@@ -58,7 -58,7 +58,8 @@@ public class DataGroupNonQueryAsyncProc
      } else {
        LOGGER.debug("Apply task to raft node");
  
-       /** Apply Task to Raft Node **/
++
+       /* Apply Task to Raft Node */
        BasicResponse response = DataGroupNonQueryResponse.createEmptyResponse(groupId);
        RaftService service = (RaftService) dataPartitionRaftHolder.getService();
        RaftUtils.executeRaftTaskForRpcProcessor(service, asyncContext, request, response);
diff --cc cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index 601b902,aa2b46a..673fd12
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@@ -54,23 -54,16 +54,22 @@@ import org.apache.iotdb.cluster.excepti
  import org.apache.iotdb.cluster.qp.task.QPTask;
  import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
  import org.apache.iotdb.cluster.qp.task.SingleQPTask;
 -import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
 +import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
- import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
  import org.apache.iotdb.cluster.rpc.raft.closure.ResponseClosure;
  import org.apache.iotdb.cluster.rpc.raft.impl.RaftNodeAsClientManager;
  import org.apache.iotdb.cluster.rpc.raft.request.BasicNonQueryRequest;
  import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 -import org.apache.iotdb.cluster.rpc.raft.request.QueryMetricRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryJobNumRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryLeaderRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryMetricRequest;
 +import org.apache.iotdb.cluster.rpc.raft.request.querymetric.QueryStatusRequest;
  import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 -import org.apache.iotdb.cluster.rpc.raft.response.QueryMetricResponse;
+ import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
+ import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryJobNumResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryLeaderResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryMetricResponse;
- import org.apache.iotdb.cluster.rpc.raft.response.nonquery.DataGroupNonQueryResponse;
- import org.apache.iotdb.cluster.rpc.raft.response.nonquery.MetaGroupNonQueryResponse;
 +import org.apache.iotdb.cluster.rpc.raft.response.querymetric.QueryStatusResponse;
  import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
  import org.apache.iotdb.cluster.utils.hash.Router;
  import org.apache.iotdb.cluster.utils.hash.VirtualNode;
@@@ -94,9 -85,9 +93,9 @@@ public class RaftUtils 
  
    /**
   * The cache will be updated in three cases: 1. When @onLeaderStart() method of state machine is
-    * called, the cache will be update. 2. When @getLocalLeaderPeerID() in this class is called and cache
 -   * called, the cache will be update. 2. When @getLeaderPeerID() in this class is called and cache
--   * don't have the key, it's will get random peer and update. 3. When @redirected of BasicRequest
--   * is true, the task will be retry and the cache will update.
++   * called, the cache will be updated. 2. When @getLocalLeaderPeerID() in this class is called and
++   * the cache does not have the key, a random peer is chosen and cached. 3. When @redirected of
++   * BasicRequest is true, the task will be retried and the cache will be updated.
     */
    private static final ConcurrentHashMap<String, PeerId> groupLeaderCache = new ConcurrentHashMap<>();
  
@@@ -144,44 -100,8 +143,43 @@@
     *
     * @return leader id
     */
 -  public static PeerId getLeaderPeerID(String groupId) {
 -    return groupLeaderCache.computeIfAbsent(groupId, RaftUtils::getRandomPeerID);
 +  public static PeerId getLocalLeaderPeerID(String groupId) {
 +    if (!groupLeaderCache.containsKey(groupId)) {
 +      PeerId randomPeerId = getRandomPeerID(groupId);
 +      groupLeaderCache.put(groupId, randomPeerId);
 +    }
 +    PeerId leader = groupLeaderCache.get(groupId);
 +    LOGGER.debug("Get local cached leader {} of group {}.", leader, groupId);
 +    return leader;
 +  }
 +
 +  /**
 +   * Get the leader id of a group by querying the given remote node directly, rather than relying
 +   * on the local leader cache.
 +   *
 +   * @return leader id
 +   */
 +  public static PeerId getLeaderPeerIDFromRemoteNode(PeerId peerId, String groupId) {
 +    QueryLeaderRequest request = new QueryLeaderRequest(groupId);
 +    SingleQPTask task = new SingleQPTask(false, request);
- 
++    task.setTargetNode(peerId);
 +    LOGGER.debug("Execute get leader of group {} from node {}.", groupId, peerId);
 +    try {
-       NodeAsClient client = RaftNodeAsClientManager.getInstance().getRaftNodeAsClient();
-       /** Call async method **/
-       client.asyncHandleRequest(task.getRequest(), peerId, task);
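++      // Queue the task on the shared client manager and wait for the remote node's answer.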
++      CLIENT_MANAGER.produceQPTask(task);
 +
 +      task.await();
 +      PeerId leader = null;
 +      if (task.getTaskState() == TaskState.FINISH) {
 +        BasicResponse response = task.getResponse();
 +        leader = response == null ? null : ((QueryLeaderResponse) response).getLeader();
 +      }
 +      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, peerId);
 +      return leader;
 +    } catch (RaftConnectionException | InterruptedException e) {
-       LOGGER.error("Fail to get leader of group {} from remote node {} because of {}.", groupId, peerId, e.getMessage());
++      LOGGER.error("Fail to get leader of group {} from remote node {} because of {}.", groupId,
++          peerId, e.getMessage());
 +      return null;
 +    }
    }
  
    /**
@@@ -493,23 -402,7 +491,24 @@@
        groupId = router.getGroupID(group);
        nodes = getPeerIdArrayFrom(group);
      }
 -    PeerId leader = RaftUtils.getLeaderPeerID(groupId);
 +
 +    PeerId leader = null;
 +    for (PeerId node : nodes) {
 +      LOGGER.debug("Try to get leader of group {} from node {}.", groupId, node);
 +      leader = getLeaderPeerIDFromRemoteNode(node, groupId);
 +      LOGGER.debug("Get leader {} of group {} from node {}.", leader, groupId, node);
 +      if (leader != null) {
 +        break;
 +      }
 +    }
 +
 +    if (leader == null) {
-       LOGGER.debug("Fail to get leader of group {} from all remote nodes, get it locally.", groupId);
++      LOGGER
++          .debug("Fail to get leader of group {} from all remote nodes, get it locally.", groupId);
 +      leader = RaftUtils.getLocalLeaderPeerID(groupId);
 +      LOGGER.debug("Get leader {} of group {} locally.", leader, groupId);
 +    }
 +
      for (int i = 0; i < nodes.length; i++) {
        if (leader.equals(nodes[i])) {
          PeerId t = nodes[i];
@@@ -657,12 -551,11 +656,11 @@@
      SingleQPTask task = new SingleQPTask(false, request);
  
      LOGGER.debug("Execute get metric for {} statement for group {}.", metric, groupId);
 -    PeerId holder = RaftUtils.getLeaderPeerID(groupId);
 -    task.setTargetNode(holder);
 +    PeerId holder = RaftUtils.getLocalLeaderPeerID(groupId);
      LOGGER.debug("Get metric from node {}.", holder);
++    task.setTargetNode(holder);
      try {
-       NodeAsClient client = RaftNodeAsClientManager.getInstance().getRaftNodeAsClient();
-       /** Call async method **/
-       client.asyncHandleRequest(task.getRequest(), holder, task);
+       CLIENT_MANAGER.produceQPTask(task);
  
        task.await();
        Map<String, Long> value = null;
@@@ -676,100 -569,4 +674,82 @@@
        return null;
      }
    }
 +
 +  /**
 +   * Get query job number running on each data partition for all nodes
 +   *
 +   * @return outer key: ip, inner key: groupId, value: number of query jobs
 +   */
 +  public static Map<String, Map<String, Integer>> getQueryJobNumMapForCluster() {
 +    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
 +    Map<String, Map<String, Integer>> res = new HashMap<>();
 +    for (int i = 0; i < peerIds.length; i++) {
 +      PeerId peerId = peerIds[i];
 +      res.put(peerId.getIp(), getQueryJobNumMapFromRemoteNode(peerId));
 +    }
 +
 +    return res;
 +  }
 +
 +  public static Map<String, Integer> getLocalQueryJobNumMap() {
 +    return ClusterRpcQueryManager.getInstance().getAllReadUsage();
 +  }
 +
 +  private static Map<String, Integer> getQueryJobNumMapFromRemoteNode(PeerId peerId) {
 +    QueryJobNumRequest request = new QueryJobNumRequest("");
 +    SingleQPTask task = new SingleQPTask(false, request);
- 
++    task.setTargetNode(peerId);
 +    LOGGER.debug("Execute get query job num map for node {}.", peerId);
 +    try {
-       NodeAsClient client = RaftNodeAsClientManager.getInstance().getRaftNodeAsClient();
-       /** Call async method **/
-       client.asyncHandleRequest(task.getRequest(), peerId, task);
++      CLIENT_MANAGER.produceQPTask(task);
 +
 +      task.await();
 +      Map<String, Integer> value = null;
 +      if (task.getTaskState() == TaskState.FINISH) {
 +        BasicResponse response = task.getResponse();
 +        value = response == null ? null : ((QueryJobNumResponse) response).getValue();
 +      }
 +      return value;
 +    } catch (RaftConnectionException | InterruptedException e) {
 +      LOGGER.error("Fail to get query job num map from remote node {} because of {}.", peerId, e);
 +      return null;
 +    }
 +  }
 +
 +  /**
 +   * Get status of each node in cluster
 +   *
 +   * @return key: node ip, value: live or not
 +   */
 +  public static Map<String, Boolean> getStatusMapForCluster() {
 +    PeerId[] peerIds = RaftUtils.convertStringArrayToPeerIdArray(config.getNodes());
 +    Map<String, Boolean> res = new HashMap<>();
 +    for (int i = 0; i < peerIds.length; i++) {
 +      PeerId peerId = peerIds[i];
 +      res.put(peerId.getIp(), getStatusOfNode(peerId));
 +    }
 +
 +    return res;
 +  }
 +
 +  private static boolean getStatusOfNode(PeerId peerId) {
 +    QueryStatusRequest request = new QueryStatusRequest("");
 +    SingleQPTask task = new SingleQPTask(false, request);
- 
++    task.setTargetNode(peerId);
 +    LOGGER.debug("Execute get status for node {}.", peerId);
 +    try {
-       NodeAsClient client = RaftNodeAsClientManager.getInstance().getRaftNodeAsClient();
-       /** Call async method **/
-       client.asyncHandleRequest(task.getRequest(), peerId, task);
++      CLIENT_MANAGER.produceQPTask(task);
 +
 +      task.await();
 +      boolean status = false;
 +      if (task.getTaskState() == TaskState.FINISH) {
 +        BasicResponse response = task.getResponse();
 +        status = response != null && ((QueryStatusResponse) response).getStatus();
 +      }
 +      return status;
 +    } catch (RaftConnectionException | InterruptedException e) {
 +      LOGGER.error("Fail to get status from remote node {} because of {}.", peerId, e);
 +      return false;
 +    }
 +  }
- 
-   /**
-    * try to get raft rpc client
-    */
-   public static NodeAsClient getRaftNodeAsClient() throws RaftConnectionException {
-     NodeAsClient client = CLIENT_MANAGER.getRaftNodeAsClient();
-     if (client == null) {
-       throw new RaftConnectionException(String
-           .format("Raft inner rpc clients have reached the max numbers %s",
-               CLUSTER_CONFIG.getMaxNumOfInnerRpcClient() + CLUSTER_CONFIG
-                   .getMaxQueueNumOfInnerRpcClient()));
-     }
-     return client;
-   }
  }
diff --cc cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchAbstract.java
index d0f371f,be24b4d..4c4c78b
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchAbstract.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchAbstract.java
@@@ -110,8 -108,8 +108,6 @@@ public abstract class IoTDBMetadataFetc
          "show timeseries root.ln.*.wt01.*", // seriesPath with stars
          "show timeseries root.a.b", // nonexistent timeseries, thus returning ""
          "show timeseries root.ln,root.ln",
--        // SHOW TIMESERIES <PATH> only accept single seriesPath, thus
--        // returning ""
      };
      String[] standards = new String[]{
          "root.ln.wf01.wt01.status,root.ln.wf01,BOOLEAN,PLAIN,\n",
@@@ -355,7 -358,8 +356,9 @@@
              + "root.ln.wf02,\n"
              + "root.ln.wf01,\n"
              + "root.ln.wf05,\n";
-     ResultSet resultSet = databaseMetaData.getColumns(Constant.CATALOG_STORAGE_GROUP, null, null, null);
++
+     ResultSet resultSet = databaseMetaData
+         .getColumns(Constant.CATALOG_STORAGE_GROUP, null, null, null);
      checkCorrectness(resultSet, standard);
    }
  
diff --cc cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
index 0bb3c08,f080a9d..8800415
--- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
@@@ -26,11 -26,6 +26,10 @@@ import java.sql.SQLException
  import java.sql.Statement;
  
  public class Utils {
 +
 +  private Utils() {
 +
 +  }
- 
    public static String getCurrentPath(String... command) throws IOException {
      ProcessBuilder builder = new ProcessBuilder(command);
      builder.redirectErrorStream(true);
diff --cc cluster/src/test/java/org/apache/iotdb/cluster/utils/hash/MD5HashTest.java
index cd488d1,5af917b..129d1ac
--- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/hash/MD5HashTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/utils/hash/MD5HashTest.java
@@@ -18,10 -18,9 +18,10 @@@
   */
  package org.apache.iotdb.cluster.utils.hash;
  
- import static org.junit.Assert.*;
++
+ import static org.junit.Assert.assertEquals;
  
  import java.util.concurrent.CountDownLatch;
- import org.apache.iotdb.cluster.utils.hash.MD5Hash;
  import org.junit.After;
  import org.junit.Before;
  import org.junit.Test;
diff --cc docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
index a442bf5,a7113b6..011271d
--- a/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
+++ b/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
@@@ -285,50 -285,8 +285,50 @@@ The Windows system startup commands ar
  After using the command, the successful output will be as follows: 
  	
  ```
 -data-group-0	->	1
 -data-group-3	->	3
 -Total	->	4
 +192.168.130.14:
 +  data-group-0  ->   1
 +  data-group-1  ->   3
 +192.168.130.16:
 +  data-group-2  ->   2
 +  data-group-1  ->   0
 +192.168.130.18:
 +  data-group-0  ->   0
 +  data-group-2  ->   1
 +Total  ->   7
  ```
 -The above output indicates that node 192.168.130.14 contains 2 data partitions and 4 query tasks are running on it, wherein 1 query tasks is running on data partition data-group-0, and 3 query tasks are running on data partition data-group-1.
 +The above output indicates that 7 query tasks are running on the cluster. Moreover, node 192.168.130.14 contains 2 data partitions and 4 query tasks are running on it, wherein 1 query task is running on data partition data-group-0, and 3 query tasks are running on data partition data-group-1; node 192.168.130.16 contains 2 data partitions and 2 query tasks are running on it, wherein 2 query tasks are running on data partition data-group-2, and no query task is running on data partition data-group-1; node 192.168.130.18 contains 2 data partitions and 1 query task is running on it, wherein no query task is running on data partition data-group-0, and 1 query task is running on data partition data-group-2.
 +
 +### Query Status of Nodes in Cluster (status)
 +
 +An IoTDB Cluster contains multiple nodes. Any node may become unable to provide service normally because of network or hardware problems. With this command, users can check the current status of all nodes in the cluster.
 +
 +#### Input
 +
 +The command to query the status of nodes is `status`; no additional parameters are needed.
 +
 +#### Output
 +
 +The output consists of multiple lines. Each line represents a key-value pair, where the key is the node IP and the value is the status of that node (`on` represents normal and `off` represents abnormal). The format of each line is `key -> value`.
 +
 +#### Example
 +
 +Assume that the IoTDB Cluster is running on 3 nodes: 192.168.130.14, 192.168.130.16 and 192.168.130.18, and that the number of replicas is 2.
 +
 +The Linux and MacOS system startup commands are as follows:
 +```
 +  Shell > ./bin/nodetool.sh -h 192.168.130.14 status
 +```
 +  
 +The Windows system startup commands are as follows:
 +```
 +  Shell > \bin\nodetool.bat -h 192.168.130.14 status
 +```
 +  
 +After using the command, the successful output will be as follows: 
 +	
 +```
 +192.168.130.14  ->  on
 +192.168.130.16  ->  on
 +192.168.130.18  ->  off
 +```
- The above output indicates that node 192.168.130.14 and node 192.168.130.16 are in normal state, and node 192.168.130.18 cannot provide services.
++The above output indicates that nodes 192.168.130.14 and 192.168.130.16 are in a normal state, while node 192.168.130.18 cannot provide services.
diff --cc iotdb/iotdb/conf/iotdb-cluster.properties
index 29df4ce,fc27ee0..2761b22
--- a/iotdb/iotdb/conf/iotdb-cluster.properties
+++ b/iotdb/iotdb/conf/iotdb-cluster.properties
@@@ -63,11 -67,12 +67,14 @@@ qp_task_timeout_ms = 500
  # number of virtual nodes
  num_of_virtual_nodes = 2
  
- # Maximum number of use inner rpc client
- max_num_of_inner_rpc_client = 500
- 
+ # Maximum number of inner rpc client threads.
 -# When this value <= 0, use CPU core number * 10
++# When this value <= 0, use CPU core number * 5
+ concurrent_inner_rpc_client_thread = 0
+ 
+ # Maximum queue length of qp tasks waiting to be executed. If the number of waiting qp tasks
+ # exceeds this number, new qp tasks will be rejected.
  max_queue_num_of_inner_rpc_client = 500
  
  # ReadMetadataConsistencyLevel: 1  Strong consistency, 2  Weak consistency
@@@ -79,7 -84,7 +86,7 @@@ read_data_consistency_level = 
  # Maximum number of threads which execute tasks generated by client requests concurrently.
  # Each client request corresponds to a QP Task. A QP task may be divided into several sub-tasks.
  # So this value is the sum of all sub-tasks.
--# When this value <= 0, use CPU core number * 10
++# When this value <= 0, use CPU core number * 5
  concurrent_qp_sub_task_thread = 0
  
  # Batch data size read from remote query node once while reading, default value is 10000.
diff --cc iotdb/src/main/java/org/apache/iotdb/db/qp/executor/QueryProcessExecutor.java
index 9bf87c2,99476f2..04e905b
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/QueryProcessExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/QueryProcessExecutor.java
@@@ -32,8 -33,8 +32,8 @@@ import org.apache.iotdb.db.qp.physical.
  import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
  import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
  import org.apache.iotdb.db.query.context.QueryContext;
- import org.apache.iotdb.db.query.executor.EngineQueryRouter;
 +import org.apache.iotdb.db.query.executor.AbstractQueryRouter;
+ import org.apache.iotdb.db.query.executor.EngineQueryRouter;
 -import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
  import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
  import org.apache.iotdb.tsfile.read.common.Path;
  import org.apache.iotdb.tsfile.read.expression.QueryExpression;
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
index c46c6f1,03c600d..b321f45
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
@@@ -162,5 -172,29 +162,4 @@@ public class EngineQueryRouter extends 
          fillType);
      return fillEngineExecutor.execute(context);
    }
 -
 -  /**
 -   * sort intervals by start time and merge overlapping intervals.
 -   *
 -   * @param intervals time interval
 -   */
 -  private List<Pair<Long, Long>> mergeInterval(List<Pair<Long, Long>> intervals) {
 -    // sort by interval start time.
 -    intervals.sort(((o1, o2) -> (int) (o1.left - o2.left)));
 -
 -    LinkedList<Pair<Long, Long>> merged = new LinkedList<>();
 -    for (Pair<Long, Long> interval : intervals) {
 -      // if the list of merged intervals is empty or
 -      // if the current interval does not overlap with the previous, simply append it.
 -      if (merged.isEmpty() || merged.getLast().right < interval.left) {
 -        merged.add(interval);
 -      } else {
 -        // otherwise, there is overlap, so we merge the current and previous intervals.
 -        merged.getLast().right = Math.max(merged.getLast().right, interval.right);
 -      }
 -    }
 -    return merged;
 -  }
 -
--
  }