You are viewing a plain text version of this content; the canonical link was a hyperlink in the original HTML version of this message and is not available in this plain-text rendering.
Posted to commits@iotdb.apache.org by lt...@apache.org on 2019/05/21 01:51:27 UTC

[incubator-iotdb] branch cluster updated (618cc39 -> 3577857)

This is an automated email from the ASF dual-hosted git repository.

lta pushed a change to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


    from 618cc39  improve robustness of query metadata
     new 167a30e  add fill feature
     new fe9937a  add aggre feature without timegenerator
     new aa3aa0d  fix a serve bug of filter serializable
     new 4abde09  add aggregation query it
     new 3020603  Increase the function of query polling
     new 3ee37c3  fix some error bugs: add select series group entity, add query for all nodes features
     new 9487dbf  add it test of aggregation function
     new d079f5e  Merge branch 'cluster' into cluster_fill_aggre_groupby
     new 6a733d9  fix a serve bug of set readmetadata level
     new 20c93d1  fix a serve bug
     new 3577857  fix a bug

The 11 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/iotdb/cluster/entity/Server.java    |  24 +-
 .../qp/executor/ClusterQueryProcessExecutor.java   |  11 +-
 .../cluster/qp/executor/QueryMetadataExecutor.java |   1 -
 .../iotdb/cluster/query/common/FillBatchData.java  |  47 ++--
 .../executor/ClusterAggregateEngineExecutor.java   | 251 +++++++++++++++++++++
 .../executor/ClusterExecutorWithTimeGenerator.java |  35 +--
 .../ClusterExecutorWithoutTimeGenerator.java       |  21 +-
 .../query/executor/ClusterFillEngineExecutor.java  |  75 +++---
 .../cluster/query/executor/ClusterQueryRouter.java |  40 +++-
 .../query/factory/ClusterSeriesReaderFactory.java  |  28 ++-
 .../ClusterRpcSingleQueryManager.java              | 239 ++++++++------------
 ...oupEntity.java => FilterSeriesGroupEntity.java} |   4 +-
 .../IClusterRpcSingleQueryManager.java             |  11 +-
 ...oupEntity.java => SelectSeriesGroupEntity.java} |  56 ++---
 .../querynode/ClusterLocalQueryManager.java        |   2 +-
 .../querynode/ClusterLocalSingleQueryManager.java  | 222 ++++++++++++++----
 .../querynode/IClusterLocalQueryManager.java       |   4 +-
 .../querynode/IClusterLocalSingleQueryManager.java |   5 +-
 .../AbstractClusterPointReader.java                |   7 +-
 .../coordinatornode/ClusterFilterSeriesReader.java |  19 +-
 .../coordinatornode/ClusterSelectSeriesReader.java |  25 +-
 ...=> AbstractClusterSelectSeriesBatchReader.java} |   2 +-
 ...ava => ClusterFillSelectSeriesBatchReader.java} |  23 +-
 ...a => ClusterFilterSeriesBatchReaderEntity.java} |  14 +-
 ...or.java => ClusterSelectSeriesBatchReader.java} |  14 +-
 ...ClusterSelectSeriesBatchReaderByTimestamp.java} |   7 +-
 ...a => ClusterSelectSeriesBatchReaderEntity.java} |  43 +++-
 ... => IClusterFilterSeriesBatchReaderEntity.java} |   2 +-
 .../timegenerator/ClusterNodeConstructor.java      |   4 +-
 .../cluster/query/utils/ClusterRpcReaderUtils.java |  74 +++---
 .../ClusterTimeValuePairUtils.java}                |  31 +--
 .../iotdb/cluster/query/utils/ExpressionUtils.java |  12 +-
 .../query/utils/QueryPlanPartitionUtils.java       | 192 ++++++++++++----
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |   3 +-
 .../querydata/InitSeriesReaderSyncProcessor.java   |   5 +
 .../request/querydata/InitSeriesReaderRequest.java |  72 ++++--
 .../QuerySeriesDataByTimestampRequest.java         |  17 +-
 .../request/querydata/QuerySeriesDataRequest.java  |  16 +-
 .../cluster/service/TSServiceClusterImpl.java      |  14 +-
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |  87 ++++---
 .../iotdb/cluster}/integration/Constant.java       |   2 +-
 .../cluster}/integration/IoTDBAggregationIT.java   | 157 ++++++++++---
 .../integration/IoTDBAggregationLargeDataIT.java   | 102 +++++----
 .../integration/IoTDBAggregationSmallDataIT.java   | 221 ++++++------------
 .../cluster/integration/IoTDBFillQueryIT.java      |  79 ++++---
 .../integration/IoTDBMetadataFetchLocallyIT.java   |   1 -
 .../IoTDBQueryIT.java}                             |   5 +-
 .../IoTDBQueryLargeDataIT.java}                    |   6 +-
 .../query/manager/ClusterLocalManagerTest.java     | 135 +++++------
 .../query/manager/ClusterRpcManagerTest.java       |  46 +---
 .../cluster/query/utils/ExpressionUtilsTest.java   |  17 +-
 .../query/utils/QueryPlanPartitionUtilsTest.java   |  60 +++--
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  19 +-
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   5 +
 .../iotdb/db/engine/filenode/FileNodeManager.java  |   3 +
 .../io/LocalTextModificationAccessor.java          |   1 +
 .../db/qp/executor/IQueryProcessExecutor.java      |   4 +-
 .../db/query/control/QueryResourceManager.java     |   1 +
 .../dataset/EngineDataSetWithoutTimeGenerator.java |   4 +
 .../groupby/GroupByWithValueFilterDataSet.java     |   8 +-
 .../db/query/executor/AggregateEngineExecutor.java |  87 ++++---
 .../iotdb/db/query/executor/EngineQueryRouter.java |   4 +-
 .../db/query/executor/FillEngineExecutor.java      |  11 +-
 .../db/query/executor/IFillEngineExecutor.java     |  16 +-
 .../java/org/apache/iotdb/db/query/fill/IFill.java |  14 +-
 .../org/apache/iotdb/db/query/fill/LinearFill.java |   4 +-
 .../apache/iotdb/db/query/fill/PreviousFill.java   |   6 +-
 .../timegenerator/AbstractNodeConstructor.java     |   3 -
 .../org/apache/iotdb/db/service/TSServiceImpl.java |   5 +
 .../java/org/apache/iotdb/db/service/Utils.java    |   3 +
 70 files changed, 1677 insertions(+), 1111 deletions(-)
 copy tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/series/EmptyFileSeriesReader.java => cluster/src/main/java/org/apache/iotdb/cluster/query/common/FillBatchData.java (50%)
 create mode 100644 cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
 copy iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java => cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java (50%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/{FilterGroupEntity.java => FilterSeriesGroupEntity.java} (97%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/{FilterGroupEntity.java => SelectSeriesGroupEntity.java} (54%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{AbstractClusterBatchReader.java => AbstractClusterSelectSeriesBatchReader.java} (93%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{IClusterFilterSeriesBatchReader.java => ClusterFillSelectSeriesBatchReader.java} (62%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{ClusterFilterSeriesBatchReader.java => ClusterFilterSeriesBatchReaderEntity.java} (89%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{ClusterBatchReaderWithoutTimeGenerator.java => ClusterSelectSeriesBatchReader.java} (84%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{ClusterBatchReaderByTimestamp.java => ClusterSelectSeriesBatchReaderByTimestamp.java} (90%)
 copy cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{AbstractClusterBatchReader.java => ClusterSelectSeriesBatchReaderEntity.java} (52%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/{IClusterFilterSeriesBatchReader.java => IClusterFilterSeriesBatchReaderEntity.java} (95%)
 rename cluster/src/main/java/org/apache/iotdb/cluster/query/{reader/querynode/AbstractClusterBatchReader.java => utils/ClusterTimeValuePairUtils.java} (57%)
 copy {iotdb/src/test/java/org/apache/iotdb/db => cluster/src/test/java/org/apache/iotdb/cluster}/integration/Constant.java (98%)
 copy {iotdb/src/test/java/org/apache/iotdb/db => cluster/src/test/java/org/apache/iotdb/cluster}/integration/IoTDBAggregationIT.java (81%)
 copy {iotdb/src/test/java/org/apache/iotdb/db => cluster/src/test/java/org/apache/iotdb/cluster}/integration/IoTDBAggregationLargeDataIT.java (93%)
 copy {iotdb/src/test/java/org/apache/iotdb/db => cluster/src/test/java/org/apache/iotdb/cluster}/integration/IoTDBAggregationSmallDataIT.java (82%)
 copy iotdb/src/test/java/org/apache/iotdb/db/integration/IOTDBFillIT.java => cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java (88%)
 rename cluster/src/test/java/org/apache/iotdb/cluster/{query/ClusterQueryTest.java => integration/IoTDBQueryIT.java} (99%)
 rename cluster/src/test/java/org/apache/iotdb/cluster/{query/ClusterQueryLargeDataTest.java => integration/IoTDBQueryLargeDataIT.java} (99%)
 copy tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/QueryExecutor.java => iotdb/src/main/java/org/apache/iotdb/db/query/executor/IFillEngineExecutor.java (67%)


[incubator-iotdb] 06/11: fix some error bugs: add select series group entity, add query for all nodes features

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 3ee37c3afc234b347c82b9ac984fb487988f8aeb
Author: lta <li...@163.com>
AuthorDate: Mon May 20 15:34:30 2019 +0800

    fix some error bugs: add select series group entity, add query for all nodes features
---
 .../executor/ClusterAggregateEngineExecutor.java   |  48 ++++----
 .../executor/ClusterExecutorWithTimeGenerator.java |  35 ++----
 .../ClusterExecutorWithoutTimeGenerator.java       |  21 +++-
 .../query/executor/ClusterFillEngineExecutor.java  |  15 ++-
 .../query/factory/ClusterSeriesReaderFactory.java  |  28 +++--
 .../ClusterRpcSingleQueryManager.java              | 137 +++++++++------------
 ...oupEntity.java => FilterSeriesGroupEntity.java} |   4 +-
 .../IClusterRpcSingleQueryManager.java             |   7 --
 ...oupEntity.java => SelectSeriesGroupEntity.java} |  55 +++------
 .../AbstractClusterPointReader.java                |   2 +
 .../coordinatornode/ClusterFilterSeriesReader.java |  17 +--
 .../coordinatornode/ClusterSelectSeriesReader.java |  25 +---
 .../timegenerator/ClusterNodeConstructor.java      |   4 +-
 .../query/utils/ClusterTimeValuePairUtils.java     |  18 +++
 .../iotdb/cluster/query/utils/ExpressionUtils.java |  12 +-
 .../query/utils/QueryPlanPartitionUtils.java       |  65 +++++-----
 .../query/manager/ClusterRpcManagerTest.java       |  46 ++-----
 .../query/utils/QueryPlanPartitionUtilsTest.java   |  56 +++++----
 18 files changed, 268 insertions(+), 327 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
index 51113c9..b34afa1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -20,6 +20,7 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -29,9 +30,11 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -79,18 +82,25 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
   public QueryDataSet executeWithoutTimeGenerator(QueryContext context)
       throws FileNodeManagerException, IOException, PathErrorException, ProcessorException {
     Filter timeFilter = expression != null ? ((GlobalTimeExpression) expression).getFilter() : null;
-    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
 
     List<Path> paths = new ArrayList<>();
     List<IPointReader> readers = new ArrayList<>();
     List<TSDataType> dataTypes = new ArrayList<>();
+    //Mark filter series reader index group by group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (int i = 0; i < selectedSeries.size(); i++) {
       Path path = selectedSeries.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
 
-      if (selectPathReaders.containsKey(path)) {
-        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
         readers.add(reader);
         dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         paths.add(path);
         // construct AggregateFunction
@@ -140,15 +150,19 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
     /** add query token for query series which can handle locally **/
     List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
-    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> selectSeriesGroupEntity.getSelectPaths()
+            .forEach(path -> remoteQuerySeries.add(path)));
     localQuerySeries.removeAll(remoteQuerySeries);
     QueryResourceManager.getInstance()
         .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
 
     /** add query token for filter series which can handle locally **/
     Set<String> deviceIdSet = new HashSet<>();
-    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
-      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
       remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
     }
     QueryResourceManager.getInstance()
@@ -156,32 +170,18 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
     ClusterTimeGenerator timestampGenerator;
     List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    // origin data type of select paths
+    List<TSDataType> originDataTypes = new ArrayList<>();
     try {
       timestampGenerator = new ClusterTimeGenerator(expression, context,
           queryManager);
       readersOfSelectedSeries = ClusterSeriesReaderFactory
           .createReadersByTimestampOfSelectedPaths(selectedSeries, context,
-              queryManager);
+              queryManager, originDataTypes);
     } catch (IOException ex) {
       throw new FileNodeManagerException(ex);
     }
 
-    /** Get data type of select paths **/
-    List<TSDataType> originDataTypes = new ArrayList<>();
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
-        .getSelectSeriesReaders();
-    for (Path path : selectedSeries) {
-      try {
-        if (selectSeriesReaders.containsKey(path)) {
-          originDataTypes.add(selectSeriesReaders.get(path).getDataType());
-        } else {
-          originDataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
-        }
-      } catch (PathErrorException e) {
-        throw new FileNodeManagerException(e);
-      }
-    }
-
     List<AggregateFunction> aggregateFunctions = new ArrayList<>();
     for (int i = 0; i < selectedSeries.size(); i++) {
       TSDataType type = originDataTypes.get(i);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
index fed8c0d..fe2511a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithTimeGenerator.java
@@ -27,7 +27,7 @@ import java.util.Set;
 import org.apache.iotdb.cluster.query.dataset.ClusterDataSetWithTimeGenerator;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
@@ -71,15 +71,19 @@ public class ClusterExecutorWithTimeGenerator {
 
     /** add query token for query series which can handle locally **/
     List<Path> localQuerySeries = new ArrayList<>(queryExpression.getSelectedSeries());
-    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    Set<Path> remoteQuerySeries = new HashSet<>();
+    queryManager.getSelectSeriesGroupEntityMap().values().forEach(
+        selectSeriesGroupEntity -> selectSeriesGroupEntity.getSelectPaths()
+            .forEach(path -> remoteQuerySeries.add(path)));
     localQuerySeries.removeAll(remoteQuerySeries);
     QueryResourceManager.getInstance()
         .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
 
     /** add query token for filter series which can handle locally **/
     Set<String> deviceIdSet = new HashSet<>();
-    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
-      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : queryManager
+        .getFilterSeriesGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterSeriesGroupEntity.getFilterPaths();
       remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
     }
     QueryResourceManager.getInstance()
@@ -88,33 +92,18 @@ public class ClusterExecutorWithTimeGenerator {
 
     ClusterTimeGenerator timestampGenerator;
     List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    /** Get data type of select paths **/
+    List<TSDataType> dataTypes = new ArrayList<>();
     try {
       timestampGenerator = new ClusterTimeGenerator(queryExpression.getExpression(), context,
           queryManager);
       readersOfSelectedSeries = ClusterSeriesReaderFactory
           .createReadersByTimestampOfSelectedPaths(queryExpression.getSelectedSeries(), context,
-              queryManager);
-    } catch (IOException ex) {
+              queryManager, dataTypes);
+    } catch (IOException | PathErrorException ex) {
       throw new FileNodeManagerException(ex);
     }
 
-    /** Get data type of select paths **/
-    List<TSDataType> dataTypes = new ArrayList<>();
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
-        .getSelectSeriesReaders();
-    for (Path path : queryExpression.getSelectedSeries()) {
-      try {
-        if (selectSeriesReaders.containsKey(path)) {
-          dataTypes.add(selectSeriesReaders.get(path).getDataType());
-        } else {
-          dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
-        }
-      } catch (PathErrorException e) {
-        throw new FileNodeManagerException(e);
-      }
-
-    }
-
     EngineReaderByTimeStamp[] readersOfSelectedSeriesArray = new EngineReaderByTimeStamp[readersOfSelectedSeries
         .size()];
     int index = 0;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
index 8f42c9f..95e5f1a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterExecutorWithoutTimeGenerator.java
@@ -20,11 +20,15 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
@@ -62,7 +66,7 @@ public class ClusterExecutorWithoutTimeGenerator extends AbstractExecutorWithout
    * Execute query without filter or with only global time filter.
    */
   public QueryDataSet execute(QueryContext context)
-      throws FileNodeManagerException {
+      throws FileNodeManagerException, PathErrorException {
 
     Filter timeFilter = null;
     if (queryExpression.getExpression() != null) {
@@ -72,15 +76,22 @@ public class ClusterExecutorWithoutTimeGenerator extends AbstractExecutorWithout
     List<IPointReader> readersOfSelectedSeries = new ArrayList<>();
     List<TSDataType> dataTypes = new ArrayList<>();
 
-    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> paths = new ArrayList<>();
+    //Mark filter series reader index group by group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : queryExpression.getSelectedSeries()) {
 
-      if (selectPathReaders.containsKey(path)) {
-        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesGroupEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesGroupEntityMap.get(groupId)
+            .getSelectSeriesReaders().get(index);
         readersOfSelectedSeries.add(reader);
         dataTypes.add(reader.getDataType());
-
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         IPointReader reader = createSeriesReader(context, path, dataTypes, timeFilter);
         readersOfSelectedSeries.add(reader);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
index 771637e..608a479 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
@@ -20,10 +20,13 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -58,16 +61,22 @@ public class ClusterFillEngineExecutor implements IFillEngineExecutor {
   @Override
   public QueryDataSet execute(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException {
-    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
     List<Path> paths = new ArrayList<>();
     List<IFill> fillList = new ArrayList<>();
     List<TSDataType> dataTypeList = new ArrayList<>();
     List<IPointReader> readers = new ArrayList<>();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap = queryManager.getSelectSeriesGroupEntityMap();
+    //Mark filter series reader index group by group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
     for (Path path : selectedSeries) {
-      if (selectPathReaders.containsKey(path)) {
-        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
         readers.add(reader);
         dataTypeList.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         QueryDataSource queryDataSource = QueryResourceManager.getInstance()
             .getQueryDataSource(path, context);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
index d65ed58..a9ee032 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/factory/ClusterSeriesReaderFactory.java
@@ -20,18 +20,24 @@ package org.apache.iotdb.cluster.query.factory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
 import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestamp;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
 /**
@@ -43,27 +49,35 @@ public class ClusterSeriesReaderFactory {
   }
 
   /**
-   * Construct ReaderByTimestamp , include sequential data and unsequential data.
+   * Construct ReaderByTimestamp , include sequential data and unsequential data. And get all series dataType.
    *
    * @param paths selected series path
    * @param context query context
    * @return the list of EngineReaderByTimeStamp
    */
   public static List<EngineReaderByTimeStamp> createReadersByTimestampOfSelectedPaths(
-      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager)
-      throws IOException, FileNodeManagerException {
+      List<Path> paths, QueryContext context, ClusterRpcSingleQueryManager queryManager, List<TSDataType> dataTypes)
+      throws IOException, FileNodeManagerException, PathErrorException {
 
-    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager.getSelectSeriesReaders();
+    Map<String, SelectSeriesGroupEntity> selectSeriesEntityMap = queryManager
+        .getSelectSeriesGroupEntityMap();
     List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+    //Mark filter series reader index group by group id
+    Map<String, Integer> selectSeriesReaderIndex = new HashMap<>();
 
     for (Path path : paths) {
-
-      if (selectSeriesReaders.containsKey(path)) {
-        readersOfSelectedSeries.add(selectSeriesReaders.get(path));
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (selectSeriesEntityMap.containsKey(groupId)) {
+        int index = selectSeriesReaderIndex.getOrDefault(groupId, 0);
+        ClusterSelectSeriesReader reader = selectSeriesEntityMap.get(groupId).getSelectSeriesReaders().get(index);
+        readersOfSelectedSeries.add(reader);
+        dataTypes.add(reader.getDataType());
+        selectSeriesReaderIndex.put(groupId, index + 1);
       } else {
         /** can handle series query locally **/
         EngineReaderByTimeStamp readerByTimeStamp = createReaderByTimeStamp(path, context);
         readersOfSelectedSeries.add(readerByTimeStamp);
+        dataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
       }
     }
     return readersOfSelectedSeries;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index af4db31..905ce1b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -86,27 +86,15 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
 
   // select path resource
   /**
-   * Query plans of select paths which are divided from queryPlan group by group id, it contains all
-   * group id ,including local data group if it involves.
+   * Select series group entities grouped by data group; key is group id (only contains remote group ids)
    */
-  private Map<String, QueryPlan> selectPathPlans = new HashMap<>();
-
-  /**
-   * Key is group id (only contains remote group id), value is all select series in group id.
-   */
-  private Map<String, List<Path>> selectSeriesByGroupId = new HashMap<>();
-
-  /**
-   * Series reader of select paths (only contains remote series), key is series path , value is
-   * reader
-   */
-  private Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = new HashMap<>();
+  private Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = new HashMap<>();
 
   // filter path resource
   /**
-   * Filter group entity group by data group, key is group id(only contain remote group id)
+   * Filter series group entities grouped by data group; key is group id (only contains remote group ids)
    */
-  private Map<String, FilterGroupEntity> filterGroupEntityMap = new HashMap<>();
+  private Map<String, FilterSeriesGroupEntity> filterSeriesGroupEntityMap = new HashMap<>();
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
@@ -140,17 +128,18 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   private void initSeriesReader(int readDataConsistencyLevel)
       throws RaftConnectionException, IOException {
     // Init all series with data group of select series,if filter series has the same data group, init them together.
-    for (Entry<String, QueryPlan> entry : selectPathPlans.entrySet()) {
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      QueryPlan queryPlan = entry.getValue();
+      SelectSeriesGroupEntity selectEntity = entry.getValue();
+      QueryPlan queryPlan = selectEntity.getQueryPlan();
       if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
         List<Filter> filterList = new ArrayList<>();
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-          allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-          filterList = filterGroupEntity.getFilters();
+        if (filterSeriesGroupEntityMap.containsKey(groupId)) {
+          FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+          allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+          filterList = filterSeriesGroupEntity.getFilters();
         }
         /** create request **/
         BasicRequest request = InitSeriesReaderRequest
@@ -161,27 +150,29 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         handleInitReaderResponse(groupId, allQueryPlan, response);
       } else {
         dataGroupUsage.add(groupId);
-        selectSeriesByGroupId.remove(groupId);
-        if (filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.remove(groupId);
-        }
+        selectSeriesGroupEntityMap.remove(groupId);
+        filterSeriesGroupEntityMap.remove(groupId);
       }
     }
 
     //Init series reader with data groups of filter series, which don't exist in data groups list of select series.
-    for (Entry<String, FilterGroupEntity> entry : filterGroupEntityMap.entrySet()) {
+    for (Entry<String, FilterSeriesGroupEntity> entry : filterSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      if (!selectPathPlans.containsKey(groupId)) {
+      if (!selectSeriesGroupEntityMap.containsKey(groupId) && !QPExecutorUtils
+          .canHandleQueryByGroupId(groupId)) {
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
-        FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-        allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
-        List<Filter> filterList = filterGroupEntity.getFilters();
+        FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+        allQueryPlan.put(PathType.FILTER_PATH, filterSeriesGroupEntity.getQueryPlan());
+        List<Filter> filterList = filterSeriesGroupEntity.getFilters();
         BasicRequest request = InitSeriesReaderRequest
             .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
                 allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
             .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
+      } else if (!selectSeriesGroupEntityMap.containsKey(groupId)) {
+        dataGroupUsage.add(groupId);
+        filterSeriesGroupEntityMap.remove(groupId);
       }
     }
   }
@@ -201,7 +192,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterSelectSeriesReader seriesReader = new ClusterSelectSeriesReader(groupId, seriesPath,
             dataType, this);
-        selectSeriesReaders.put(seriesPath, seriesReader);
+        selectSeriesGroupEntityMap.get(groupId).addSelectSeriesReader(seriesReader);
       }
     }
     if (allQueryPlan.containsKey(PathType.FILTER_PATH)) {
@@ -213,10 +204,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         TSDataType dataType = seriesType.get(i);
         ClusterFilterSeriesReader seriesReader = new ClusterFilterSeriesReader(groupId, seriesPath,
             dataType, this);
-        if (!filterGroupEntityMap.containsKey(groupId)) {
-          filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
-        }
-        filterGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
+        filterSeriesGroupEntityMap.get(groupId).addFilterSeriesReader(seriesReader);
       }
     }
   }
@@ -224,16 +212,15 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   @Override
   public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
     List<String> fetchDataSeries = new ArrayList<>();
-    Map<String, List<Path>> seriesByGroupId;
-    Map<Path, ClusterSelectSeriesReader> seriesReaders;
-    seriesByGroupId = selectSeriesByGroupId;
-    seriesReaders = selectSeriesReaders;
-    if (seriesByGroupId.containsKey(groupId)) {
-      List<Path> allFilterSeries = seriesByGroupId.get(groupId);
-      for (Path series : allFilterSeries) {
-        if (seriesReaders.get(series).enableFetchData()) {
-          fetchDataSeries.add(series.getFullPath());
-        }
+    List<Integer> selectSeriesIndexs = new ArrayList<>();
+    List<Path> selectSeries = selectSeriesGroupEntityMap.get(groupId).getSelectPaths();
+    List<ClusterSelectSeriesReader> seriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeries.size(); i++) {
+      Path series = selectSeries.get(i);
+      if (seriesReaders.get(i).enableFetchData()) {
+        fetchDataSeries.add(series.getFullPath());
+        selectSeriesIndexs.add(i);
       }
     }
     BasicRequest request = QuerySeriesDataRequest
@@ -241,7 +228,8 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
             queryRounds++);
     QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
         .handleQueryRequest(request, queryNodes.get(groupId), 0);
-    handleFetchDataResponseForSelectPaths(fetchDataSeries, response);
+
+    handleFetchDataResponseForSelectPaths(groupId, selectSeriesIndexs, response);
   }
 
   @Override
@@ -258,42 +246,45 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   @Override
   public void fetchBatchDataByTimestampForAllSelectPaths(List<Long> batchTimestamp)
       throws RaftConnectionException {
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
       List<String> fetchDataFilterSeries = new ArrayList<>();
-      entry.getValue().forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
+      entry.getValue().getSelectPaths()
+          .forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
       BasicRequest request = QuerySeriesDataByTimestampRequest
           .createRequest(groupId, queryRounds++, taskId, batchTimestamp, fetchDataFilterSeries);
       QuerySeriesDataByTimestampResponse response = (QuerySeriesDataByTimestampResponse) ClusterRpcReaderUtils
           .handleQueryRequest(request, queryNodes.get(groupId), 0);
-      handleFetchDataByTimestampResponseForSelectPaths(fetchDataFilterSeries, response);
+      handleFetchDataByTimestampResponseForSelectPaths(groupId, fetchDataFilterSeries, response);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataByTimestampResponseForSelectPaths(List<String> fetchDataSeries,
+  private void handleFetchDataByTimestampResponseForSelectPaths(String groupId,
+      List<String> fetchDataSeries,
       BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
     for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
-          .addBatchData(batchData, true);
+      selectSeriesReaders.get(i).addBatchData(batchData, true);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataResponseForSelectPaths(List<String> fetchDataSeries,
-      BasicQueryDataResponse response) {
+  private void handleFetchDataResponseForSelectPaths(String groupId,
+      List<Integer> selectSeriesIndexs, BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
-      String series = fetchDataSeries.get(i);
+    List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
+        .getSelectSeriesReaders();
+    for (int i = 0; i < selectSeriesIndexs.size(); i++) {
       BatchData batchData = batchDataList.get(i);
-      selectSeriesReaders.get(new Path(series))
+      selectSeriesReaders.get(selectSeriesIndexs.get(i))
           .addBatchData(batchData, batchData.length() < CLUSTER_CONF.getBatchReadSize());
     }
   }
@@ -303,10 +294,11 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleFetchDataResponseForFilterPaths(String groupId,
       QuerySeriesDataResponse response) {
-    FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-    List<Path> fetchDataSeries = filterGroupEntity.getFilterPaths();
+    FilterSeriesGroupEntity filterSeriesGroupEntity = filterSeriesGroupEntityMap.get(groupId);
+    List<Path> fetchDataSeries = filterSeriesGroupEntity.getFilterPaths();
     List<BatchData> batchDataList = response.getSeriesBatchData();
-    List<ClusterFilterSeriesReader> filterReaders = filterGroupEntity.getFilterSeriesReaders();
+    List<ClusterFilterSeriesReader> filterReaders = filterSeriesGroupEntity
+        .getFilterSeriesReaders();
     boolean remoteDataFinish = true;
     for (int i = 0; i < batchDataList.size(); i++) {
       if (batchDataList.get(i).length() != 0) {
@@ -323,11 +315,6 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   }
 
   @Override
-  public QueryPlan getSelectPathQueryPlan(String fullPath) {
-    return selectPathPlans.get(fullPath);
-  }
-
-  @Override
   public void setDataGroupReaderNode(String groupId, PeerId readerNode) {
     queryNodes.put(groupId, readerNode);
   }
@@ -375,19 +362,11 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     this.queryNodes.put(groupID, peerId);
   }
 
-  public Map<String, QueryPlan> getSelectPathPlans() {
-    return selectPathPlans;
-  }
-
-  public Map<String, List<Path>> getSelectSeriesByGroupId() {
-    return selectSeriesByGroupId;
-  }
-
-  public Map<Path, ClusterSelectSeriesReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
+  public Map<String, SelectSeriesGroupEntity> getSelectSeriesGroupEntityMap() {
+    return selectSeriesGroupEntityMap;
   }
 
-  public Map<String, FilterGroupEntity> getFilterGroupEntityMap() {
-    return filterGroupEntityMap;
+  public Map<String, FilterSeriesGroupEntity> getFilterSeriesGroupEntityMap() {
+    return filterSeriesGroupEntityMap;
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
similarity index 97%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
index 326af11..19407a0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterSeriesGroupEntity.java
@@ -28,7 +28,7 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 /**
  * Filter entities of a data group, concluding QueryPlan, filters, all filter paths and filter readers
  */
-public class FilterGroupEntity {
+public class FilterSeriesGroupEntity {
 
   /**
    * Group id
@@ -62,7 +62,7 @@ public class FilterGroupEntity {
    */
   private List<ClusterFilterSeriesReader> filterSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public FilterSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
     this.filterPaths = new ArrayList<>();
     this.filters = new ArrayList<>();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
index c4aec9c..d6ca0d7 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
@@ -69,13 +69,6 @@ public interface IClusterRpcSingleQueryManager {
       throws RaftConnectionException;
 
   /**
-   * Get query plan of select path
-   *
-   * @param fullPath Timeseries full path in select paths
-   */
-  QueryPlan getSelectPathQueryPlan(String fullPath);
-
-  /**
    * Set reader node of a data group
    *
    * @param groupId data group id
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
similarity index 55%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
index 326af11..9f35117 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/FilterGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
@@ -21,15 +21,14 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 /**
- * Filter entities of a data group, concluding QueryPlan, filters, all filter paths and filter readers
+ * Select series entity of a data group, including its QueryPlan, all select paths and series readers
  */
-public class FilterGroupEntity {
-
+public class SelectSeriesGroupEntity {
   /**
    * Group id
    */
@@ -41,32 +40,24 @@ public class FilterGroupEntity {
   private QueryPlan queryPlan;
 
   /**
-   * Filters of filter path.
-   */
-  private List<Filter> filters;
-
-  /**
    *
-   * all filter series
+   * all select series
    * <p>
-   * Note: It may contain multiple series in a complicated tree
-   * for example: select * from root.vehicle where d0.s0 > 10 and d0.s0 < 101 or time = 12,
-   * filter tree: <code>[[[[root.vehicle.d0.s0:time == 12] || [root.vehicle.d0.s1:time == 12]] || [root.vehicle.d1.s2:time == 12]] || [root.vehicle.d1.s3:time == 12]]</code>
+   * Note: It may contain multiple series in a query
+   * for example: select sum(s0), max(s0) from root.vehicle.d0 where s0 > 10
    * </p>
    */
-  private List<Path> filterPaths;
-
+  private List<Path> selectPaths;
 
   /**
    * Series reader of filter paths (only contains remote series)
    */
-  private List<ClusterFilterSeriesReader> filterSeriesReaders;
+  private List<ClusterSelectSeriesReader> selectSeriesReaders;
 
-  public FilterGroupEntity(String groupId) {
+  public SelectSeriesGroupEntity(String groupId) {
     this.groupId = groupId;
-    this.filterPaths = new ArrayList<>();
-    this.filters = new ArrayList<>();
-    this.filterSeriesReaders = new ArrayList<>();
+    this.selectPaths = new ArrayList<>();
+    this.selectSeriesReaders = new ArrayList<>();
   }
 
   public String getGroupId() {
@@ -85,27 +76,19 @@ public class FilterGroupEntity {
     this.queryPlan = queryPlan;
   }
 
-  public List<Filter> getFilters() {
-    return filters;
-  }
-
-  public void addFilter(Filter filter) {
-    this.filters.add(filter);
-  }
-
-  public List<Path> getFilterPaths() {
-    return filterPaths;
+  public List<Path> getSelectPaths() {
+    return selectPaths;
   }
 
-  public void addFilterPaths(Path filterPath) {
-    this.filterPaths.add(filterPath);
+  public void addSelectPaths(Path selectPath) {
+    this.selectPaths.add(selectPath);
   }
 
-  public List<ClusterFilterSeriesReader> getFilterSeriesReaders() {
-    return filterSeriesReaders;
+  public List<ClusterSelectSeriesReader> getSelectSeriesReaders() {
+    return selectSeriesReaders;
   }
 
-  public void addFilterSeriesReader(ClusterFilterSeriesReader filterSeriesReader) {
-    this.filterSeriesReaders.add(filterSeriesReader);
+  public void addSelectSeriesReader(ClusterSelectSeriesReader selectSeriesReader) {
+    this.selectSeriesReaders.add(selectSeriesReader);
   }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
index 3f73160..c0012a1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
@@ -71,4 +71,6 @@ public abstract class AbstractClusterPointReader implements IPointReader {
     }
     return null;
   }
+
+  public abstract void addBatchData(BatchData batchData, boolean remoteDataFinish);
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
index 805d3af..0c0287e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
@@ -95,14 +95,6 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     //Do nothing
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -111,14 +103,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
index 0a507d5..c640b53 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterSelectSeriesReader.java
@@ -119,14 +119,6 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     batchDataList = null;
   }
 
-  public Path getSeriesPath() {
-    return seriesPath;
-  }
-
-  public void setSeriesPath(Path seriesPath) {
-    this.seriesPath = seriesPath;
-  }
-
   public TSDataType getDataType() {
     return dataType;
   }
@@ -135,27 +127,12 @@ public class ClusterSelectSeriesReader extends AbstractClusterPointReader implem
     this.dataType = dataType;
   }
 
-  public BatchData getCurrentBatchData() {
-    return currentBatchData;
-  }
-
-  public void setCurrentBatchData(BatchData currentBatchData) {
-    this.currentBatchData = currentBatchData;
-  }
-
+  @Override
   public void addBatchData(BatchData batchData, boolean remoteDataFinish) {
     batchDataList.addLast(batchData);
     this.remoteDataFinish = remoteDataFinish;
   }
 
-  public boolean isRemoteDataFinish() {
-    return remoteDataFinish;
-  }
-
-  public void setRemoteDataFinish(boolean remoteDataFinish) {
-    this.remoteDataFinish = remoteDataFinish;
-  }
-
   /**
    * Check if this series need to fetch data from remote query node
    */
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
index 639dce8..2b3ab18 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/timegenerator/ClusterNodeConstructor.java
@@ -25,7 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
@@ -65,7 +65,7 @@ public class ClusterNodeConstructor extends AbstractNodeConstructor {
    * Init filter series reader
    */
   private void init(ClusterRpcSingleQueryManager queryManager) {
-    Map<String, FilterGroupEntity> filterGroupEntityMap = queryManager.getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = queryManager.getFilterSeriesGroupEntityMap();
     filterGroupEntityMap.forEach(
         (key, value) -> filterSeriesReadersByGroupId.put(key, value.getFilterSeriesReaders()));
     filterSeriesReadersByGroupId.forEach((key, value) -> filterSeriesReaderIndex.put(key, 0));
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
index a0ee256..0f05cf2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
 package org.apache.iotdb.cluster.query.utils;
 
 import org.apache.iotdb.cluster.query.manager.common.FillBatchData;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
index 0024138..4089e9b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ExpressionUtils.java
@@ -26,7 +26,7 @@ import static org.apache.iotdb.tsfile.read.expression.ExpressionType.TRUE;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.query.expression.TrueExpression;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
@@ -46,17 +46,17 @@ public class ExpressionUtils {
    * Get all series path of expression group by group id
    */
   public static void getAllExpressionSeries(IExpression expression,
-      Map<String, FilterGroupEntity> filterGroupEntityMap)
+      Map<String, FilterSeriesGroupEntity> filterGroupEntityMap)
       throws PathErrorException {
     if (expression.getType() == ExpressionType.SERIES) {
       Path path = ((SingleSeriesExpression) expression).getSeriesPath();
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
       if (!filterGroupEntityMap.containsKey(groupId)) {
-        filterGroupEntityMap.put(groupId, new FilterGroupEntity(groupId));
+        filterGroupEntityMap.put(groupId, new FilterSeriesGroupEntity(groupId));
       }
-      FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
-      filterGroupEntity.addFilterPaths(path);
-      filterGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
+      FilterSeriesGroupEntity filterSeriesGroupEntity = filterGroupEntityMap.get(groupId);
+      filterSeriesGroupEntity.addFilterPaths(path);
+      filterSeriesGroupEntity.addFilter(((SingleSeriesExpression) expression).getFilter());
     } else if (expression.getType() == OR || expression.getType() == AND) {
       getAllExpressionSeries(((IBinaryExpression) expression).getLeft(), filterGroupEntityMap);
       getAllExpressionSeries(((IBinaryExpression) expression).getRight(), filterGroupEntityMap);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index 5fbd30c..3a2746f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -25,7 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
@@ -84,24 +85,24 @@ public class QueryPlanPartitionUtils {
   private static void splitQueryPlanBySelectPath(ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
-    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    // split query plan by select path
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
     List<Path> selectPaths = queryPlan.getPaths();
     for (Path path : selectPaths) {
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!selectSeriesByGroupId.containsKey(groupId)) {
-        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
       }
-      selectSeriesByGroupId.get(groupId).add(path);
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
     }
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
-      String groupId = entry.getKey();
-      List<Path> paths = entry.getValue();
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setProposer(queryPlan.getProposer());
       subQueryPlan.setPaths(paths);
       subQueryPlan.setExpression(queryPlan.getExpression());
-      selectPathPlans.put(groupId, subQueryPlan);
+      entity.setQueryPlan(subQueryPlan);
     }
   }
 
@@ -113,12 +114,12 @@ public class QueryPlanPartitionUtils {
       throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
     // split query plan by filter path
-    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager
-        .getFilterGroupEntityMap();
+    Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = singleQueryManager
+        .getFilterSeriesGroupEntityMap();
     IExpression expression = queryPlan.getExpression();
     ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
-    for (FilterGroupEntity filterGroupEntity : filterGroupEntityMap.values()) {
-      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
+    for (FilterSeriesGroupEntity filterSeriesGroupEntity : filterGroupEntityMap.values()) {
+      List<Path> filterSeriesList = filterSeriesGroupEntity.getFilterPaths();
       // create filter sub query plan
       QueryPlan subQueryPlan = new QueryPlan();
       subQueryPlan.setPaths(filterSeriesList);
@@ -127,7 +128,7 @@ public class QueryPlanPartitionUtils {
       if (subExpression.getType() != ExpressionType.TRUE) {
         subQueryPlan.setExpression(subExpression);
       }
-      filterGroupEntity.setQueryPlan(subQueryPlan);
+      filterSeriesGroupEntity.setQueryPlan(subQueryPlan);
     }
   }
 
@@ -157,29 +158,30 @@ public class QueryPlanPartitionUtils {
     AggregationPlan queryPlan = (AggregationPlan) singleQueryManager.getOriginQueryPlan();
     List<Path> selectPaths = queryPlan.getPaths();
     List<String> aggregations = queryPlan.getAggregations();
-    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
     Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
-    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
     for (int i = 0; i < selectPaths.size(); i++) {
       Path path = selectPaths.get(i);
       String aggregation = aggregations.get(i);
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!selectSeriesByGroupId.containsKey(groupId)) {
-        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
         selectAggregationByGroupId.put(groupId, new ArrayList<>());
       }
       selectAggregationByGroupId.get(groupId).add(aggregation);
-      selectSeriesByGroupId.get(groupId).add(path);
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
     }
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+    for (Entry<String, SelectSeriesGroupEntity> entry : selectGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      List<Path> paths = entry.getValue();
+      SelectSeriesGroupEntity entity = entry.getValue();
+      List<Path> paths = entity.getSelectPaths();
       AggregationPlan subQueryPlan = new AggregationPlan();
       subQueryPlan.setProposer(queryPlan.getProposer());
       subQueryPlan.setPaths(paths);
       subQueryPlan.setExpression(queryPlan.getExpression());
       subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
-      selectPathPlans.put(groupId, subQueryPlan);
+      entity.setQueryPlan(subQueryPlan);
     }
   }
 
@@ -200,25 +202,24 @@ public class QueryPlanPartitionUtils {
       throws PathErrorException {
     FillQueryPlan fillQueryPlan = (FillQueryPlan) singleQueryManager.getOriginQueryPlan();
     List<Path> selectPaths = fillQueryPlan.getPaths();
-    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
-    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    Map<String, SelectSeriesGroupEntity> selectGroupEntityMap = singleQueryManager
+        .getSelectSeriesGroupEntityMap();
     for (Path path : selectPaths) {
       String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
-      if (!selectSeriesByGroupId.containsKey(groupId)) {
-        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+      if (!selectGroupEntityMap.containsKey(groupId)) {
+        selectGroupEntityMap.put(groupId, new SelectSeriesGroupEntity(groupId));
       }
-      selectSeriesByGroupId.get(groupId).add(path);
+      selectGroupEntityMap.get(groupId).addSelectPaths(path);
     }
-    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
-      String groupId = entry.getKey();
-      List<Path> paths = entry.getValue();
+    for (SelectSeriesGroupEntity entity : selectGroupEntityMap.values()) {
+      List<Path> paths = entity.getSelectPaths();
       FillQueryPlan subQueryPlan = new FillQueryPlan();
       subQueryPlan.setProposer(fillQueryPlan.getProposer());
       subQueryPlan.setPaths(paths);
       subQueryPlan.setExpression(fillQueryPlan.getExpression());
       subQueryPlan.setQueryTime(fillQueryPlan.getQueryTime());
       subQueryPlan.setFillType(new EnumMap<>(fillQueryPlan.getFillType()));
-      selectPathPlans.put(groupId, subQueryPlan);
+      entity.setQueryPlan(subQueryPlan);
     }
   }
 
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java
index b800fbf..4745553 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterRpcManagerTest.java
@@ -39,7 +39,8 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.jdbc.Config;
@@ -261,25 +262,8 @@ public class ClusterRpcManagerTest {
         assertEquals(taskId, singleManager.getTaskId());
 
         // select path plans
-        Map<String, QueryPlan> selectPathPlans = singleManager.getSelectPathPlans();
-        assertEquals(1, selectPathPlans.size());
-        for (QueryPlan queryPlan : selectPathPlans.values()) {
-          List<Path> paths = queryPlan.getPaths();
-          List<Path> correctPaths = new ArrayList<>();
-          correctPaths.add(new Path("root.vehicle.d0.s0"));
-          correctPaths.add(new Path("root.vehicle.d0.s1"));
-          correctPaths.add(new Path("root.vehicle.d0.s3"));
-          assertEquals(correctPaths, paths);
-          assertNull(queryPlan.getExpression());
-        }
-
-        // select series by group id
-        assertEquals(0, singleManager.getSelectSeriesByGroupId().size());
-
-        // select series reader
-        assertTrue(singleManager
-            .getSelectSeriesReaders().isEmpty());
-
+        Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = singleManager.getSelectSeriesGroupEntityMap();
+        assertTrue(selectSeriesGroupEntityMap.isEmpty());
       }
       statement.close();
     }
@@ -304,27 +288,11 @@ public class ClusterRpcManagerTest {
         assertEquals(taskId, singleManager.getTaskId());
 
         // select path plans
-        Map<String, QueryPlan> selectPathPlans = singleManager.getSelectPathPlans();
-        assertEquals(1, selectPathPlans.size());
-        for (QueryPlan queryPlan : selectPathPlans.values()) {
-          List<Path> paths = queryPlan.getPaths();
-          List<Path> correctPaths = new ArrayList<>();
-          correctPaths.add(new Path("root.vehicle.d0.s0"));
-          correctPaths.add(new Path("root.vehicle.d0.s1"));
-          correctPaths.add(new Path("root.vehicle.d0.s3"));
-          assertEquals(correctPaths, paths);
-          assertNotNull(queryPlan.getExpression());
-        }
-
-        // select series by group id
-        assertTrue(singleManager.getSelectSeriesByGroupId().isEmpty());
-
-        // select series reader
-        assertTrue(singleManager
-            .getSelectSeriesReaders().isEmpty());
+        Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = singleManager.getSelectSeriesGroupEntityMap();
+        assertTrue(selectSeriesGroupEntityMap.isEmpty());
 
         // filter path plans
-        Map<String, FilterGroupEntity> filterGroupEntityMap = singleManager.getFilterGroupEntityMap();
+        Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = singleManager.getFilterSeriesGroupEntityMap();
         assertTrue(filterGroupEntityMap.isEmpty());
 
       }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
index a0d409b..363ef98 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
@@ -39,8 +39,11 @@ import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.SelectSeriesGroupEntity;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
 import org.apache.iotdb.db.qp.QueryProcessor;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.jdbc.Config;
@@ -60,6 +63,9 @@ public class QueryPlanPartitionUtilsTest {
   private static ClusterRpcQueryManager manager = ClusterRpcQueryManager.getInstance();
   private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
   private QueryProcessor queryProcessor = new QueryProcessor(queryDataExecutor);
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
 
   private static final String URL = "127.0.0.1:6667/";
 
@@ -105,6 +111,7 @@ public class QueryPlanPartitionUtilsTest {
     EnvironmentUtils.cleanEnv();
     EnvironmentUtils.closeStatMonitor();
     EnvironmentUtils.closeMemControl();
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
     CLUSTER_CONFIG.createAllPath();
     server = Server.getInstance();
     server.start();
@@ -115,6 +122,7 @@ public class QueryPlanPartitionUtilsTest {
   @After
   public void tearDown() throws Exception {
     server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
     EnvironmentUtils.cleanEnv();
   }
 
@@ -230,14 +238,14 @@ public class QueryPlanPartitionUtilsTest {
   }
 
   @Test
-  public void splitQueryPlanWithoutValueFilter() throws Exception{
+  public void splitQueryPlanWithoutValueFilter() throws Exception {
     try (Connection connection = DriverManager
         .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
       insertData(connection, createSQLs, insertSQLs);
       initCorrectResult();
-      for(int i = 0 ; i < queryStatementsWithoutFilters.length; i++) {
+      for (int i = 0; i < queryStatementsWithoutFilters.length; i++) {
         String queryStatementsWithoutFilter = queryStatementsWithoutFilters[i];
-        try(Statement statement = connection.createStatement()) {
+        try (Statement statement = connection.createStatement()) {
           boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
           assertTrue(hasResultSet);
           ResultSet resultSet = statement.getResultSet();
@@ -256,14 +264,15 @@ public class QueryPlanPartitionUtilsTest {
             assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
             ClusterRpcSingleQueryManager singleQueryManager = ClusterRpcQueryManager.getInstance()
                 .getSingleQuery(jobId);
-            assertTrue(singleQueryManager.getFilterGroupEntityMap().isEmpty());
-            Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
-            assertFalse(selectPathPlans.isEmpty());
-            for(Entry<String, QueryPlan> entry1: selectPathPlans.entrySet()){
-              QueryPlan queryPlan = entry1.getValue();
+            assertTrue(singleQueryManager.getFilterSeriesGroupEntityMap().isEmpty());
+            Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = singleQueryManager
+                .getSelectSeriesGroupEntityMap();
+            assertFalse(selectSeriesGroupEntityMap.isEmpty());
+            for (SelectSeriesGroupEntity entity : selectSeriesGroupEntityMap.values()) {
+              QueryPlan queryPlan = entity.getQueryPlan();
               QueryPlan correctQueryPlan = withoutFilterResults.get(i + 1);
               assertTrue(correctQueryPlan.getPaths().containsAll(queryPlan.getPaths()));
-              assertEquals(correctQueryPlan.getExpression(),queryPlan.getExpression());
+              assertEquals(correctQueryPlan.getExpression(), queryPlan.getExpression());
               assertEquals(correctQueryPlan.isQuery(), queryPlan.isQuery());
               assertEquals(correctQueryPlan.getOperatorType(), queryPlan.getOperatorType());
               assertEquals(correctQueryPlan.getAggregations(), queryPlan.getAggregations());
@@ -275,14 +284,14 @@ public class QueryPlanPartitionUtilsTest {
   }
 
   @Test
-  public void splitQueryPlanWithValueFilter() throws Exception{
+  public void splitQueryPlanWithValueFilter() throws Exception {
     try (Connection connection = DriverManager
         .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
       insertData(connection, createSQLs, insertSQLs);
       initCorrectResult();
-      for(int i = 0 ; i < queryStatementsWithFilters.length; i++) {
+      for (int i = 0; i < queryStatementsWithFilters.length; i++) {
         String queryStatementsWithoutFilter = queryStatementsWithFilters[i];
-        try(Statement statement = connection.createStatement()) {
+        try (Statement statement = connection.createStatement()) {
           boolean hasResultSet = statement.execute(queryStatementsWithoutFilter);
           assertTrue(hasResultSet);
           ResultSet resultSet = statement.getResultSet();
@@ -301,21 +310,24 @@ public class QueryPlanPartitionUtilsTest {
             assertEquals(taskId, String.format("%s:%d", LOCAL_ADDR, jobId));
             ClusterRpcSingleQueryManager singleQueryManager = ClusterRpcQueryManager.getInstance()
                 .getSingleQuery(jobId);
-            assertTrue(singleQueryManager.getFilterGroupEntityMap().isEmpty());
-            Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
-            assertFalse(selectPathPlans.isEmpty());
-            for(Entry<String, QueryPlan> entry1 : selectPathPlans.entrySet()) {
-              QueryPlan queryPlan = entry1.getValue();
+            assertFalse(singleQueryManager.getFilterSeriesGroupEntityMap().isEmpty());
+            Map<String, SelectSeriesGroupEntity> selectSeriesGroupEntityMap = singleQueryManager
+                .getSelectSeriesGroupEntityMap();
+            assertFalse(selectSeriesGroupEntityMap.isEmpty());
+            for (SelectSeriesGroupEntity entity : selectSeriesGroupEntityMap.values()) {
+              QueryPlan queryPlan = entity.getQueryPlan();
               QueryPlan correctQueryPlan = withFilterSelectResults.get(i + 1);
               assertTrue(correctQueryPlan.getPaths().containsAll(queryPlan.getPaths()));
-              assertEquals(correctQueryPlan.getExpression().getType(), queryPlan.getExpression().getType());
+              assertEquals(correctQueryPlan.getExpression().getType(),
+                  queryPlan.getExpression().getType());
               assertEquals(correctQueryPlan.isQuery(), queryPlan.isQuery());
               assertEquals(correctQueryPlan.getOperatorType(), queryPlan.getOperatorType());
               assertEquals(correctQueryPlan.getAggregations(), queryPlan.getAggregations());
             }
-            Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager.getFilterGroupEntityMap();
-            for (FilterGroupEntity filterGroupEntity:filterGroupEntityMap.values()) {
-              QueryPlan queryPlan = filterGroupEntity.getQueryPlan();
+            Map<String, FilterSeriesGroupEntity> filterGroupEntityMap = singleQueryManager
+                .getFilterSeriesGroupEntityMap();
+            for (FilterSeriesGroupEntity filterSeriesGroupEntity : filterGroupEntityMap.values()) {
+              QueryPlan queryPlan = filterSeriesGroupEntity.getQueryPlan();
               QueryPlan correctQueryPlan = withFilterFilterResults.get(i + 1);
               assertTrue(correctQueryPlan.getPaths().containsAll(queryPlan.getPaths()));
               assertEquals(correctQueryPlan.getExpression().getType(),


[incubator-iotdb] 09/11: fix a serve bug of set readmetadata level

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 6a733d9270b06fa1d22f0ef8488562d09a739d4b
Author: lta <li...@163.com>
AuthorDate: Mon May 20 22:33:10 2019 +0800

    fix a serve bug of set readmetadata level
---
 .../qp/executor/ClusterQueryProcessExecutor.java   | 11 ++++++-
 .../cluster/qp/executor/QueryMetadataExecutor.java |  1 -
 .../ClusterRpcSingleQueryManager.java              |  5 ++++
 .../cluster/query/utils/ClusterRpcReaderUtils.java |  3 +-
 .../querymetadata/QueryPathsAsyncProcessor.java    |  1 +
 .../cluster/service/TSServiceClusterImpl.java      |  5 ++--
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  | 35 +++++-----------------
 .../iotdb/cluster/integration/IoTDBQueryIT.java    |  1 +
 .../cluster/query/utils/ExpressionUtilsTest.java   | 17 ++++-------
 .../query/utils/QueryPlanPartitionUtilsTest.java   |  4 ++-
 10 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
index 3dc8c43..30659e5 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/ClusterQueryProcessExecutor.java
@@ -41,13 +41,21 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.QueryExpression;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements IQueryProcessExecutor {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterQueryProcessExecutor.class);
   private ThreadLocal<Integer> fetchSize = new ThreadLocal<>();
   private ClusterQueryRouter clusterQueryRouter = new ClusterQueryRouter();
 
-  private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private QueryMetadataExecutor queryMetadataExecutor;
+
+  public ClusterQueryProcessExecutor(
+      QueryMetadataExecutor queryMetadataExecutor) {
+    this.queryMetadataExecutor = queryMetadataExecutor;
+  }
 
   @Override
   public QueryDataSet processQuery(QueryPlan queryPlan, QueryContext context)
@@ -117,6 +125,7 @@ public class ClusterQueryProcessExecutor extends AbstractQPExecutor implements I
   public List<String> getAllPaths(String originPath)
       throws PathErrorException {
     try {
+      LOGGER.debug("read metadata level :" + getReadMetadataConsistencyLevel());
       return queryMetadataExecutor.processPathsQuery(originPath);
     } catch (InterruptedException | ProcessorException e) {
       throw new PathErrorException(e.getMessage());
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
index 9a3f61d..229009e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/qp/executor/QueryMetadataExecutor.java
@@ -414,7 +414,6 @@ public class QueryMetadataExecutor extends AbstractQPExecutor {
           success = true;
         } catch (RaftConnectionException e1) {
           LOGGER.debug("Get paths for {} task for group {} to node {} fail.", pathList, groupId, nextNode);
-          continue;
         }
       }
       LOGGER.debug("The final result for get paths for {} task is {}", pathList, success);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index 05bf9df..81ca292 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -53,12 +53,15 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage all remote series reader resource in a query resource in coordinator node.
  */
 public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManager {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcSingleQueryManager.class);
   /**
    * Statistic all usage of local data group.
    */
@@ -133,6 +136,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
       SelectSeriesGroupEntity selectEntity = entry.getValue();
       QueryPlan queryPlan = selectEntity.getQueryPlan();
       if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
+        LOGGER.debug("Init series reader for group id {} from remote node." , groupId);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
         List<Filter> filterList = new ArrayList<>();
@@ -149,6 +153,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
             .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
       } else {
+        LOGGER.debug("Init series reader for group id {} locally." , groupId);
         dataGroupUsage.add(groupId);
         selectSeriesGroupEntityMap.remove(groupId);
         filterSeriesGroupEntityMap.remove(groupId);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index 0247bbe..d38ca83 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -72,9 +72,10 @@ public class ClusterRpcReaderUtils {
       try {
         response = handleQueryRequest(request, peerId, 0);
         manager.setQueryNode(groupId, peerId);
+        LOGGER.debug("Init series reader in Node<{}> of group<{}> success.", peerId, groupId);
         return response;
       } catch (RaftConnectionException e) {
-        LOGGER.error("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
+        LOGGER.debug("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
       }
     }
     throw new RaftConnectionException(
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
index 8e1e47b..8e96032 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -90,6 +90,7 @@ public class QueryPathsAsyncProcessor extends BasicAsyncUserProcessor<QueryPaths
     for (String path : request.getPath()) {
       response.addPaths(mManager.getPaths(path));
     }
+    System.out.println("Paths: " + response.getPaths());
   }
 
   @Override
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
index ff716a3..950d02c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -66,9 +66,10 @@ public class TSServiceClusterImpl extends TSServiceImpl {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceClusterImpl.class);
 
-  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
-  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
   private QueryMetadataExecutor queryMetadataExecutor = new QueryMetadataExecutor();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor(
+      queryMetadataExecutor);
+  private NonQueryExecutor nonQueryExecutor = new NonQueryExecutor();
 
   private IClusterRpcQueryManager queryManager = ClusterRpcQueryManager.getInstance();
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index 9dd3867..c2303d3 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -98,38 +98,19 @@ public class RaftUtils {
    */
   private static final ConcurrentHashMap<String, PeerId> groupLeaderCache = new ConcurrentHashMap<>();
 
-  private static ThreadLocal<Map<String, Integer>> nodeIndexMap = new ThreadLocal<Map<String, Integer>>() {
-    @Override
-    protected Map<String, Integer> initialValue() {
-      Map<String, Integer> map = new HashMap<>();
-      router.getAllGroupId().forEach(groupId -> {
-        PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
-        map.put(groupId, getRandomInt(physicalNodes.length));
-      });
-      return map;
-    }
-  };
+  private static ThreadLocal<Map<String, Integer>> nodeIndexMap = ThreadLocal.withInitial(() -> {
+    Map<String, Integer> map = new HashMap<>();
+    router.getAllGroupId().forEach(groupId -> {
+      PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
+      map.put(groupId, getRandomInt(physicalNodes.length));
+    });
+    return map;
+  });
 
   private RaftUtils() {
   }
 
   /**
-   * Get peer id by input ip
-   *
-   * @return null if not found
-   */
-  public static PeerId getPeerIDByIP(String ip) {
-    RaftService service = (RaftService) server.getMetadataHolder().getService();
-    List<PeerId> peerIdList = service.getPeerIdList();
-    for (int i = 0; i < peerIdList.size(); i++) {
-      if (peerIdList.get(i).getIp().equals(ip)) {
-        return peerIdList.get(i);
-      }
-    }
-    return null;
-  }
-
-  /**
    * Get peer ID in order
    *
    * @return node id
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
index 5be3cbc..de2bf64 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
@@ -291,6 +291,7 @@ public class IoTDBQueryIT {
         .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
       insertData(connection, createSQLs, insertSQLs);
       Statement statement = connection.createStatement();
+      statement.execute("set read metadata level to 2");
 
       for(int i =0 ; i < queryStatementsWithoutFilter.length; i++) {
         String queryStatement = queryStatementsWithoutFilter[i];
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java
index 84f8f5f..fb67377 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/ExpressionUtilsTest.java
@@ -19,26 +19,20 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import static org.apache.iotdb.cluster.utils.Utils.insertData;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
 import org.apache.iotdb.cluster.query.expression.TrueExpression;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
-import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.apache.iotdb.db.qp.QueryProcessor;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
@@ -54,7 +48,8 @@ import org.junit.Test;
 public class ExpressionUtilsTest {
   private Server server;
   private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
-  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor(
+      new QueryMetadataExecutor());
   private QueryProcessor queryProcessor = new QueryProcessor(queryDataExecutor);
 
   private static final String URL = "127.0.0.1:6667/";
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
index 363ef98..9b44ade 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtilsTest.java
@@ -37,6 +37,7 @@ import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.qp.executor.ClusterQueryProcessExecutor;
+import org.apache.iotdb.cluster.qp.executor.QueryMetadataExecutor;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterSeriesGroupEntity;
@@ -61,7 +62,8 @@ public class QueryPlanPartitionUtilsTest {
   private static final String LOCAL_ADDR = String
       .format("%s:%d", CLUSTER_CONFIG.getIp(), CLUSTER_CONFIG.getPort());
   private static ClusterRpcQueryManager manager = ClusterRpcQueryManager.getInstance();
-  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor();
+  private ClusterQueryProcessExecutor queryDataExecutor = new ClusterQueryProcessExecutor(
+      new QueryMetadataExecutor());
   private QueryProcessor queryProcessor = new QueryProcessor(queryDataExecutor);
   private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
       CLUSTER_CONFIG.getPort());


[incubator-iotdb] 08/11: Merge branch 'cluster' into cluster_fill_aggre_groupby

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit d079f5e3aca5d0303d0f0554cb45d449621a86a4
Merge: 9487dbf 618cc39
Author: lta <li...@163.com>
AuthorDate: Mon May 20 17:29:00 2019 +0800

    Merge branch 'cluster' into cluster_fill_aggre_groupby

 .../org/apache/iotdb/cluster/entity/Server.java    |   6 +-
 .../cluster/qp/executor/QueryMetadataExecutor.java | 148 +++++++++++++++++++--
 .../QueryJobNumAsyncProcessor.java}                |  17 +--
 .../QueryMetricAsyncProcessor.java                 |   7 +-
 .../QueryStatusAsyncProcessor.java}                |  18 +--
 .../QueryJobNumRequest.java}                       |  18 +--
 .../{ => querymetric}/QueryMetricRequest.java      |  11 +-
 .../QueryStatusRequest.java}                       |  16 +--
 .../QueryJobNumResponse.java}                      |  19 +--
 .../{ => querymetric}/QueryMetricResponse.java     |   3 +-
 .../QueryStatusResponse.java}                      |  24 ++--
 .../iotdb/cluster/service/ClusterMonitor.java      |   9 +-
 .../iotdb/cluster/service/ClusterMonitorMBean.java |  13 +-
 .../iotdb/cluster/service/nodetool/NodeTool.java   |   3 +-
 .../iotdb/cluster/service/nodetool/Query.java      |  20 ++-
 .../service/nodetool/{Query.java => Status.java}   |  10 +-
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  | 144 +++++++++++++++++++-
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            |  58 ++++++--
 18 files changed, 430 insertions(+), 114 deletions(-)



[incubator-iotdb] 10/11: fix a serve bug

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 20c93d16e736c672c8c75ba62909afce5361171e
Author: lta <li...@163.com>
AuthorDate: Tue May 21 01:08:28 2019 +0800

    fix a serve bug
---
 .../manager/coordinatornode/ClusterRpcSingleQueryManager.java    | 7 +++++++
 .../query/reader/querynode/ClusterSelectSeriesBatchReader.java   | 1 +
 .../raft/processor/querydata/InitSeriesReaderSyncProcessor.java  | 5 +++++
 .../raft/processor/querymetadata/QueryPathsAsyncProcessor.java   | 1 -
 .../org/apache/iotdb/cluster/service/TSServiceClusterImpl.java   | 9 ++++++++-
 .../java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java  | 1 -
 6 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index 81ca292..8cc4ccd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -187,6 +187,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    */
   private void handleInitReaderResponse(String groupId, Map<PathType, QueryPlan> allQueryPlan,
       InitSeriesReaderResponse response) {
+    LOGGER.debug("Handle init reader response of group id {}", groupId);
     /** create cluster series reader **/
     if (allQueryPlan.containsKey(PathType.SELECT_PATH)) {
       QueryPlan plan = allQueryPlan.get(PathType.SELECT_PATH);
@@ -217,14 +218,18 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
   @Override
   public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
     List<Integer> fetchDataSeriesIndexs = new ArrayList<>();
+    List<Path> fetchDataSeries = new ArrayList<>();
     List<Path> selectSeries = selectSeriesGroupEntityMap.get(groupId).getSelectPaths();
     List<ClusterSelectSeriesReader> seriesReaders = selectSeriesGroupEntityMap.get(groupId)
         .getSelectSeriesReaders();
     for (int i = 0; i < selectSeries.size(); i++) {
       if (seriesReaders.get(i).enableFetchData()) {
         fetchDataSeriesIndexs.add(i);
+        fetchDataSeries.add(selectSeries.get(i));
       }
     }
+    LOGGER.debug("Fetch data for paths {} of group id {} from node {}", fetchDataSeries, groupId,
+        queryNodes.get(groupId));
     BasicRequest request = QuerySeriesDataRequest
         .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeriesIndexs,
             queryRounds++);
@@ -236,6 +241,8 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
 
   @Override
   public void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException {
+    LOGGER.debug("Fetch Data for filter paths {} of group id {} from node {}",
+        filterSeriesGroupEntityMap.get(groupId).getFilterPaths(), groupId, queryNodes.get(groupId));
     BasicRequest request = QuerySeriesDataRequest
         .createFetchDataRequest(groupId, taskId, PathType.FILTER_PATH, null, queryRounds++);
     QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
index cbbad2e..cfc43b8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
@@ -62,6 +62,7 @@ public class ClusterSelectSeriesBatchReader extends
     for (int i = 0; i < CLUSTER_CONF.getBatchReadSize(); i++) {
       if (hasNext()) {
         TimeValuePair pair = reader.next();
+        System.out.println("reader value:" + pair);
         batchData.putTime(pair.getTimestamp());
         batchData.putAnObject(pair.getValue().getValue());
       } else {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
index 7474eec..a64d909 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querydata/InitSeriesReaderSyncProcessor.java
@@ -27,9 +27,13 @@ import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderReque
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.ProcessorException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSeriesReaderRequest> {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(InitSeriesReaderSyncProcessor.class);
+
   @Override
   public Object handleRequest(BizContext bizContext, InitSeriesReaderRequest request)
       throws Exception {
@@ -46,6 +50,7 @@ public class InitSeriesReaderSyncProcessor extends BasicSyncUserProcessor<InitSe
    * @param groupId group id
    */
   private void handleNullRead(int readConsistencyLevel, String groupId) throws ProcessorException {
+    LOGGER.debug("Read data level is {}", readConsistencyLevel);
     if (readConsistencyLevel == ClusterConstant.STRONG_CONSISTENCY_LEVEL && !QPExecutorUtils
         .checkDataGroupLeader(groupId)) {
       Status nullReadTaskStatus = Status.OK();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
index 8e96032..8e1e47b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/processor/querymetadata/QueryPathsAsyncProcessor.java
@@ -90,7 +90,6 @@ public class QueryPathsAsyncProcessor extends BasicAsyncUserProcessor<QueryPaths
     for (String path : request.getPath()) {
       response.addPaths(mManager.getPaths(path));
     }
-    System.out.println("Paths: " + response.getPaths());
   }
 
   @Override
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
index 950d02c..4306d673 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/TSServiceClusterImpl.java
@@ -307,7 +307,14 @@ public class TSServiceClusterImpl extends TSServiceImpl {
     queryManager.addSingleQuery(jobId, (QueryPlan) physicalPlan);
     QueryDataSet queryDataSet = processor.getExecutor().processQuery((QueryPlan) physicalPlan,
         context);
-    queryRet.get().put(statement, queryDataSet);
+    System.out.println("Create new Data Set");
+    try {
+      queryRet.get().put(statement, queryDataSet);
+    }catch (Exception e){
+      e.printStackTrace();
+    }
+    System.out.println("Create new Data Set complete");
+    System.out.println(queryDataSet == null);
     return queryDataSet;
   }
 
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
index de2bf64..5be3cbc 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
@@ -291,7 +291,6 @@ public class IoTDBQueryIT {
         .getConnection(Config.IOTDB_URL_PREFIX + URL, "root", "root")) {
       insertData(connection, createSQLs, insertSQLs);
       Statement statement = connection.createStatement();
-      statement.execute("set read metadata level to 2");
 
       for(int i =0 ; i < queryStatementsWithoutFilter.length; i++) {
         String queryStatement = queryStatementsWithoutFilter[i];


[incubator-iotdb] 04/11: add aggregation query it

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 4abde09c4e71a8de156a63a495d47fff3d5547c0
Author: lta <li...@163.com>
AuthorDate: Fri May 17 17:47:59 2019 +0800

    add aggregation query it
---
 .../org/apache/iotdb/cluster/entity/Server.java    |  24 +-
 .../cluster/integration/IoTDBAggregationIT.java    |   5 -
 .../integration/IoTDBAggregationLargeDataIT.java   | 916 +++++++++++++++++++++
 .../integration/IoTDBAggregationSmallDataIT.java   | 760 +++++++++++++++++
 .../cluster/integration/IoTDBFillQueryIT.java      |   1 -
 .../integration/IoTDBMetadataFetchLocallyIT.java   |   1 -
 .../iotdb/cluster/integration/IoTDBQueryIT.java    |   1 -
 .../cluster/integration/IoTDBQueryLargeDataIT.java |   2 -
 .../apache/iotdb/cluster/utils/RaftUtilsTest.java  |  19 +-
 .../java/org/apache/iotdb/cluster/utils/Utils.java |   5 +
 .../iotdb/db/engine/filenode/FileNodeManager.java  |   3 +
 11 files changed, 1709 insertions(+), 28 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
index 842d2ed..c6cb03b 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/entity/Server.java
@@ -121,16 +121,20 @@ public class Server {
     Router router = Router.getInstance();
     PhysicalNode[][] groups = router.getGroupsNodes(serverId.getIp(), serverId.getPort());
 
-    for (int i = 0; i < groups.length; i++) {
-      PhysicalNode[] group = groups[i];
-      String groupId = router.getGroupID(group);
-      DataPartitionHolder dataPartitionHolder = new DataPartitionRaftHolder(groupId,
-          RaftUtils.getPeerIdArrayFrom(group), serverId, rpcServer, false);
-      dataPartitionHolder.init();
-      dataPartitionHolder.start();
-      dataPartitionHolderMap.put(groupId, dataPartitionHolder);
-      LOGGER.info("{} group has started", groupId);
-      Router.getInstance().showPhysicalNodes(groupId);
+    try {
+      for (int i = 0; i < groups.length; i++) {
+        PhysicalNode[] group = groups[i];
+        String groupId = router.getGroupID(group);
+        DataPartitionHolder dataPartitionHolder = new DataPartitionRaftHolder(groupId,
+            RaftUtils.getPeerIdArrayFrom(group), serverId, rpcServer, false);
+        dataPartitionHolder.init();
+        dataPartitionHolder.start();
+        dataPartitionHolderMap.put(groupId, dataPartitionHolder);
+        LOGGER.info("{} group has started", groupId);
+        Router.getInstance().showPhysicalNodes(groupId);
+      }
+    }catch (Exception e){
+      e.printStackTrace();
     }
 
     try {
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java
index bf7c4da..201ddf6 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java
@@ -182,11 +182,6 @@ public class IoTDBAggregationIT {
   public void remoteTest() throws SQLException {
     QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
     test();
-    try {
-      Thread.sleep(200);
-    } catch (InterruptedException e) {
-      e.printStackTrace();
-    }
   }
 
   @Test
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
new file mode 100644
index 0000000..973d3e7
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
@@ -0,0 +1,916 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.integration.Constant.count;
+import static org.apache.iotdb.cluster.integration.Constant.first;
+import static org.apache.iotdb.cluster.integration.Constant.max_time;
+import static org.apache.iotdb.cluster.integration.Constant.max_value;
+import static org.apache.iotdb.cluster.integration.Constant.mean;
+import static org.apache.iotdb.cluster.integration.Constant.min_value;
+import static org.apache.iotdb.cluster.integration.Constant.sum;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.integration.Constant;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class IoTDBAggregationLargeDataIT {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final String TIMESTAMP_STR = "Time";
+  private final String d0s0 = "root.vehicle.d0.s0";
+  private final String d0s1 = "root.vehicle.d0.s1";
+  private final String d0s2 = "root.vehicle.d0.s2";
+  private final String d0s3 = "root.vehicle.d0.s3";
+  private final String d0s4 = "root.vehicle.d0.s4";
+
+  private static String[] createSql = new String[]{
+      "SET STORAGE GROUP TO root.vehicle",
+      "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+  };
+
+  private static String[] insertSql = new String[]{
+      "insert into root.vehicle.d0(timestamp,s0) values(1,101)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s0) values(100,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(101,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(102,80)",
+      "insert into root.vehicle.d0(timestamp,s0) values(103,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(104,90)",
+      "insert into root.vehicle.d0(timestamp,s0) values(105,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(106,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(50,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(1000,22222)",
+      "insert into root.vehicle.d0(timestamp,s0) values(106,199)",
+      "DELETE FROM root.vehicle.d0.s0 WHERE time < 104",
+
+      "insert into root.vehicle.d0(timestamp,s1) values(1,1101)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s1) values(100,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(101,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(102,180)",
+      "insert into root.vehicle.d0(timestamp,s1) values(103,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(104,190)",
+      "insert into root.vehicle.d0(timestamp,s1) values(105,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,40000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(50,50000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(1000,55555)",
+
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,55555)",
+      "insert into root.vehicle.d0(timestamp,s2) values(2,2.22)",
+      "insert into root.vehicle.d0(timestamp,s2) values(3,3.33)",
+      "insert into root.vehicle.d0(timestamp,s2) values(4,4.44)",
+      "insert into root.vehicle.d0(timestamp,s2) values(102,10.00)",
+      "insert into root.vehicle.d0(timestamp,s2) values(105,11.11)",
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,1000.11)",
+
+      "insert into root.vehicle.d0(timestamp,s3) values(60,'aaaaa')",
+      "insert into root.vehicle.d0(timestamp,s3) values(70,'bbbbb')",
+      "insert into root.vehicle.d0(timestamp,s3) values(80,'ccccc')",
+      "insert into root.vehicle.d0(timestamp,s3) values(101,'ddddd')",
+      "insert into root.vehicle.d0(timestamp,s3) values(102,'fffff')",
+
+      "insert into root.vehicle.d1(timestamp,s0) values(1,999)",
+      "insert into root.vehicle.d1(timestamp,s0) values(1000,888)",
+
+      "insert into root.vehicle.d0(timestamp,s4) values(100, false)",
+      "insert into root.vehicle.d0(timestamp,s4) values(100, true)",
+  };
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+
+  @Test
+  public void test() throws ClassNotFoundException, SQLException {
+    insertSQL();
+
+    lastAggreWithSingleFilterTest();
+    meanAggreWithSingleFilterTest();
+    sumAggreWithSingleFilterTest();
+    firstAggreWithSingleFilterTest();
+    countAggreWithSingleFilterTest();
+    minMaxTimeAggreWithSingleFilterTest();
+    minValueAggreWithSingleFilterTest();
+    maxValueAggreWithSingleFilterTest();
+
+    lastAggreWithMultiFilterTest();
+    countAggreWithMultiFilterTest();
+    minTimeAggreWithMultiFilterTest();
+    maxTimeAggreWithMultiFilterTest();
+    minValueAggreWithMultiFilterTest();
+    maxValueAggreWithMultiFilterTest();
+    meanAggreWithMultiFilterTest();
+    sumAggreWithMultiFilterTest();
+    firstAggreWithMultiFilterTest();
+  }
+
+  @Test
+  @Ignore
+  public void remoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    insertSQL();
+
+    lastAggreWithSingleFilterTest();
+    meanAggreWithSingleFilterTest();
+    sumAggreWithSingleFilterTest();
+    firstAggreWithSingleFilterTest();
+    countAggreWithSingleFilterTest();
+    minMaxTimeAggreWithSingleFilterTest();
+    minValueAggreWithSingleFilterTest();
+    maxValueAggreWithSingleFilterTest();
+
+    lastAggreWithMultiFilterTest();
+    countAggreWithMultiFilterTest();
+    minTimeAggreWithMultiFilterTest();
+    maxTimeAggreWithMultiFilterTest();
+    minValueAggreWithMultiFilterTest();
+    maxValueAggreWithMultiFilterTest();
+//    meanAggreWithMultiFilterTest();
+    sumAggreWithMultiFilterTest();
+    firstAggreWithMultiFilterTest();
+  }
+
+  private void lastAggreWithMultiFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,9,39,63.0,E,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select last(s0),last(s1),last(s2),last(s3),last(s4)" +
+              " from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(Constant.last(d0s0)) + "," + resultSet
+            .getString(Constant.last(d0s1))
+            + "," + resultSet.getString(Constant.last(d0s2)) + "," +
+            resultSet.getString(Constant.last(d0s3)) + "," + resultSet
+            .getString(Constant.last(d0s4));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void lastAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,9,39,63.0"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select last(s0),last(s1),last(s2)" +
+          " from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(Constant.last(d0s0)) + ","
+            + resultSet.getString(Constant.last(d0s1)) + "," +
+            resultSet.getString(Constant.last(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void sumAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,55061.0,156752.0,20254"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select sum(s0),sum(s1),sum(s2)" +
+          " from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0)) + ","
+            + resultSet.getString(sum(d0s1)) + "," + Math
+            .round(resultSet.getDouble(Constant.sum(d0s2)));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void firstAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,90,1101,2.22,ddddd,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select first(s0),first(s1),first(s2),first(s3),"
+          + "first(s4) from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(Constant.first(d0s0)) + ","
+            + resultSet.getString(Constant.first(d0s1)) + "," +
+            resultSet.getString(Constant.first(d0s2))
+            + "," + resultSet.getString(Constant.first(d0s3)) + "," +
+            resultSet.getString(Constant.first(d0s4));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void meanAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,75,212,28"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select mean(s0),mean(s1),mean(s2) from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans =
+            resultSet.getString(TIMESTAMP_STR) + "," + Math.round(resultSet.getDouble(mean(d0s0)))
+                + "," + Math.round(resultSet.getDouble(mean(d0s1))) + "," +
+                Math.round(resultSet.getDouble(mean(d0s2)));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void countAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,733,740,734"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(s0),count(s1),count(s2) from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void minMaxTimeAggreWithSingleFilterTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,104,1,2,101,100"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select min_time(s0),min_time(s1),min_time(s2),min_time(s3),min_time(s4)" +
+              " from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(Constant.min_time(d0s0)) + "," +
+            resultSet.getString(Constant.min_time(d0s1)) + "," +
+            resultSet.getString(Constant.min_time(d0s2)) +
+            "," + resultSet.getString(Constant.min_time(d0s3)) +
+            "," + resultSet.getString(Constant.min_time(d0s4));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      retArray = new String[]{
+          "0,3999,3999,3999,3599,100"
+      };
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select max_time(s0),max_time(s1),max_time(s2),"
+          + "max_time(s3),max_time(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_time(d0s0))
+            + "," + resultSet.getString(max_time(d0s1)) + "," + resultSet.getString(max_time(d0s2))
+            + "," + resultSet.getString(max_time(d0s3)) + "," + resultSet.getString(max_time(d0s4));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void minValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,0,0,0.0,B,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select min_value(s0),min_value(s1),min_value(s2),"
+          + "min_value(s3),min_value(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+
+      if (hasResultSet) {
+        ResultSet resultSet = statement.getResultSet();
+        int cnt = 0;
+        while (resultSet.next()) {
+          String ans =
+              resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(min_value(d0s0))
+                  + "," + resultSet.getString(min_value(d0s1)) + "," + resultSet
+                  .getString(min_value(d0s2))
+                  + "," + resultSet.getString(min_value(d0s3)) + "," + resultSet
+                  .getString(min_value(d0s4));
+          //System.out.println("============ " + ans);
+          Assert.assertEquals(ans, retArray[cnt]);
+          cnt++;
+        }
+        Assert.assertEquals(1, cnt);
+      }
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void maxValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,99,40000,122.0,fffff,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_value(s0),max_value(s1),max_value(s2),"
+          + "max_value(s3),max_value(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+
+      if (hasResultSet) {
+        ResultSet resultSet = statement.getResultSet();
+        int cnt = 0;
+        while (resultSet.next()) {
+          String ans =
+              resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+                  + "," + resultSet.getString(max_value(d0s1)) + "," + resultSet
+                  .getString(max_value(d0s2))
+                  + "," + resultSet.getString(max_value(d0s3)) + "," + resultSet
+                  .getString(max_value(d0s4));
+          //System.out.println("============ " + ans);
+          Assert.assertEquals(ans, retArray[cnt]);
+          cnt++;
+        }
+        Assert.assertEquals(1, cnt);
+      }
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void meanAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,55061.0,733,75,212,28"
+    };
+
+    try(Connection connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root")){
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select sum(s0),count(s0),mean(s0),mean(s1),"
+          + "mean(s2) from root.vehicle.d0 " +
+          "where s1 >= 0 or s2 < 10");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans =
+            resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0)) + "," +
+                resultSet.getString(count(d0s0)) + "," + Math.round(resultSet.getDouble(mean(d0s0)))
+                + "," +
+                Math.round(resultSet.getDouble(mean(d0s1))) + "," + Math
+                .round(resultSet.getDouble(mean(d0s2)));
+        System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  private void sumAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,55061.0,156752.0,20262"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select sum(s0),sum(s1),sum(s2) from root.vehicle.d0"
+          + " where s1 >= 0 or s2 < 10");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0))
+            + "," + resultSet.getString(sum(d0s1)) + "," + Math
+            .round(resultSet.getDouble(sum(d0s2)));
+        //String ans = resultSet.getString(sum(d0s3));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void firstAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,90,1101,2.22,ddddd,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select first(s0),first(s1),first(s2),first(s3),"
+          + "first(s4) from root.vehicle.d0 where s1 >= 0 or s2 < 10");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(first(d0s0))
+            + "," + resultSet.getString(first(d0s1)) + "," + resultSet.getString(first(d0s2))
+            + "," + resultSet.getString(first(d0s3)) + "," + resultSet.getString(first(d0s4));
+        //String ans = resultSet.getString(first(d0s3));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void countAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,733,740,736,482,1"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select count(s0),count(s1),count(s2),count(s3),"
+          + "count(s4) from root.vehicle.d0 where s1 >= 0 or s2 < 10");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2))
+            + "," + resultSet.getString(count(d0s3)) + "," + resultSet.getString(count(d0s4));
+        //String ans = resultSet.getString(count(d0s3));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void minTimeAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,104,1,2,101,100"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select min_time(s0),min_time(s1),min_time(s2),"
+          + "min_time(s3),min_time(s4) from root.vehicle.d0 where s1 >= 0");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(Constant.min_time(d0s0)) + "," +
+            resultSet.getString(Constant.min_time(d0s1)) + "," +
+            resultSet.getString(Constant.min_time(d0s2)) + "," +
+            resultSet.getString(Constant.min_time(d0s3)) + "," +
+            resultSet.getString(Constant.min_time(d0s4));
+        // System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void maxTimeAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,3999,3999,3999,3599,100"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_time(s0),max_time(s1),max_time(s2),"
+          + "max_time(s3),max_time(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_time(d0s0))
+            + "," + resultSet.getString(max_time(d0s1)) + "," + resultSet.getString(max_time(d0s2))
+            + "," + resultSet.getString(max_time(d0s3)) + "," + resultSet.getString(max_time(d0s4));
+        //System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void minValueAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,0,0,0.0,B,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select min_value(s0),min_value(s1),min_value(s2),"
+          + "min_value(s3),min_value(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(min_value(d0s0))
+            + "," + resultSet.getString(min_value(d0s1)) + "," + resultSet
+            .getString(min_value(d0s2))
+            + "," + resultSet.getString(min_value(d0s3)) + "," + resultSet
+            .getString(min_value(d0s4));
+        //System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private void maxValueAggreWithMultiFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,99,40000,122.0,fffff,true"
+    };
+
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_value(s0),max_value(s1),max_value(s2),"
+          + "max_value(s3),max_value(s4) from root.vehicle.d0 where s1 < 50000 and s1 != 100");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+            + "," + resultSet.getString(max_value(d0s1)) + "," + resultSet
+            .getString(max_value(d0s2))
+            + "," + resultSet.getString(max_value(d0s3)) + "," + resultSet
+            .getString(max_value(d0s4));
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  private static String[] stringValue = new String[]{"A", "B", "C", "D", "E"};
+
+  private static void insertSQL() throws SQLException {
+    Connection connection = null;
+    double d0s0sum = 0.0, d0s1sum = 0.0, d0s2sum = 0.0;
+    int cnt = 0;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      for (String sql : createSql) {
+        statement.execute(sql);
+      }
+
+      // insert large amount of data
+      for (int time = 3000; time < 3600; time++) {
+        if (time % 5 == 0) {
+          continue;
+        }
+
+        String sql = String.format("insert into root.vehicle.d0(timestamp,s0) values(%s,%s)",
+            time, time % 100);
+        statement.execute(sql);
+        sql = String.format("insert into root.vehicle.d0(timestamp,s1) values(%s,%s)",
+            time, time % 17);
+        statement.execute(sql);
+        sql = String.format("insert into root.vehicle.d0(timestamp,s2) values(%s,%s)",
+            time, time % 22);
+        statement.execute(sql);
+        sql = String.format("insert into root.vehicle.d0(timestamp,s3) values(%s,'%s')",
+            time, stringValue[time % 5]);
+        statement.execute(sql);
+        cnt++;
+        d0s0sum += time % 100;
+        d0s1sum += time % 17;
+        d0s2sum += time % 22;
+      }
+
+      statement.execute("flush");
+      System.out.println("cnt = " + cnt);
+
+      // insert large amount of data
+      for (int time = 3700; time < 4000; time++) {
+        if (time % 6 == 0) {
+          continue;
+        }
+
+        String sql = String.format("insert into root.vehicle.d0(timestamp,s0) values(%s,%s)",
+            time, time % 70);
+        statement.execute(sql);
+        sql = String.format("insert into root.vehicle.d0(timestamp,s1) values(%s,%s)",
+            time, time % 40);
+        statement.execute(sql);
+        sql = String.format("insert into root.vehicle.d0(timestamp,s2) values(%s,%s)",
+            time, time % 123);
+        statement.execute(sql);
+
+        cnt++;
+        d0s0sum += time % 70;
+        d0s1sum += time % 40;
+        d0s2sum += time % 123;
+      }
+
+      statement.execute("merge");
+
+      System.out.println("large insert cnt = " + cnt);
+      System.out
+          .println("d0s0sum = " + d0s0sum + "; d0s1sum = " + d0s1sum + "; d0s2sum = " + d0s2sum);
+      for (String sql : insertSql) {
+        statement.execute(sql);
+      }
+
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java
new file mode 100644
index 0000000..02dc01f
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java
@@ -0,0 +1,760 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.integration.Constant.count;
+import static org.apache.iotdb.cluster.integration.Constant.first;
+import static org.apache.iotdb.cluster.integration.Constant.last;
+import static org.apache.iotdb.cluster.integration.Constant.max_time;
+import static org.apache.iotdb.cluster.integration.Constant.max_value;
+import static org.apache.iotdb.cluster.integration.Constant.mean;
+import static org.apache.iotdb.cluster.integration.Constant.min_time;
+import static org.apache.iotdb.cluster.integration.Constant.min_value;
+import static org.apache.iotdb.cluster.integration.Constant.sum;
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/**
+ * Multiple aggregation with filter test.
+ */
+public class IoTDBAggregationSmallDataIT {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static final String TIMESTAMP_STR = "Time";
+  private final String d0s0 = "root.vehicle.d0.s0";
+  private final String d0s1 = "root.vehicle.d0.s1";
+  private final String d0s2 = "root.vehicle.d0.s2";
+  private final String d0s3 = "root.vehicle.d0.s3";
+  private final String d0s4 = "root.vehicle.d0.s4";
+  private final String d1s0 = "root.vehicle.d1.s0";
+  private final String d1s1 = "root.vehicle.d1.s1";
+
+  private static String[] createSqls = new String[]{
+      "SET STORAGE GROUP TO root.vehicle",
+      "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN"};
+  private static String[] insertSqls = new String[]{
+
+      "insert into root.vehicle.d0(timestamp,s0) values(1,101)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s0) values(100,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(101,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(102,80)",
+      "insert into root.vehicle.d0(timestamp,s0) values(103,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(104,90)",
+      "insert into root.vehicle.d0(timestamp,s0) values(105,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(106,99)",
+      "insert into root.vehicle.d0(timestamp,s0) values(2,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(50,10000)",
+      "insert into root.vehicle.d0(timestamp,s0) values(1000,22222)",
+      "insert into root.vehicle.d0(timestamp,s0) values(106,199)",
+      "DELETE FROM root.vehicle.d0.s0 WHERE time < 104",
+
+      "insert into root.vehicle.d0(timestamp,s1) values(1,1101)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,198)",
+      "insert into root.vehicle.d0(timestamp,s1) values(100,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(101,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(102,180)",
+      "insert into root.vehicle.d0(timestamp,s1) values(103,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(104,190)",
+      "insert into root.vehicle.d0(timestamp,s1) values(105,199)",
+      "insert into root.vehicle.d0(timestamp,s1) values(2,40000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(50,50000)",
+      "insert into root.vehicle.d0(timestamp,s1) values(1000,55555)",
+
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,55555)",
+      "insert into root.vehicle.d0(timestamp,s2) values(2,2.22)",
+      "insert into root.vehicle.d0(timestamp,s2) values(3,3.33)",
+      "insert into root.vehicle.d0(timestamp,s2) values(4,4.44)",
+      "insert into root.vehicle.d0(timestamp,s2) values(102,10.00)",
+      "insert into root.vehicle.d0(timestamp,s2) values(105,11.11)",
+      "insert into root.vehicle.d0(timestamp,s2) values(1000,1000.11)",
+
+      "insert into root.vehicle.d0(timestamp,s3) values(60,'aaaaa')",
+      "insert into root.vehicle.d0(timestamp,s3) values(70,'bbbbb')",
+      "insert into root.vehicle.d0(timestamp,s3) values(80,'ccccc')",
+      "insert into root.vehicle.d0(timestamp,s3) values(101,'ddddd')",
+      "insert into root.vehicle.d0(timestamp,s3) values(102,'fffff')",
+
+      "insert into root.vehicle.d1(timestamp,s0) values(1,999)",
+      "insert into root.vehicle.d1(timestamp,s0) values(1000,888)",
+
+      "insert into root.vehicle.d0(timestamp,s1) values(2000-01-01T08:00:00+08:00, 100)",
+      "insert into root.vehicle.d0(timestamp,s3) values(2000-01-01T08:00:00+08:00, 'good')",
+
+      "insert into root.vehicle.d0(timestamp,s4) values(100, false)",
+      "insert into root.vehicle.d0(timestamp,s4) values(100, true)"
+  };
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    insertSQL();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+  @Test
+  public void countOnlyTimeFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,3,7,4,5,1"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(s0),count(s1),count(s2),count(s3),count(s4) " +
+              "from root.vehicle.d0 where time >= 3 and time <= 106");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2))
+            + "," + resultSet.getString(count(d0s3)) + "," + resultSet.getString(count(d0s4));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void countOnlyTimeFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    countOnlyTimeFilterTest();
+  }
+
+  @Test
+  public void functionsNoFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,4,0,6,1",
+        "0,22222,null,good",
+        "0,90,null,aaaaa",
+        "0,22222,null,good",
+        "0,22610.0,0.0"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+
+      //select count(d0.s0),count(d1.s1),count(d0.s3),count(d0.s4) from root.vehicle
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(d0.s0),count(d1.s1),count(d0.s3),count(d0.s4) from root.vehicle");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d1s1)) + "," + resultSet.getString(count(d0s3))
+            + "," + resultSet.getString(count(d0s4));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      //select max_value(d0.s0),max_value(d1.s1),max_value(d0.s3) from root.vehicle
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select max_value(d0.s0),max_value(d1.s1),max_value(d0.s3) from root.vehicle");
+      resultSet = statement.getResultSet();
+      Assert.assertTrue(hasResultSet);
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+            + "," + resultSet.getString(max_value(d1s1)) + ","
+            + resultSet.getString(max_value(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+
+      //select first(d0.s0),first(d1.s1),first(d0.s3) from root.vehicle
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select first(d0.s0),first(d1.s1),first(d0.s3) from root.vehicle");
+      resultSet = statement.getResultSet();
+      Assert.assertTrue(hasResultSet);
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(first(d0s0))
+            + "," + resultSet.getString(first(d1s1)) + "," + resultSet.getString(first(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(3, cnt);
+      statement.close();
+
+      //select last(d0.s0),last(d1.s1),last(d0.s3) from root.vehicle
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select last(d0.s0),last(d1.s1),last(d0.s3) from root.vehicle");
+      resultSet = statement.getResultSet();
+      Assert.assertTrue(hasResultSet);
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(last(d0s0))
+            + "," + resultSet.getString(last(d1s1)) + "," + resultSet.getString(last(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(4, cnt);
+      statement.close();
+
+      //select sum(d0.s0),sum(d1.s1),sum(d0.s3) from root.vehicle
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select sum(d0.s0),sum(d1.s1) from root.vehicle");
+      resultSet = statement.getResultSet();
+      Assert.assertTrue(hasResultSet);
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0))
+            + "," + resultSet.getString(sum(d1s1));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(5, cnt);
+      statement.close();
+
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void functionsNoFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    functionsNoFilterTest();
+  }
+
+  @Test
+  public void lastAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,22222,55555"
+    };
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select last(s0),last(s1) from root.vehicle.d0 where s2 >= 3.33");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans =
+            resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(last(d0s0)) + ","
+                + resultSet.getString(last(d0s1));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void lastAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    lastAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void firstAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,99,180"
+    };
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select first(s0),first(s1) from root.vehicle.d0 where s2 >= 3.33");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans =
+            resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(first(d0s0)) + ","
+                + resultSet.getString(first(d0s1));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void firstAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    firstAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void sumAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,22321.0,55934.0,1029"
+    };
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select sum(s0),sum(s1),sum(s2) from root.vehicle.d0 where s2 >= 3.33");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0))
+            + "," + resultSet.getString(sum(d0s1)) + "," + Math
+            .round(resultSet.getDouble(sum(d0s2)));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void sumAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    sumAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void meanAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,11160.5,18645,206"
+    };
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select mean(s0),mean(s1),mean(s2) from root.vehicle.d0 where s2 >= 3.33");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(mean(d0s0))
+            + "," + Math.round(resultSet.getDouble(mean(d0s1))) + ","
+            + Math.round(resultSet.getDouble(mean(d0s2)));
+        //System.out.println("!!!!!============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void meanAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    meanAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void countAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,2,3,5,1,0"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select count(s0),count(s1),count(s2),count(s3),"
+          + "count(s4) from root.vehicle.d0 where s2 >= 3.33");
+      // System.out.println(hasResultSet + "...");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2))
+            + "," + resultSet.getString(count(d0s3)) + "," + resultSet.getString(count(d0s4));
+        // System.out.println("============ " + ans);
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void countAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    countAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void minTimeAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,104,1,2,101,100"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select min_time(s0),min_time(s1),min_time(s2)"
+          + ",min_time(s3),min_time(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(min_time(d0s0))
+            + "," + resultSet.getString(min_time(d0s1)) + "," + resultSet.getString(min_time(d0s2))
+            + "," + resultSet.getString(min_time(d0s3)) + "," + resultSet.getString(min_time(d0s4));
+        // System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+        Assert.assertEquals(1, cnt);
+      }
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void minTimeAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    minTimeAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void maxTimeAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,105,105,105,102,100"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_time(s0),max_time(s1),max_time(s2)"
+          + ",max_time(s3),max_time(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_time(d0s0))
+            + "," + resultSet.getString(max_time(d0s1)) + "," + resultSet.getString(max_time(d0s2))
+            + "," + resultSet.getString(max_time(d0s3)) + "," + resultSet.getString(max_time(d0s4));
+        // System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void maxTimeAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    maxTimeAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void minValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,90,180,2.22,ddddd,true"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select min_value(s0),min_value(s1),min_value(s2)"
+          + ",min_value(s3),min_value(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+      Assert.assertTrue(hasResultSet);
+
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(min_value(d0s0))
+            + "," + resultSet.getString(min_value(d0s1)) +
+            "," + resultSet.getString(min_value(d0s2))
+            + "," + resultSet.getString(min_value(d0s3)) + ","
+            + resultSet.getString(min_value(d0s4));
+        // System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void minValueAggreWithSingleFilterRemoteTest()
+      throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    minValueAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void maxValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
+    String[] retArray = new String[]{
+        "0,99,50000,11.11,fffff,true"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_value(s0),max_value(s1),max_value(s2),"
+          + "max_value(s3),max_value(s4) from root.vehicle.d0 " +
+          "where s1 < 50000 and s1 != 100");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+            + "," + resultSet.getString(max_value(d0s1)) + "," + resultSet
+            .getString(max_value(d0s2))
+            + "," + resultSet.getString(max_value(d0s3)) + "," + resultSet
+            .getString(max_value(d0s4));
+        //System.out.println("============ " + ans);
+        //Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void maxValueAggreWithSingleFilterRemoteTest()
+      throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    maxValueAggreWithSingleFilterTest();
+  }
+
+  @Test
+  public void countAggreWithMultiMultiFilterTest() {
+    String[] retArray = new String[]{
+        "0,2",
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(s0) from root.vehicle.d0 where s2 >= 3.33");
+      // System.out.println(hasResultSet + "...");
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0));
+        //System.out.println("============ " + ans);
+        Assert.assertEquals(ans, retArray[cnt]);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void countAggreWithMultiMultiFilterRemoteTest()
+      throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    countAggreWithMultiMultiFilterTest();
+  }
+
+  @Test
+  public void selectAllSQLTest() throws ClassNotFoundException, SQLException {
+    //d0s0,d0s1,d0s2,d0s3,d1s0
+    String[] retArray = new String[]{
+        "1,null,1101,null,null,999",
+        "2,null,40000,2.22,null,null",
+        "3,null,null,3.33,null,null",
+        "4,null,null,4.44,null,null",
+        "50,null,50000,null,null,null",
+        "60,null,null,null,aaaaa,null",
+        "70,null,null,null,bbbbb,null",
+        "80,null,null,null,ccccc,null",
+        "100,null,199,null,null,null",
+        "101,null,199,null,ddddd,null",
+        "102,null,180,10.0,fffff,null",
+        "103,null,199,null,null,null",
+        "104,90,190,null,null,null",
+        "105,99,199,11.11,null,null",
+        "106,199,null,null,null,null",
+        "1000,22222,55555,1000.11,null,888",
+        "946684800000,null,100,null,good,null"
+    };
+
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select * from root");
+      // System.out.println(hasResultSet + "...");
+      if (hasResultSet) {
+        ResultSet resultSet = statement.getResultSet();
+        int cnt = 0;
+        while (resultSet.next()) {
+          String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(d0s0) + ","
+              + resultSet.getString(d0s1) + "," + resultSet.getString(d0s2) + "," +
+              resultSet.getString(d0s3) + "," + resultSet.getString(d1s0);
+          // System.out.println(ans);
+          Assert.assertEquals(ans, retArray[cnt]);
+          cnt++;
+        }
+        Assert.assertEquals(17, cnt);
+      }
+      statement.close();
+
+      retArray = new String[]{
+          "100,true"
+      };
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select s4 from root.vehicle.d0");
+      if (hasResultSet) {
+        ResultSet resultSet = statement.getResultSet();
+        int cnt = 0;
+        while (resultSet.next()) {
+          String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(d0s4);
+          Assert.assertEquals(ans, retArray[cnt]);
+          cnt++;
+        }
+        Assert.assertEquals(1, cnt);
+      }
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+  @Test
+  @Ignore
+  public void selectAllSQLRemoteTest() throws ClassNotFoundException, SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    selectAllSQLTest();
+  }
+
+  private static void insertSQL() {
+    try (Connection connection = DriverManager.getConnection
+        (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
+      insertData(connection, createSqls, insertSqls);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    }
+  }
+
+}
+
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
index ba8746d..f78c39a 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
@@ -110,7 +110,6 @@ public class IoTDBFillQueryIT {
 
   @Before
   public void setUp() throws Exception {
-    EnvironmentUtils.cleanEnv();
     EnvironmentUtils.closeStatMonitor();
     EnvironmentUtils.closeMemControl();
     CLUSTER_CONFIG.createAllPath();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchLocallyIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchLocallyIT.java
index a007582..f662307 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchLocallyIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBMetadataFetchLocallyIT.java
@@ -37,7 +37,6 @@ public class IoTDBMetadataFetchLocallyIT extends IoTDBMetadataFetchAbstract{
 
   @Before
   public void setUp() throws Exception {
-    EnvironmentUtils.cleanEnv();
     EnvironmentUtils.closeStatMonitor();
     EnvironmentUtils.closeMemControl();
     config.createAllPath();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
index 90d4474..5be3cbc 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
@@ -126,7 +126,6 @@ public class IoTDBQueryIT {
 
   @Before
   public void setUp() throws Exception {
-    EnvironmentUtils.cleanEnv();
     EnvironmentUtils.closeStatMonitor();
     EnvironmentUtils.closeMemControl();
     CLUSTER_CONFIG.createAllPath();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
index 926d8a7..b40de2d 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
@@ -43,7 +43,6 @@ import org.junit.Test;
 
 public class IoTDBQueryLargeDataIT {
 
-
   private Server server;
   private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
   private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
@@ -176,7 +175,6 @@ public class IoTDBQueryLargeDataIT {
 
   @Before
   public void setUp() throws Exception {
-    EnvironmentUtils.cleanEnv();
     EnvironmentUtils.closeStatMonitor();
     EnvironmentUtils.closeMemControl();
     CLUSTER_CONFIG.createAllPath();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/utils/RaftUtilsTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/utils/RaftUtilsTest.java
index e7a4201..787bf47 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/RaftUtilsTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/utils/RaftUtilsTest.java
@@ -60,15 +60,16 @@ public class RaftUtilsTest {
 
   private ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
   private String[] ipListOld;
+  private int portOld;
   private int replicatorOld;
   private int numOfVirtualNodesOld;
-  private int PORT = 7777;
+  private int port = 7777;
   private String[] ipList = {
-      "192.168.130.4:" + PORT,
-      "192.168.130.5:" + PORT,
-      "192.168.130.2:" + PORT,
-      "192.168.130.1:" + PORT,
-      "192.168.130.3:" + PORT
+      "192.168.130.4:" + port,
+      "192.168.130.5:" + port,
+      "192.168.130.2:" + port,
+      "192.168.130.1:" + port,
+      "192.168.130.3:" + port
   };
   private int replicator = 3;
 
@@ -124,11 +125,12 @@ public class RaftUtilsTest {
     Mockito.doNothing().when(response).addResult(any(boolean.class));
     Mockito.doNothing().when(response).setErrorMsg(any(String.class));
     ipListOld = config.getNodes();
+    portOld = config.getPort();
     replicatorOld = config.getReplication();
     numOfVirtualNodesOld = config.getNumOfVirtualNodes();
 
     int numOfVirtualNodes = 2;
-    config.setPort(PORT);
+    config.setPort(port);
     config.setNodes(ipList);
     config.setReplication(replicator);
     config.setNumOfVirtualNodes(numOfVirtualNodes);
@@ -138,8 +140,9 @@ public class RaftUtilsTest {
   }
 
   @After
-  public void tearDown() throws Exception {
+  public void tearDown() {
     peerIds.clear();
+    config.setPort(portOld);
     config.setNodes(ipListOld);
     config.setReplication(replicatorOld);
     config.setNumOfVirtualNodes(numOfVirtualNodesOld);
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java b/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
index f080a9d..0bb3c08 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/utils/Utils.java
@@ -26,6 +26,11 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class Utils {
+
+  private Utils() {
+
+  }
+
   public static String getCurrentPath(String... command) throws IOException {
     ProcessBuilder builder = new ProcessBuilder(command);
     builder.redirectErrorStream(true);
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
index 516abdc..1abc553 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
@@ -551,6 +551,9 @@ public class FileNodeManager implements IStatistic, IService {
         try {
           fileNodeProcessor.delete(deviceId, measurementId, timestamp);
         } catch (IOException e) {
+          System.out.println(deviceId);
+          System.out.println(measurementId);
+          System.out.println(timestamp);
           throw new FileNodeManagerException(e);
         }
         // change the type of tsfile to overflowed


[incubator-iotdb] 03/11: fix a severe bug of filter serialization

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit aa3aa0d943626e3864753dd93ff4585e6f60e3eb
Author: lta <li...@163.com>
AuthorDate: Fri May 17 15:08:50 2019 +0800

    fix a severe bug of filter serialization
---
 .../executor/ClusterAggregateEngineExecutor.java   | 140 ++++-
 .../ClusterRpcSingleQueryManager.java              |   4 +-
 .../querynode/ClusterLocalQueryManager.java        |   2 +-
 .../querynode/ClusterLocalSingleQueryManager.java  |  25 +-
 .../querynode/IClusterLocalQueryManager.java       |   4 +-
 .../querynode/IClusterLocalSingleQueryManager.java |   5 +-
 .../querynode/ClusterFilterSeriesBatchReader.java  |   8 +-
 .../cluster/query/utils/ClusterRpcReaderUtils.java |  28 +-
 .../query/utils/QueryPlanPartitionUtils.java       |   2 +-
 .../request/querydata/InitSeriesReaderRequest.java |  72 ++-
 .../apache/iotdb/cluster/integration/Constant.java | 100 ++++
 .../cluster/integration/IoTDBAggregationIT.java    | 640 +++++++++++++++++++++
 .../cluster/integration/IoTDBFillQueryIT.java      |   1 -
 .../db/query/executor/AggregateEngineExecutor.java |  14 +-
 .../timegenerator/AbstractNodeConstructor.java     |   3 -
 15 files changed, 996 insertions(+), 52 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
index b63b311..51113c9 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -20,10 +20,18 @@ package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.cluster.query.timegenerator.ClusterTimeGenerator;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -41,6 +49,7 @@ import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
 import org.apache.iotdb.db.query.factory.AggreFuncFactory;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
 import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
 import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -49,10 +58,16 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 
+/**
+ * Handle aggregation query and construct dataset in cluster
+ */
 public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
   private ClusterRpcSingleQueryManager queryManager;
+  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+
 
   public ClusterAggregateEngineExecutor(List<Path> selectedSeries, List<String> aggres,
       IExpression expression, ClusterRpcSingleQueryManager queryManager) {
@@ -80,7 +95,7 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
         paths.add(path);
         // construct AggregateFunction
         TSDataType tsDataType = MManager.getInstance()
-            .getSeriesType(selectedSeries.get(i).getFullPath());
+            .getSeriesType(path.getFullPath());
         AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), tsDataType);
         function.init();
 
@@ -103,6 +118,8 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
         AggreResultData aggreResultData = aggregateWithoutTimeGenerator(function,
             sequenceReader, unSeqMergeReader, timeFilter);
+
+        dataTypes.add(aggreResultData.getDataType());
         readers.add(new AggreResultDataPointReader(aggreResultData));
       }
     }
@@ -111,4 +128,125 @@ public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
     return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, readers);
   }
+
+  /**
+   * execute aggregate function with value filter.
+   *
+   * @param context query context.
+   */
+  @Override
+  public QueryDataSet executeWithTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+
+    /** add query token for query series which can be handled locally **/
+    List<Path> localQuerySeries = new ArrayList<>(selectedSeries);
+    Set<Path> remoteQuerySeries = queryManager.getSelectSeriesReaders().keySet();
+    localQuerySeries.removeAll(remoteQuerySeries);
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), localQuerySeries);
+
+    /** add query token for filter series which can be handled locally **/
+    Set<String> deviceIdSet = new HashSet<>();
+    for (FilterGroupEntity filterGroupEntity : queryManager.getFilterGroupEntityMap().values()) {
+      List<Path> remoteFilterSeries = filterGroupEntity.getFilterPaths();
+      remoteFilterSeries.forEach(seriesPath -> deviceIdSet.add(seriesPath.getDevice()));
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenExpression(context.getJobId(), expression, deviceIdSet);
+
+    ClusterTimeGenerator timestampGenerator;
+    List<EngineReaderByTimeStamp> readersOfSelectedSeries;
+    try {
+      timestampGenerator = new ClusterTimeGenerator(expression, context,
+          queryManager);
+      readersOfSelectedSeries = ClusterSeriesReaderFactory
+          .createReadersByTimestampOfSelectedPaths(selectedSeries, context,
+              queryManager);
+    } catch (IOException ex) {
+      throw new FileNodeManagerException(ex);
+    }
+
+    /** Get data type of select paths **/
+    List<TSDataType> originDataTypes = new ArrayList<>();
+    Map<Path, ClusterSelectSeriesReader> selectSeriesReaders = queryManager
+        .getSelectSeriesReaders();
+    for (Path path : selectedSeries) {
+      try {
+        if (selectSeriesReaders.containsKey(path)) {
+          originDataTypes.add(selectSeriesReaders.get(path).getDataType());
+        } else {
+          originDataTypes.add(MManager.getInstance().getSeriesType(path.getFullPath()));
+        }
+      } catch (PathErrorException e) {
+        throw new FileNodeManagerException(e);
+      }
+    }
+
+    List<AggregateFunction> aggregateFunctions = new ArrayList<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      TSDataType type = originDataTypes.get(i);
+      AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), type);
+      function.init();
+      aggregateFunctions.add(function);
+    }
+    List<AggreResultData> aggreResultDataList = aggregateWithTimeGenerator(aggregateFunctions,
+        timestampGenerator,
+        readersOfSelectedSeries);
+
+    List<IPointReader> resultDataPointReaders = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    for (AggreResultData resultData : aggreResultDataList) {
+      dataTypes.add(resultData.getDataType());
+      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
+    }
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  }
+
+  /**
+   * Calculate the aggregate result with value filter.
+   */
+  @Override
+  protected List<AggreResultData> aggregateWithTimeGenerator(
+      List<AggregateFunction> aggregateFunctions,
+      TimeGenerator timestampGenerator,
+      List<EngineReaderByTimeStamp> readersOfSelectedSeries)
+      throws IOException {
+
+    while (timestampGenerator.hasNext()) {
+
+      // generate timestamps for aggregate
+      long[] timeArray = new long[aggregateFetchSize];
+      List<Long> batchTimestamp = new ArrayList<>();
+      int timeArrayLength = 0;
+      for (int cnt = 0; cnt < aggregateFetchSize; cnt++) {
+        if (!timestampGenerator.hasNext()) {
+          break;
+        }
+        long time = timestampGenerator.next();
+        timeArray[timeArrayLength++] = time;
+        batchTimestamp.add(time);
+      }
+
+      // fetch all remote select series data by timestamp list.
+      if (!batchTimestamp.isEmpty()) {
+        try {
+          queryManager.fetchBatchDataByTimestampForAllSelectPaths(batchTimestamp);
+        } catch (RaftConnectionException e) {
+          throw new IOException(e);
+        }
+      }
+
+      // calculate the partial aggregate result for this batch of timestamps
+      for (int i = 0; i < readersOfSelectedSeries.size(); i++) {
+        aggregateFunctions.get(i).calcAggregationUsingTimestamps(timeArray, timeArrayLength,
+            readersOfSelectedSeries.get(i));
+      }
+    }
+
+    List<AggreResultData> aggreResultDataArrayList = new ArrayList<>();
+    for (AggregateFunction function : aggregateFunctions) {
+      aggreResultDataArrayList.add(function.getResult());
+    }
+    return aggreResultDataArrayList;
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index faeda22..6c4f2ad 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -134,7 +134,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
    * group
    */
   private void initSeriesReader(int readDataConsistencyLevel)
-      throws RaftConnectionException {
+      throws RaftConnectionException, IOException {
     // Init all series with data group of select series,if filter series has the same data group, init them together.
     for (Entry<String, QueryPlan> entry : selectPathPlans.entrySet()) {
       String groupId = entry.getKey();
@@ -144,7 +144,7 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         queryNodes.put(groupId, randomPeer);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
-        List<Filter> filterList = null;
+        List<Filter> filterList = new ArrayList<>();
         if (filterGroupEntityMap.containsKey(groupId)) {
           FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
           allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
index fe3ac52..4e09af8 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalQueryManager.java
@@ -53,7 +53,7 @@ public class ClusterLocalQueryManager implements IClusterLocalQueryManager {
 
   @Override
   public InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
     long jobId = QueryResourceManager.getInstance().assignJobId();
     String taskId = request.getTaskId();
     TASK_ID_MAP_JOB_ID.put(taskId, jobId);
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index 76a141e..0f2cf62 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -64,9 +64,7 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
-import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
-import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.slf4j.Logger;
@@ -75,7 +73,8 @@ import org.slf4j.LoggerFactory;
 
 public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryManager {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterLocalSingleQueryManager.class);
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(ClusterLocalSingleQueryManager.class);
 
   private String groupId;
 
@@ -127,7 +126,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
 
   @Override
   public InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException {
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
     this.groupId = request.getGroupID();
     InitSeriesReaderResponse response = new InitSeriesReaderResponse(groupId);
     QueryContext context = new QueryContext(jobId);
@@ -199,7 +198,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       throws FileNodeManagerException, PathErrorException, IOException, QueryFilterOptimizationException, ProcessorException {
     if (queryPlan.getExpression() == null
         || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
-      handleAggreSeriesReaderWithoutTimeGenerator(queryPlan,context,response);
+      handleAggreSeriesReaderWithoutTimeGenerator(queryPlan, context, response);
     } else {
       handleSelectReaderWithTimeGenerator(queryPlan, context, response);
     }
@@ -210,24 +209,23 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    *
    * @param queryPlan fill query plan
    */
-  private void handleAggreSeriesReaderWithoutTimeGenerator(QueryPlan queryPlan, QueryContext context,
+  private void handleAggreSeriesReaderWithoutTimeGenerator(QueryPlan queryPlan,
+      QueryContext context,
       InitSeriesReaderResponse response)
-      throws FileNodeManagerException, PathErrorException, IOException, QueryFilterOptimizationException, ProcessorException {
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
     AggregationPlan fillQueryPlan = (AggregationPlan) queryPlan;
 
     List<Path> selectedPaths = fillQueryPlan.getPaths();
     QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
 
-    IExpression optimizedExpression = ExpressionOptimizer.getInstance()
-        .optimize(fillQueryPlan.getExpression(), selectedPaths);
     AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
-        selectedPaths, fillQueryPlan.getAggregations(), optimizedExpression);
+        selectedPaths, fillQueryPlan.getAggregations(), fillQueryPlan.getExpression());
 
     List<IPointReader> readers = engineExecutor.constructAggreReadersWithoutTimeGenerator(context);
 
     List<TSDataType> dataTypes = engineExecutor.getDataTypes();
 
-    for (int i =0 ; i < selectedPaths.size(); i ++) {
+    for (int i = 0; i < selectedPaths.size(); i++) {
       Path path = selectedPaths.get(i);
       selectSeriesReaders.put(path.getFullPath(),
           new ClusterSelectSeriesBatchReader(dataTypes.get(i), readers.get(i)));
@@ -291,7 +289,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    */
   private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
       InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
-      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException, ClassNotFoundException {
     QueryDataSet queryDataSet = queryProcessExecutor
         .processQuery(plan, context);
     List<Path> paths = plan.getPaths();
@@ -321,7 +319,8 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
           .createReaderByTimeStamp(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
       selectSeriesReaders
-          .put(path.getFullPath(), new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
+          .put(path.getFullPath(),
+              new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
       dataTypeMap.put(path.getFullPath(), dataType);
       dataTypeList.add(dataType);
     }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
index cc0f103..1105bb2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalQueryManager.java
@@ -42,7 +42,7 @@ public interface IClusterLocalQueryManager {
    * @param request request for query data from coordinator node
    */
   InitSeriesReaderResponse createQueryDataSet(InitSeriesReaderRequest request)
-      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, FileNodeManagerException, PathErrorException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * Read batch data of all querying series in request and set response.
@@ -54,8 +54,8 @@ public interface IClusterLocalQueryManager {
 
   /**
    * Read batch data of select series by batch timestamp which is used in query with value filter
-   *  @param request request of querying select paths
    *
+   * @param request request of querying select paths
    */
   QuerySeriesDataByTimestampResponse readBatchDataByTimestamp(
       QuerySeriesDataByTimestampRequest request) throws IOException;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
index 318772f..1d89c5c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/IClusterLocalSingleQueryManager.java
@@ -40,18 +40,19 @@ public interface IClusterLocalSingleQueryManager {
 
   /**
    * Initially create corresponding series readers.
+   *
    * @param request request of querying series data
    */
   InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
-      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException;
+      throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException;
 
   /**
    * <p>
    * Read batch data If query round in cache is equal to target query round, it means that batch
    * data in query node transfer to coordinator fail and return cached batch data.
    * </p>
-   *  @param request request of querying series data
    *
+   * @param request request of querying series data
    */
   QuerySeriesDataResponse readBatchData(QuerySeriesDataRequest request)
       throws IOException;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
index 3f21835..1cd357e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
@@ -68,12 +68,12 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
       batchDataList.add(new BatchData(dataTypeList.get(i), true));
     }
     int dataPointCount = 0;
-    while(true){
-      if(!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()){
+    while (true) {
+      if (!hasNext() || dataPointCount == CLUSTER_CONF.getBatchReadSize()) {
         break;
       }
-      if(hasNext() && addTimeValuePair(batchDataList, dataTypeList)){
-          dataPointCount++;
+      if (hasNext() && addTimeValuePair(batchDataList, dataTypeList)) {
+        dataPointCount++;
       }
     }
     return batchDataList;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index 424ba95..dca2d30 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import com.alipay.sofa.jraft.entity.PeerId;
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
@@ -62,7 +63,7 @@ public class ClusterRpcReaderUtils {
    */
   public static BasicResponse createClusterSeriesReader(String groupId, PeerId peerId,
       int readDataConsistencyLevel, Map<PathType, QueryPlan> allQueryPlan, String taskId,
-      List<Filter> filterList) throws RaftConnectionException {
+      List<Filter> filterList) throws RaftConnectionException, IOException {
 
     /** handle request **/
     BasicRequest request = InitSeriesReaderRequest
@@ -71,14 +72,35 @@ public class ClusterRpcReaderUtils {
     return handleQueryRequest(request, peerId, 0);
   }
 
-  public static QuerySeriesDataResponse fetchBatchData(String groupID, PeerId peerId, String taskId,
+  /**
+   * Fetch batch data for select series in a query without value filter or filter series.
+   *
+   * @param groupId data group id
+   * @param peerId query node id
+   * @param taskId task id of query task
+   * @param pathType type of path
+   * @param fetchDataSeries series list which need to fetch data
+   * @param queryRounds query rounds
+   */
+  public static QuerySeriesDataResponse fetchBatchData(String groupId, PeerId peerId, String taskId,
       PathType pathType, List<String> fetchDataSeries, long queryRounds)
       throws RaftConnectionException {
     BasicRequest request = QuerySeriesDataRequest
-        .createFetchDataRequest(groupID, taskId, pathType, fetchDataSeries, queryRounds);
+        .createFetchDataRequest(groupId, taskId, pathType, fetchDataSeries, queryRounds);
     return (QuerySeriesDataResponse) handleQueryRequest(request, peerId, 0);
   }
 
+  /**
+   * Fetch batch data corresponding to a given list of timestamp for select series in a query with
+   * value filter.
+   *
+   * @param groupId data group id
+   * @param peerId query node id
+   * @param taskId task id of query task
+   * @param queryRounds query rounds
+   * @param batchTimestamp list of valid timestamp
+   * @param fetchDataSeries series list which need to fetch data
+   */
   public static QuerySeriesDataByTimestampResponse fetchBatchDataByTimestamp(String groupId,
       PeerId peerId, String taskId, long queryRounds, List<Long> batchTimestamp,
       List<String> fetchDataSeries)
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index fc0d401..5fbd30c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -156,7 +156,7 @@ public class QueryPlanPartitionUtils {
       throws PathErrorException {
     AggregationPlan queryPlan = (AggregationPlan) singleQueryManager.getOriginQueryPlan();
     List<Path> selectPaths = queryPlan.getPaths();
-    List<String> aggregations = new ArrayList<>();
+    List<String> aggregations = queryPlan.getAggregations();
     Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
     Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
     Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
index c974e2f..e28ac15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/InitSeriesReaderRequest.java
@@ -18,10 +18,16 @@
  */
 package org.apache.iotdb.cluster.rpc.raft.request.querydata;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicQueryRequest;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
@@ -42,12 +48,12 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
   /**
    * Key is series type, value is query plan
    */
-  private Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
+  private Map<PathType, byte[]> allQueryPlan = new EnumMap<>(PathType.class);
 
   /**
    * Represent all filter of leaf node in filter tree while executing a query with value filter.
    */
-  private List<Filter> filterList = new ArrayList<>();
+  private List<byte[]> filterList = new ArrayList<>();
 
 
   private InitSeriesReaderRequest(String groupID, String taskId) {
@@ -55,12 +61,17 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId, int readConsistencyLevel,
-      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList){
+  public static InitSeriesReaderRequest createInitialQueryRequest(String groupId, String taskId,
+      int readConsistencyLevel,
+      Map<PathType, QueryPlan> allQueryPlan, List<Filter> filterList) throws IOException {
     InitSeriesReaderRequest request = new InitSeriesReaderRequest(groupId, taskId);
     request.setReadConsistencyLevel(readConsistencyLevel);
-    request.allQueryPlan = allQueryPlan;
-    request.filterList = filterList;
+    for (Entry<PathType, QueryPlan> entry : allQueryPlan.entrySet()) {
+      request.allQueryPlan.put(entry.getKey(), toByteArray(entry.getValue()));
+    }
+    for (Filter filter : filterList) {
+      request.filterList.add(toByteArray(filter));
+    }
     return request;
   }
 
@@ -72,20 +83,51 @@ public class InitSeriesReaderRequest extends BasicQueryRequest {
     this.taskId = taskId;
   }
 
-  public Map<PathType, QueryPlan> getAllQueryPlan() {
-    return allQueryPlan;
+  public Map<PathType, QueryPlan> getAllQueryPlan() throws IOException, ClassNotFoundException {
+    Map<PathType, QueryPlan> queryPlanMap = new EnumMap<>(PathType.class);
+    for (Entry<PathType, byte[]> entry : allQueryPlan.entrySet()) {
+      queryPlanMap.put(entry.getKey(), (QueryPlan) toObject(entry.getValue()));
+    }
+    return queryPlanMap;
   }
 
-  public void setAllQueryPlan(
-      Map<PathType, QueryPlan> allQueryPlan) {
-    this.allQueryPlan = allQueryPlan;
+  public List<Filter> getFilterList() throws IOException, ClassNotFoundException {
+    List<Filter> filters = new ArrayList<>();
+    for (byte[] filterBytes : filterList) {
+      filters.add((Filter) toObject(filterBytes));
+    }
+    return filters;
   }
 
-  public List<Filter> getFilterList() {
-    return filterList;
+  /**
+   * Convert an object to byte array
+   *
+   * @param obj Object, which need to implement Serializable
+   * @return byte array of object
+   */
+  private static byte[] toByteArray(Object obj) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    ObjectOutputStream oos = new ObjectOutputStream(bos);
+    oos.writeObject(obj);
+    oos.flush();
+    byte[] bytes = bos.toByteArray();
+    oos.close();
+    bos.close();
+    return bytes;
   }
 
-  public void setFilterList(List<Filter> filterList) {
-    this.filterList = filterList;
+  /**
+   * Convert byte array back to Object
+   *
+   * @param bytes byte array of object
+   * @return object
+   */
+  private static Object toObject(byte[] bytes) throws IOException, ClassNotFoundException {
+    ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
+    ObjectInputStream ois = new ObjectInputStream(bis);
+    Object obj = ois.readObject();
+    ois.close();
+    bis.close();
+    return obj;
   }
 }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
new file mode 100644
index 0000000..71cf523
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/Constant.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import org.apache.iotdb.tsfile.write.record.TSRecord;
+import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
+
+public class Constant {
+
+  public static final String d0s0 = "root.vehicle.d0.s0";
+  public static final String d0s1 = "root.vehicle.d0.s1";
+  public static final String d0s2 = "root.vehicle.d0.s2";
+  public static final String d0s3 = "root.vehicle.d0.s3";
+  public static final String d0s4 = "root.vehicle.d0.s4";
+  public static final String d0s5 = "root.vehicle.d0.s5";
+  public static final String d1s0 = "root.vehicle.d1.s0";
+  public static final String d1s1 = "root.vehicle.d1.s1";
+  public static final String TIMESTAMP_STR = "Time";
+  public static boolean testFlag = true;
+  public static String[] stringValue = new String[]{"A", "B", "C", "D", "E"};
+  public static String[] booleanValue = new String[]{"true", "false"};
+
+  public static String[] create_sql = new String[]{"SET STORAGE GROUP TO root.vehicle",
+
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.vehicle.d0.s5 WITH DATATYPE=DOUBLE, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+
+  };
+
+  public static String insertTemplate = "insert into %s(timestamp%s) values(%d%s)";
+
+  public static String first(String path) {
+    return String.format("first(%s)", path);
+  }
+
+  public static String last(String path) {
+    return String.format("last(%s)", path);
+  }
+
+  public static String sum(String path) {
+    return String.format("sum(%s)", path);
+  }
+
+  public static String mean(String path) {
+    return String.format("mean(%s)", path);
+  }
+
+  public static String count(String path) {
+    return String.format("count(%s)", path);
+  }
+
+  public static String max_time(String path) {
+    return String.format("max_time(%s)", path);
+  }
+
+  public static String min_time(String path) {
+    return String.format("min_time(%s)", path);
+  }
+
+  public static String max_value(String path) {
+    return String.format("max_value(%s)", path);
+  }
+
+  public static String min_value(String path) {
+    return String.format("min_value(%s)", path);
+  }
+
+  public static String recordToInsert(TSRecord record) {
+    StringBuilder measurements = new StringBuilder();
+    StringBuilder values = new StringBuilder();
+    for (DataPoint dataPoint : record.dataPointList) {
+      measurements.append(",").append(dataPoint.getMeasurementId());
+      values.append(",").append(dataPoint.getValue());
+    }
+    return String
+        .format(insertTemplate, record.deviceId, measurements.toString(), record.time, values);
+  }
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java
new file mode 100644
index 0000000..bf7c4da
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationIT.java
@@ -0,0 +1,640 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.integration.Constant.count;
+import static org.apache.iotdb.cluster.integration.Constant.first;
+import static org.apache.iotdb.cluster.integration.Constant.last;
+import static org.apache.iotdb.cluster.integration.Constant.max_time;
+import static org.apache.iotdb.cluster.integration.Constant.max_value;
+import static org.apache.iotdb.cluster.integration.Constant.mean;
+import static org.apache.iotdb.cluster.integration.Constant.min_time;
+import static org.apache.iotdb.cluster.integration.Constant.min_value;
+import static org.apache.iotdb.cluster.integration.Constant.sum;
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IoTDBAggregationIT {
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static String[] creationSqls = new String[]{
+      "SET STORAGE GROUP TO root.vehicle.d0",
+      "SET STORAGE GROUP TO root.vehicle.d1",
+
+      "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+      "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN"
+  };
+
+  private static String[] dataSet2 = new String[]{
+      "SET STORAGE GROUP TO root.ln.wf01.wt01",
+      "CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=FLOAT, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.hardware WITH DATATYPE=INT32, ENCODING=PLAIN",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(1, 1.1, false, 11)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(2, 2.2, true, 22)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(3, 3.3, false, 33 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(4, 4.4, false, 44)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(5, 5.5, false, 55)"
+  };
+
+  private String insertTemplate = "INSERT INTO root.vehicle.d0(timestamp,s0,s1,s2,s3)"
+      + " VALUES(%d,%d,%d,%f,%s)";
+
+  private static final String TIMESTAMP_STR = "Time";
+  private final String d0s0 = "root.vehicle.d0.s0";
+  private final String d0s1 = "root.vehicle.d0.s1";
+  private final String d0s2 = "root.vehicle.d0.s2";
+  private final String d0s3 = "root.vehicle.d0.s3";
+  private static final String TEMPERATURE_STR = "root.ln.wf01.wt01.temperature";
+
+  /**
+   * Resets the environment, starts the cluster server, registers the JDBC
+   * driver, and loads the fixture data before every test.
+   */
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    prepareData();
+  }
+
+  /**
+   * Stops the server, restores the executor's node address (remote tests
+   * override it), and cleans the environment after every test.
+   */
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+  /**
+   * count/min_time over root.ln.wf01.wt01.temperature with time and value
+   * filters. Note that {@code cnt} is cumulative across the three queries and
+   * indexes into the shared {@code retArray}, so each query is expected to
+   * return exactly one row.
+   */
+  @Test
+  public void test() throws SQLException {
+    String[] retArray = new String[]{
+        "0,2",
+        "0,4",
+        "0,3"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute(
+          "select count(temperature) from root.ln.wf01.wt01 where time > 3");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(count(TEMPERATURE_STR));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select min_time(temperature) from root.ln.wf01.wt01 where time > 3");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(min_time(TEMPERATURE_STR));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute(
+          "select min_time(temperature) from root.ln.wf01.wt01 where temperature > 3");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," +
+            resultSet.getString(min_time(TEMPERATURE_STR));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(3, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #test()} with the executor pointed at a non-local node. */
+  @Test
+  public void remoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    test();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * count() over all four d0 series, first with a time range and then over the
+   * whole series. {@code cnt} is cumulative across the two queries.
+   */
+  @Test
+  public void countTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,2001,2001,2001,2001",
+        "0,7500,7500,7500,7500"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select count(s0),count(s1),count(s2),count(s3) " +
+          "from root.vehicle.d0 where time >= 6000 and time <= 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2))
+            + "," + resultSet.getString(count(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select count(s0),count(s1),count(s2),count(s3) " +
+          "from root.vehicle.d0");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(count(d0s0))
+            + "," + resultSet.getString(count(d0s1)) + "," + resultSet.getString(count(d0s2))
+            + "," + resultSet.getString(count(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #countTest()} with the executor pointed at a non-local node. */
+  @Test
+  public void countRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    countTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+
+  /**
+   * first() over all four d0 series, with and without a time range. {@code cnt}
+   * is cumulative across the two queries.
+   */
+  @Test
+  public void firstTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,2000,2000,2000.0,2000",
+        "0,500,500,500.0,500"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select first(s0),first(s1),first(s2),first(s3) " +
+          "from root.vehicle.d0 where time >= 1500 and time <= 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(first(d0s0))
+            + "," + resultSet.getString(first(d0s1)) + "," + resultSet.getString(first(d0s2))
+            + "," + resultSet.getString(first(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select first(s0),first(s1),first(s2),first(s3) " +
+          "from root.vehicle.d0");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(first(d0s0))
+            + "," + resultSet.getString(first(d0s1)) + "," + resultSet.getString(first(d0s2))
+            + "," + resultSet.getString(first(d0s3));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #firstTest()} with the executor pointed at a non-local node. */
+  @Test
+  public void firstRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    firstTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * last() over d0.s0/d0.s2 for three time ranges. {@code cnt} is cumulative
+   * across the three queries, indexing into the shared {@code retArray}.
+   */
+  @Test
+  public void lastTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,8499,8499.0",
+        "0,1499,1499.0",
+        "0,2200,2200.0"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select last(s0),last(s2) " +
+          "from root.vehicle.d0 where time >= 1500 and time < 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(last(d0s0))
+            + "," + resultSet.getString(last(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select last(s0),last(s2) " +
+          "from root.vehicle.d0 where time <= 1600");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(last(d0s0))
+            + "," + resultSet.getString(last(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select last(s0),last(s2) " +
+          "from root.vehicle.d0 where time <= 2200");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(last(d0s0))
+            + "," + resultSet.getString(last(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(3, cnt);
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #lastTest()} with the executor pointed at a non-local node. */
+  @Test
+  public void lastRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    lastTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * max_time()/min_time() over d0.s0/d0.s2 for two time ranges. {@code cnt} is
+   * cumulative across the two queries.
+   */
+  @Test
+  public void maxminTimeTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,8499,500",
+        "0,2499,2000"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_time(s0),min_time(s2) " +
+          "from root.vehicle.d0 where time >= 100 and time < 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_time(d0s0))
+            + "," + resultSet.getString(min_time(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select max_time(s0),min_time(s2) " +
+          "from root.vehicle.d0 where time <= 2500 and time > 1800");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_time(d0s0))
+            + "," + resultSet.getString(min_time(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #maxminTimeTest()} with the executor pointed at a non-local node. */
+  @Test
+  public void maxminTimeRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    maxminTimeTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * max_value()/min_value() over d0.s0/d0.s2 for two time ranges. {@code cnt}
+   * is cumulative across the two queries.
+   */
+  @Test
+  public void maxminValueTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,8499,500.0",
+        "0,2499,500.0"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select max_value(s0),min_value(s2) " +
+          "from root.vehicle.d0 where time >= 100 and time < 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+            + "," + resultSet.getString(min_value(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select max_value(s0),min_value(s2) " +
+          "from root.vehicle.d0 where time < 2500");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(max_value(d0s0))
+            + "," + resultSet.getString(min_value(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /**
+   * Re-runs {@link #maxminValueTest()} with the executor pointed at a
+   * non-local node. Fixed: this previously re-ran maxminTimeTest() (copy-paste
+   * error), so the remote max_value/min_value path was never exercised.
+   */
+  @Test
+  public void maxminValueRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    maxminValueTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // restore the interrupt status instead of swallowing it
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  /**
+   * sum()/mean() over d0.s0/d0.s2 for two time ranges; expected values follow
+   * from the consecutive-integer data written by prepareData(). Fixed: both
+   * concatenated SQL strings lacked the separating space before "from"
+   * (yielding "...mean(s2)from root..."), unlike every sibling test.
+   */
+  @Test
+  public void meanSumTest() throws SQLException {
+    String[] retArray = new String[]{
+        "0,1.4508E7,7250.374812593703",
+        "0,626750.0,1250.998003992016"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select sum(s0),mean(s2) " +
+          "from root.vehicle.d0 where time >= 6000 and time <= 9000");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0))
+            + "," + resultSet.getString(mean(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(1, cnt);
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select sum(s0),mean(s2) " +
+          "from root.vehicle.d0 where time >= 1000 and time <= 2000");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(sum(d0s0))
+            + "," + resultSet.getString(mean(d0s2));
+        Assert.assertEquals(retArray[cnt], ans);
+        cnt++;
+      }
+      Assert.assertEquals(2, cnt);
+      statement.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  /** Re-runs {@link #meanSumTest()} with the executor pointed at a non-local node. */
+  @Test
+  public void meanSumRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    meanSumTest();
+    try {
+      Thread.sleep(200);
+    } catch (InterruptedException e) {
+      // NOTE(review): prefer Thread.currentThread().interrupt() over only logging
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Loads the fixture data: sealed BufferWrite files (5000-6999, 7500-8499,
+   * each flushed), unsequenced/overflow data (500-1499 flushed, 3000-6499
+   * merged), plus unflushed BufferWrite (9000-9999) and Overflow (2000-2499)
+   * cache, so queries cover every storage path.
+   *
+   * NOTE(review): the catch block only prints the stack trace; a failure here
+   * silently leaves the tests running against incomplete data. Consider
+   * rethrowing (e.g. as SQLException) so setUp fails fast — verify "merge" is
+   * expected to succeed in this cluster configuration first.
+   */
+  private void prepareData() throws SQLException {
+    Connection connection = null;
+    try {
+      connection = DriverManager
+          .getConnection(Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root",
+              "root");
+      Statement statement = connection.createStatement();
+      insertData(connection, creationSqls, dataSet2);
+      // prepare BufferWrite file
+      for (int i = 5000; i < 7000; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      statement.execute("flush");
+      for (int i = 7500; i < 8500; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      statement.execute("flush");
+
+      // prepare Unseq-File
+      for (int i = 500; i < 1500; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      statement.execute("flush");
+      for (int i = 3000; i < 6500; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      statement.execute("merge");
+
+      // prepare BufferWrite cache
+      for (int i = 9000; i < 10000; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      // prepare Overflow cache
+      for (int i = 2000; i < 2500; i++) {
+        statement.addBatch(String.format(insertTemplate, i, i, i, (double) i, "\'" + i + "\'"));
+      }
+      statement.executeBatch();
+      statement.clearBatch();
+      statement.close();
+
+    } catch (Exception e) {
+      e.printStackTrace();
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
index f5bf17f..ba8746d 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
@@ -40,7 +40,6 @@ import org.junit.Test;
 
 public class IoTDBFillQueryIT {
 
-
   private Server server;
   private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
   private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
index 6c458a5..401b056 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
@@ -50,18 +50,22 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 
+/**
+ * Handle aggregation query and construct dataset
+ */
 public class AggregateEngineExecutor {
 
   protected List<Path> selectedSeries;
   protected List<String> aggres;
   protected IExpression expression;
-  protected List<TSDataType> dataTypes;
+  private List<TSDataType> dataTypes;
 
   /**
    * aggregation batch calculation size.
    **/
-  private int aggregateFetchSize;
+  protected int aggregateFetchSize;
 
   /**
    * constructor.
@@ -317,9 +321,9 @@ public class AggregateEngineExecutor {
   /**
    * calculation aggregate result with value filter.
    */
-  private List<AggreResultData> aggregateWithTimeGenerator(
+  protected List<AggreResultData> aggregateWithTimeGenerator(
       List<AggregateFunction> aggregateFunctions,
-      EngineTimeGenerator timestampGenerator,
+      TimeGenerator timestampGenerator,
       List<EngineReaderByTimeStamp> readersOfSelectedSeries)
       throws IOException {
 
@@ -335,6 +339,8 @@ public class AggregateEngineExecutor {
         timeArray[timeArrayLength++] = timestampGenerator.next();
       }
 
+
+
       // cal part of aggregate result
       for (int i = 0; i < readersOfSelectedSeries.size(); i++) {
         aggregateFunctions.get(i).calcAggregationUsingTimestamps(timeArray, timeArrayLength,
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/AbstractNodeConstructor.java b/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/AbstractNodeConstructor.java
index c23c2b9..5d83314 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/AbstractNodeConstructor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/timegenerator/AbstractNodeConstructor.java
@@ -58,10 +58,7 @@ public abstract class AbstractNodeConstructor {
 
   /**
    * Construct not series type node.
-<<<<<<< HEAD
-=======
    *
->>>>>>> master
    * @param expression expression
    * @return Node object
    * @throws FileNodeManagerException FileNodeManagerException


[incubator-iotdb] 05/11: Increase the function of query polling

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 3020603ac6130b06d499b3da10120be5dbadc420
Author: lta <li...@163.com>
AuthorDate: Mon May 20 11:12:12 2019 +0800

    Increase the function of query polling
---
 .../ClusterRpcSingleQueryManager.java              | 84 +++++++------------
 .../cluster/query/utils/ClusterRpcReaderUtils.java | 95 ++++++----------------
 .../iotdb/cluster/rpc/raft/NodeAsClient.java       |  3 +-
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  | 52 ++++++++----
 .../integration/IoTDBAggregationLargeDataIT.java   |  1 -
 5 files changed, 93 insertions(+), 142 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index 6c4f2ad..af4db31 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -37,12 +37,16 @@ import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeries
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.cluster.query.utils.ClusterRpcReaderUtils;
 import org.apache.iotdb.cluster.query.utils.QueryPlanPartitionUtils;
+import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
+import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicQueryDataResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
-import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -140,8 +144,6 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
       String groupId = entry.getKey();
       QueryPlan queryPlan = entry.getValue();
       if (!QPExecutorUtils.canHandleQueryByGroupId(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
-        queryNodes.put(groupId, randomPeer);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         allQueryPlan.put(PathType.SELECT_PATH, queryPlan);
         List<Filter> filterList = new ArrayList<>();
@@ -150,9 +152,12 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
           allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
           filterList = filterGroupEntity.getFilters();
         }
+        /** create request **/
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
       } else {
         dataGroupUsage.add(groupId);
@@ -167,14 +172,15 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     for (Entry<String, FilterGroupEntity> entry : filterGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
       if (!selectPathPlans.containsKey(groupId)) {
-        PeerId randomPeer = RaftUtils.getRandomPeerID(groupId);
         Map<PathType, QueryPlan> allQueryPlan = new EnumMap<>(PathType.class);
         FilterGroupEntity filterGroupEntity = filterGroupEntityMap.get(groupId);
         allQueryPlan.put(PathType.FILTER_PATH, filterGroupEntity.getQueryPlan());
         List<Filter> filterList = filterGroupEntity.getFilters();
+        BasicRequest request = InitSeriesReaderRequest
+            .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
+                allQueryPlan, filterList);
         InitSeriesReaderResponse response = (InitSeriesReaderResponse) ClusterRpcReaderUtils
-            .createClusterSeriesReader(groupId, randomPeer, readDataConsistencyLevel,
-                allQueryPlan, taskId, filterList);
+            .createClusterSeriesReader(groupId, request, this);
         handleInitReaderResponse(groupId, allQueryPlan, response);
       }
     }
@@ -230,18 +236,21 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
         }
       }
     }
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.SELECT_PATH,
-            fetchDataSeries,
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeries,
             queryRounds++);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
     handleFetchDataResponseForSelectPaths(fetchDataSeries, response);
   }
 
   @Override
   public void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException {
-    QuerySeriesDataResponse response = ClusterRpcReaderUtils
-        .fetchBatchData(groupId, queryNodes.get(groupId), taskId, PathType.FILTER_PATH, null,
-            queryRounds++);
+    BasicRequest request = QuerySeriesDataRequest
+        .createFetchDataRequest(groupId, taskId, PathType.FILTER_PATH, null, queryRounds++);
+    QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
+        .handleQueryRequest(request, queryNodes.get(groupId), 0);
+
     handleFetchDataResponseForFilterPaths(groupId, response);
   }
 
@@ -253,9 +262,10 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
       String groupId = entry.getKey();
       List<String> fetchDataFilterSeries = new ArrayList<>();
       entry.getValue().forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
-      QuerySeriesDataByTimestampResponse response = ClusterRpcReaderUtils
-          .fetchBatchDataByTimestamp(groupId, queryNodes.get(groupId), taskId, queryRounds++,
-              batchTimestamp, fetchDataFilterSeries);
+      BasicRequest request = QuerySeriesDataByTimestampRequest
+          .createRequest(groupId, queryRounds++, taskId, batchTimestamp, fetchDataFilterSeries);
+      QuerySeriesDataByTimestampResponse response = (QuerySeriesDataByTimestampResponse) ClusterRpcReaderUtils
+          .handleQueryRequest(request, queryNodes.get(groupId), 0);
       handleFetchDataByTimestampResponseForSelectPaths(fetchDataFilterSeries, response);
     }
   }
@@ -332,7 +342,8 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     for (Entry<String, PeerId> entry : queryNodes.entrySet()) {
       String groupId = entry.getKey();
       PeerId queryNode = entry.getValue();
-      ClusterRpcReaderUtils.releaseRemoteQueryResource(groupId, queryNode, taskId);
+      BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
+      ClusterRpcReaderUtils.handleQueryRequest(request, queryNode, 0);
     }
   }
 
@@ -356,60 +367,27 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
     return queryRounds;
   }
 
-  public void setQueryRounds(long queryRounds) {
-    this.queryRounds = queryRounds;
-  }
-
   public QueryPlan getOriginQueryPlan() {
     return originQueryPlan;
   }
 
-  public void setOriginQueryPlan(QueryPlan queryPlan) {
-    this.originQueryPlan = queryPlan;
-  }
-
-  public Map<String, PeerId> getQueryNodes() {
-    return queryNodes;
-  }
-
-  public void setQueryNodes(
-      Map<String, PeerId> queryNodes) {
-    this.queryNodes = queryNodes;
+  public void setQueryNode(String groupID, PeerId peerId) {
+    this.queryNodes.put(groupID, peerId);
   }
 
   public Map<String, QueryPlan> getSelectPathPlans() {
     return selectPathPlans;
   }
 
-  public void setSelectPathPlans(
-      Map<String, QueryPlan> selectPathPlans) {
-    this.selectPathPlans = selectPathPlans;
-  }
-
   public Map<String, List<Path>> getSelectSeriesByGroupId() {
     return selectSeriesByGroupId;
   }
 
-  public void setSelectSeriesByGroupId(
-      Map<String, List<Path>> selectSeriesByGroupId) {
-    this.selectSeriesByGroupId = selectSeriesByGroupId;
-  }
-
   public Map<Path, ClusterSelectSeriesReader> getSelectSeriesReaders() {
     return selectSeriesReaders;
   }
 
-  public void setSelectSeriesReaders(
-      Map<Path, ClusterSelectSeriesReader> selectSeriesReaders) {
-    this.selectSeriesReaders = selectSeriesReaders;
-  }
-
   public Map<String, FilterGroupEntity> getFilterGroupEntityMap() {
     return filterGroupEntityMap;
   }
-
-  public void setFilterGroupEntityMap(
-      Map<String, FilterGroupEntity> filterGroupEntityMap) {
-    this.filterGroupEntityMap = filterGroupEntityMap;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
index dca2d30..0247bbe 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterRpcReaderUtils.java
@@ -19,32 +19,34 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import com.alipay.sofa.jraft.entity.PeerId;
-import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
 import org.apache.iotdb.cluster.qp.task.QPTask.TaskState;
 import org.apache.iotdb.cluster.qp.task.QueryTask;
 import org.apache.iotdb.cluster.query.PathType;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
 import org.apache.iotdb.cluster.rpc.raft.NodeAsClient;
 import org.apache.iotdb.cluster.rpc.raft.request.BasicRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.CloseSeriesReaderRequest;
-import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
 import org.apache.iotdb.cluster.rpc.raft.response.BasicResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
 import org.apache.iotdb.cluster.utils.RaftUtils;
-import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.cluster.utils.hash.Router;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utils for cluster reader which needs to acquire data from remote query node.
  */
 public class ClusterRpcReaderUtils {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ClusterRpcReaderUtils.class);
+
   /**
    * Count limit to redo a task
    */
@@ -56,72 +58,27 @@ public class ClusterRpcReaderUtils {
 
   /**
    * Create cluster series reader
-   *
-   * @param peerId query node to fetch data
-   * @param readDataConsistencyLevel consistency level of read data
-   * @param taskId task id assigned by coordinator node
    */
-  public static BasicResponse createClusterSeriesReader(String groupId, PeerId peerId,
-      int readDataConsistencyLevel, Map<PathType, QueryPlan> allQueryPlan, String taskId,
-      List<Filter> filterList) throws RaftConnectionException, IOException {
-
-    /** handle request **/
-    BasicRequest request = InitSeriesReaderRequest
-        .createInitialQueryRequest(groupId, taskId, readDataConsistencyLevel,
-            allQueryPlan, filterList);
-    return handleQueryRequest(request, peerId, 0);
-  }
-
-  /**
-   * Fetch batch data for select series in a query without value filter or filter series.
-   *
-   * @param groupId data group id
-   * @param peerId query node id
-   * @param taskId task id of query task
-   * @param pathType type of path
-   * @param fetchDataSeries series list which need to fetch data
-   * @param queryRounds query rounds
-   */
-  public static QuerySeriesDataResponse fetchBatchData(String groupId, PeerId peerId, String taskId,
-      PathType pathType, List<String> fetchDataSeries, long queryRounds)
+  public static BasicResponse createClusterSeriesReader(String groupId, BasicRequest request,
+      ClusterRpcSingleQueryManager manager)
       throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataRequest
-        .createFetchDataRequest(groupId, taskId, pathType, fetchDataSeries, queryRounds);
-    return (QuerySeriesDataResponse) handleQueryRequest(request, peerId, 0);
-  }
 
-  /**
-   * Fetch batch data corresponding to a given list of timestamp for select series in a query with
-   * value filter.
-   *
-   * @param groupId data group id
-   * @param peerId query node id
-   * @param taskId task id of query task
-   * @param queryRounds query rounds
-   * @param batchTimestamp list of valid timestamp
-   * @param fetchDataSeries series list which need to fetch data
-   */
-  public static QuerySeriesDataByTimestampResponse fetchBatchDataByTimestamp(String groupId,
-      PeerId peerId, String taskId, long queryRounds, List<Long> batchTimestamp,
-      List<String> fetchDataSeries)
-      throws RaftConnectionException {
-    BasicRequest request = QuerySeriesDataByTimestampRequest
-        .createRequest(groupId, queryRounds, taskId, batchTimestamp, fetchDataSeries);
-    return (QuerySeriesDataByTimestampResponse) handleQueryRequest(request, peerId, 0);
-  }
-
-  /**
-   * Release remote query resources
-   *
-   * @param groupId data group id
-   * @param peerId target query node
-   * @param taskId unique task id
-   */
-  public static void releaseRemoteQueryResource(String groupId, PeerId peerId, String taskId)
-      throws RaftConnectionException {
-
-    BasicRequest request = CloseSeriesReaderRequest.createReleaseResourceRequest(groupId, taskId);
-    handleQueryRequest(request, peerId, 0);
+    List<PeerId> peerIdList = RaftUtils
+        .getPeerIDList(groupId, Server.getInstance(), Router.getInstance());
+    int randomPeerIndex = RaftUtils.getRandomInt(peerIdList.size());
+    BasicResponse response;
+    for (int i = 0; i < peerIdList.size(); i++) {
+      PeerId peerId = peerIdList.get((i + randomPeerIndex) % peerIdList.size());
+      try {
+        response = handleQueryRequest(request, peerId, 0);
+        manager.setQueryNode(groupId, peerId);
+        return response;
+      } catch (RaftConnectionException e) {
+        LOGGER.error("Can not init series reader in Node<{}> of group<{}>", peerId, groupId, e);
+      }
+    }
+    throw new RaftConnectionException(
+        String.format("Can not init series reader in all nodes of group<%s>.", groupId));
   }
 
   /**
@@ -132,7 +89,7 @@ public class ClusterRpcReaderUtils {
    * @param taskRetryNum retry num of the request
    * @return Response from remote query node
    */
-  private static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
+  public static BasicResponse handleQueryRequest(BasicRequest request, PeerId peerId,
       int taskRetryNum)
       throws RaftConnectionException {
     if (taskRetryNum > TASK_MAX_RETRY) {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
index bab1536..197c7eb 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/NodeAsClient.java
@@ -42,8 +42,7 @@ public interface NodeAsClient {
    * @param peerId leader node of the target group
    *
    */
-  QueryTask syncHandleRequest(BasicRequest request, PeerId peerId)
-      throws RaftConnectionException;
+  QueryTask syncHandleRequest(BasicRequest request, PeerId peerId);
 
   /**
    * Shut down client
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index d5486fb..61fbdca 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -118,18 +118,29 @@ public class RaftUtils {
     return getRandomPeerID(groupId, server, router);
   }
 
+  /**
+   * Get random peer id
+   */
   public static PeerId getRandomPeerID(String groupId, Server server, Router router) {
-    PeerId randomPeerId;
+    List<PeerId> peerIdList = getPeerIDList(groupId, server, router);
+    return peerIdList.get(getRandomInt(peerIdList.size()));
+  }
+
+  /**
+   * Get peer id list by groupid
+   */
+  public static List<PeerId> getPeerIDList(String groupId, Server server, Router router) {
+    List<PeerId> peerIdList = new ArrayList<>();
     if (groupId.equals(ClusterConfig.METADATA_GROUP_ID)) {
       RaftService service = (RaftService) server.getMetadataHolder().getService();
-      List<PeerId> peerIdList = service.getPeerIdList();
-      randomPeerId = peerIdList.get(getRandomInt(peerIdList.size()));
+      peerIdList.addAll(service.getPeerIdList());
     } else {
       PhysicalNode[] physicalNodes = router.getNodesByGroupId(groupId);
-      PhysicalNode node = physicalNodes[getRandomInt(physicalNodes.length)];
-      randomPeerId = getPeerIDFrom(node);
+      for (PhysicalNode node : physicalNodes) {
+        peerIdList.add(getPeerIDFrom(node));
+      }
     }
-    return randomPeerId;
+    return peerIdList;
   }
 
   /**
@@ -196,7 +207,7 @@ public class RaftUtils {
 
   @OnlyForTest
   public static void clearRaftGroupLeader() {
-	  groupLeaderCache.clear();
+    groupLeaderCache.clear();
   }
 
   /**
@@ -339,7 +350,8 @@ public class RaftUtils {
     try {
       LOGGER.debug("Handle null-read in data group for reading.");
       final byte[] reqContext = RaftUtils.createRaftRequestContext();
-      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server.getDataPartitionHolder(groupId);
+      DataPartitionRaftHolder dataPartitionRaftHolder = (DataPartitionRaftHolder) server
+          .getDataPartitionHolder(groupId);
       ((RaftService) dataPartitionRaftHolder.getService()).getNode()
           .readIndex(reqContext, new ReadIndexClosure() {
             @Override
@@ -360,7 +372,7 @@ public class RaftUtils {
     }
   }
 
-  public static Status createErrorStatus(String errorMsg){
+  public static Status createErrorStatus(String errorMsg) {
     Status status = new Status();
     status.setErrorMsg(errorMsg);
     status.setCode(-1);
@@ -386,8 +398,8 @@ public class RaftUtils {
   }
 
   /**
-   * Get all node information of the data group of input storage group.
-   * The first node is the current leader
+   * Get all node information of the data group of input storage group. The first node is the
+   * current leader
    *
    * @param sg storage group ID. If null, return metadata group info
    */
@@ -400,7 +412,8 @@ public class RaftUtils {
     PeerId[] nodes;
     if (sg == null) {
       groupId = ClusterConfig.METADATA_GROUP_ID;
-      List<PeerId> peerIdList = ((RaftService) server.getMetadataHolder().getService()).getPeerIdList();
+      List<PeerId> peerIdList = ((RaftService) server.getMetadataHolder().getService())
+          .getPeerIdList();
       nodes = peerIdList.toArray(new PeerId[peerIdList.size()]);
     } else {
       PhysicalNode[] group = router.routeGroup(sg);
@@ -434,7 +447,8 @@ public class RaftUtils {
     return getDataPartitionOfNode(ip, port, server, router);
   }
 
-  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port, Server server, Router router) {
+  public static Map<String[], String[]> getDataPartitionOfNode(String ip, int port, Server server,
+      Router router) {
     PhysicalNode[][] groups = router.getGroupsNodes(ip, port);
     if (groups == null) {
       return null;
@@ -444,7 +458,8 @@ public class RaftUtils {
     for (int i = 0; i < groups.length; i++) {
       groupSGMap.put(generateStringKey(groups[i]), new ArrayList<>());
     }
-    Set<String> allSGList = ((MetadataStateManchine)((RaftService)server.getMetadataHolder().getService()).getFsm()).getAllStorageGroups();
+    Set<String> allSGList = ((MetadataStateManchine) ((RaftService) server.getMetadataHolder()
+        .getService()).getFsm()).getAllStorageGroups();
     for (String sg : allSGList) {
       String key = generateStringKey(router.routeGroup(sg));
       if (groupSGMap.containsKey(key)) {
@@ -496,7 +511,8 @@ public class RaftUtils {
     RaftService raftService = (RaftService) server.getMetadataHolder().getService();
     metricMap.put(raftService.getGroupId(), getReplicaMetricFromRaftService(raftService, metric));
 
-    router.getAllGroupId().forEach(groupId -> metricMap.put(groupId, getReplicaMetric(groupId, metric)));
+    router.getAllGroupId()
+        .forEach(groupId -> metricMap.put(groupId, getReplicaMetric(groupId, metric)));
     return metricMap;
   }
 
@@ -505,12 +521,14 @@ public class RaftUtils {
       RaftService service = (RaftService) server.getDataPartitionHolder(groupId).getService();
       return getReplicaMetricFromRaftService(service, metric);
     } else {
-      LOGGER.debug("Current host does not contain group {}, all groups are {}.", groupId, server.getDataPartitionHolderMap().keySet());
+      LOGGER.debug("Current host does not contain group {}, all groups are {}.", groupId,
+          server.getDataPartitionHolderMap().keySet());
       return getReplicaMetricFromRemoteNode(groupId, metric);
     }
   }
 
-  private static Map<String, Long> getReplicaMetricFromRaftService(RaftService service, String metric) {
+  private static Map<String, Long> getReplicaMetricFromRaftService(RaftService service,
+      String metric) {
     String groupId = service.getGroupId();
     LOGGER.debug("Get replica metric {} for group {}.", metric, service.getGroupId());
     NodeImpl node = (NodeImpl) service.getNode();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
index 973d3e7..494029c 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
@@ -35,7 +35,6 @@ import java.sql.Statement;
 import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
-import org.apache.iotdb.cluster.integration.Constant;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;


[incubator-iotdb] 01/11: add fill feature

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 167a30e2f30d89dc91670a731fbb7fadac161a27
Author: lta <li...@163.com>
AuthorDate: Thu May 16 14:57:41 2019 +0800

    add fill feature
---
 .../query/executor/ClusterFillEngineExecutor.java  |  66 ++--
 .../cluster/query/executor/ClusterQueryRouter.java |  13 +-
 .../query/manager/common/FillBatchData.java        |  65 ++++
 .../querynode/ClusterLocalSingleQueryManager.java  |  86 +++--
 .../AbstractClusterPointReader.java                |   5 +-
 ...=> AbstractClusterSelectSeriesBatchReader.java} |   2 +-
 ...ava => ClusterFillSelectSeriesBatchReader.java} |  26 +-
 ...or.java => ClusterSelectSeriesBatchReader.java} |  13 +-
 ...ClusterSelectSeriesBatchReaderByTimestamp.java} |   7 +-
 .../query/utils/ClusterTimeValuePairUtils.java     |  26 ++
 .../query/utils/QueryPlanPartitionUtils.java       |  41 ++-
 .../cluster/integration/IoTDBFillQueryIT.java      | 366 +++++++++++++++++++++
 .../IoTDBQueryIT.java}                             |   4 +-
 .../IoTDBQueryLargeDataIT.java}                    |   4 +-
 .../query/manager/ClusterLocalManagerTest.java     |  66 ++--
 .../db/query/executor/FillEngineExecutor.java      |  11 +-
 .../db/query/executor/IFillEngineExecutor.java     |  23 +-
 .../java/org/apache/iotdb/db/query/fill/IFill.java |  14 +-
 .../org/apache/iotdb/db/query/fill/LinearFill.java |   4 +-
 .../apache/iotdb/db/query/fill/PreviousFill.java   |   6 +-
 20 files changed, 701 insertions(+), 147 deletions(-)

diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
similarity index 57%
copy from iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
index 83c5fa9..771637e 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterFillEngineExecutor.java
@@ -16,13 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iotdb.db.query.executor;
+package org.apache.iotdb.cluster.query.executor;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -30,6 +31,7 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.IFillEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -37,55 +39,57 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class FillEngineExecutor {
+public class ClusterFillEngineExecutor implements IFillEngineExecutor {
 
-  private long jobId;
   private List<Path> selectedSeries;
   private long queryTime;
   private Map<TSDataType, IFill> typeIFillMap;
+  private ClusterRpcSingleQueryManager queryManager;
+
 
-  public FillEngineExecutor(long jobId, List<Path> selectedSeries, long queryTime,
-      Map<TSDataType, IFill> typeIFillMap) {
-    this.jobId = jobId;
+  public ClusterFillEngineExecutor(List<Path> selectedSeries, long queryTime,
+      Map<TSDataType, IFill> typeIFillMap, ClusterRpcSingleQueryManager queryManager) {
     this.selectedSeries = selectedSeries;
     this.queryTime = queryTime;
     this.typeIFillMap = typeIFillMap;
+    this.queryManager = queryManager;
   }
 
-  /**
-   * execute fill.
-   *
-   * @param context query context
-   */
+  @Override
   public QueryDataSet execute(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException {
-    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedSeries);
-
+    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+    List<Path> paths = new ArrayList<>();
     List<IFill> fillList = new ArrayList<>();
     List<TSDataType> dataTypeList = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
     for (Path path : selectedSeries) {
-      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
-          .getQueryDataSource(path, context);
-      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      dataTypeList.add(dataType);
-      IFill fill = null;
-      if (!typeIFillMap.containsKey(dataType)) {
-        fill = new PreviousFill(dataType, queryTime, 0);
+      if (selectPathReaders.containsKey(path)) {
+        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+        readers.add(reader);
+        dataTypeList.add(reader.getDataType());
       } else {
-        fill = typeIFillMap.get(dataType).copy(path);
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(path, context);
+        TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+        dataTypeList.add(dataType);
+        IFill fill;
+        if (!typeIFillMap.containsKey(dataType)) {
+          fill = new PreviousFill(dataType, queryTime, 0);
+        } else {
+          fill = typeIFillMap.get(dataType).copy(path);
+        }
+        fill.setDataType(dataType);
+        fill.setQueryTime(queryTime);
+        fill.constructReaders(queryDataSource, context);
+        fillList.add(fill);
+        readers.add(fill.getFillResult());
       }
-      fill.setDataType(dataType);
-      fill.setQueryTime(queryTime);
-      fill.constructReaders(queryDataSource, context);
-      fillList.add(fill);
     }
 
-    List<IPointReader> readers = new ArrayList<>();
-    for (IFill fill : fillList) {
-      readers.add(fill.getFillResult());
-    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
 
     return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypeList, readers);
   }
-
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
index 4211528..2fa4576 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
@@ -29,6 +29,7 @@ import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.executor.FillEngineExecutor;
 import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -108,7 +109,17 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
   @Override
   public QueryDataSet fill(List<Path> fillPaths, long queryTime, Map<TSDataType, IFill> fillType,
       QueryContext context) throws FileNodeManagerException, PathErrorException, IOException {
-    throw new UnsupportedOperationException();
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+    try {
+      queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+
+      ClusterFillEngineExecutor fillEngineExecutor = new ClusterFillEngineExecutor(fillPaths, queryTime,
+          fillType, queryManager);
+      return fillEngineExecutor.execute(context);
+    } catch (IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   public int getReadDataConsistencyLevel() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java
new file mode 100644
index 0000000..3e128e3
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.manager.common;
+
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+/**
+ * <code>FillBatchData</code> is a self-defined data structure which is used in cluster query
+ * process of fill type, which only contains one TimeValuePair and value can be null.
+ */
+public class FillBatchData extends BatchData {
+
+  private TimeValuePair timeValuePair;
+  private boolean isUsed;
+
+  public FillBatchData(TimeValuePair timeValuePair, boolean isUsed) {
+    this.timeValuePair = timeValuePair;
+    this.isUsed = isUsed;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return !isUsed;
+  }
+
+  @Override
+  public void next() {
+    isUsed = true;
+  }
+
+  @Override
+  public long currentTime() {
+    return timeValuePair.getTimestamp();
+  }
+
+  @Override
+  public Object currentValue() {
+    if (!isUsed) {
+      return timeValuePair.getValue() == null ? null : timeValuePair.getValue().getValue();
+    } else {
+      return null;
+    }
+  }
+
+  public TimeValuePair getTimeValuePair() {
+    return isUsed ? null : timeValuePair;
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index 37e3c57..f776477 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -28,9 +28,10 @@ import org.apache.iotdb.cluster.concurrent.pool.QueryTimerManager;
 import org.apache.iotdb.cluster.config.ClusterConstant;
 import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
-import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
+import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFillSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
@@ -39,6 +40,7 @@ import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataReques
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.InitSeriesReaderResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataByTimestampResponse;
 import org.apache.iotdb.cluster.rpc.raft.response.querydata.QuerySeriesDataResponse;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -46,11 +48,14 @@ import org.apache.iotdb.db.metadata.MManager;
 import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
 import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -89,7 +94,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   /**
    * Key is series full path, value is reader of select series
    */
-  private Map<String, AbstractClusterBatchReader> selectSeriesReaders = new HashMap<>();
+  private Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = new HashMap<>();
 
   /**
    * Filter reader
@@ -130,6 +135,8 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
         throw new UnsupportedOperationException();
       } else if (plan instanceof AggregationPlan) {
         throw new UnsupportedOperationException();
+      } else if (plan instanceof FillQueryPlan) {
+        handleFillSeriesRerader(plan, context, response);
       } else {
         if (plan.getExpression() == null
             || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
@@ -147,22 +154,40 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   }
 
   /**
-   * Handle filter series reader
+   * Handle fill series reader
    *
-   * @param plan filter series query plan
+   * @param queryPlan fill query plan
    */
-  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
-      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
-      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
-    QueryDataSet queryDataSet = queryProcessExecutor
-        .processQuery(plan, context);
-    List<Path> paths = plan.getPaths();
-    List<TSDataType> dataTypes = queryDataSet.getDataTypes();
-    for (int i = 0; i < paths.size(); i++) {
-      dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
+  private void handleFillSeriesRerader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) queryPlan;
+
+    List<Path> selectedPaths = queryPlan.getPaths();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    Map<TSDataType, IFill> typeIFillMap = fillQueryPlan.getFillType();
+    for (Path path : selectedPaths) {
+      QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+          .getQueryDataSource(path, context);
+      TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
+      dataTypes.add(dataType);
+      IFill fill;
+      if (!typeIFillMap.containsKey(dataType)) {
+        fill = new PreviousFill(dataType, fillQueryPlan.getQueryTime(), 0);
+      } else {
+        fill = typeIFillMap.get(dataType).copy(path);
+      }
+      fill.setDataType(dataType);
+      fill.setQueryTime(fillQueryPlan.getQueryTime());
+      fill.constructReaders(queryDataSource, context);
+      selectSeriesReaders.put(path.getFullPath(),
+          new ClusterFillSelectSeriesBatchReader(dataType, fill.getFillResult()));
+      dataTypeMap.put(path.getFullPath(), dataType);
     }
-    response.getSeriesDataTypes().put(pathType, dataTypes);
-    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
   }
 
   /**
@@ -188,13 +213,32 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       IPointReader reader = AbstractExecutorWithoutTimeGenerator
           .createSeriesReader(context, paths.get(i), dataTypes, timeFilter);
       selectSeriesReaders
-          .put(fullPath, new ClusterBatchReaderWithoutTimeGenerator(dataTypes.get(i), reader));
+          .put(fullPath, new ClusterSelectSeriesBatchReader(dataTypes.get(i), reader));
       dataTypeMap.put(fullPath, dataTypes.get(i));
     }
     response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
   }
 
   /**
+   * Handle filter series reader
+   *
+   * @param plan filter series query plan
+   */
+  private void handleFilterSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderRequest request, InitSeriesReaderResponse response, PathType pathType)
+      throws PathErrorException, QueryFilterOptimizationException, FileNodeManagerException, ProcessorException, IOException {
+    QueryDataSet queryDataSet = queryProcessExecutor
+        .processQuery(plan, context);
+    List<Path> paths = plan.getPaths();
+    List<TSDataType> dataTypes = queryDataSet.getDataTypes();
+    for (int i = 0; i < paths.size(); i++) {
+      dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
+    }
+    response.getSeriesDataTypes().put(pathType, dataTypes);
+    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+  }
+
+  /**
    * Handle select series query with value filter
    *
   * @param plan query plan
@@ -212,7 +256,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
           .createReaderByTimeStamp(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
       selectSeriesReaders
-          .put(path.getFullPath(), new ClusterBatchReaderByTimestamp(readerByTimeStamp, dataType));
+          .put(path.getFullPath(), new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
       dataTypeMap.put(path.getFullPath(), dataType);
       dataTypeList.add(dataType);
     }
@@ -253,7 +297,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       this.queryRound = targetQueryRounds;
       List<BatchData> batchDataList = new ArrayList<>();
       for (String series : fetchDataSeries) {
-        AbstractClusterBatchReader reader = selectSeriesReaders.get(series);
+        AbstractClusterSelectSeriesBatchReader reader = selectSeriesReaders.get(series);
         batchDataList.add(reader.nextBatch(request.getBatchTimestamp()));
       }
       cachedBatchDataResult = batchDataList;
@@ -309,7 +353,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     return queryRound;
   }
 
-  public Map<String, AbstractClusterBatchReader> getSelectSeriesReaders() {
+  public Map<String, AbstractClusterSelectSeriesBatchReader> getSelectSeriesReaders() {
     return selectSeriesReaders;
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
index 72c7c70..3f73160 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/AbstractClusterPointReader.java
@@ -20,9 +20,9 @@ package org.apache.iotdb.cluster.query.reader.coordinatornode;
 
 import java.io.IOException;
 import org.apache.iotdb.cluster.exception.RaftConnectionException;
+import org.apache.iotdb.cluster.query.utils.ClusterTimeValuePairUtils;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.utils.TimeValuePair;
-import org.apache.iotdb.db.utils.TimeValuePairUtils;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
@@ -63,7 +63,8 @@ public abstract class AbstractClusterPointReader implements IPointReader {
   @Override
   public TimeValuePair next() throws IOException {
     if (hasNext()) {
-      TimeValuePair timeValuePair = TimeValuePairUtils.getCurrentTimeValuePair(currentBatchData);
+      TimeValuePair timeValuePair = ClusterTimeValuePairUtils
+          .getCurrentTimeValuePair(currentBatchData);
       currentTimeValuePair = timeValuePair;
       currentBatchData.next();
       return timeValuePair;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterSelectSeriesBatchReader.java
similarity index 93%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterSelectSeriesBatchReader.java
index b0a86bd..6fe28e2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterSelectSeriesBatchReader.java
@@ -26,7 +26,7 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * Cluster batch reader, which provides another method to get batch data by batch timestamp.
  */
-public abstract class AbstractClusterBatchReader implements IBatchReader {
+public abstract class AbstractClusterSelectSeriesBatchReader implements IBatchReader {
 
   /**
    * Get batch data by batch time
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
similarity index 62%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
index b0a86bd..55639a1 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
@@ -19,21 +19,21 @@
 package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import java.util.List;
-import org.apache.iotdb.db.query.reader.IBatchReader;
+import org.apache.iotdb.cluster.query.manager.common.FillBatchData;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 
-/**
- * Cluster batch reader, which provides another method to get batch data by batch timestamp.
- */
-public abstract class AbstractClusterBatchReader implements IBatchReader {
+public class ClusterFillSelectSeriesBatchReader extends ClusterSelectSeriesBatchReader {
 
-  /**
-   * Get batch data by batch time
-   *
-   * @param batchTime valid batch timestamp
-   * @return corresponding batch data
-   */
-  public abstract BatchData nextBatch(List<Long> batchTime) throws IOException;
+  public ClusterFillSelectSeriesBatchReader(
+      TSDataType dataType,
+      IPointReader reader) {
+    super(dataType, reader);
+  }
 
+  @Override
+  public BatchData nextBatch() throws IOException {
+    return hasNext() ? new FillBatchData(reader.next(), false) : new FillBatchData(null, true);
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
similarity index 85%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
index a59c79c..cbbad2e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderWithoutTimeGenerator.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReader.java
@@ -30,21 +30,22 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader without time generator for cluster which is used in query node.
  */
-public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReader extends
+    AbstractClusterSelectSeriesBatchReader {
 
   /**
    * Data type
    */
-  private TSDataType dataType;
+  protected TSDataType dataType;
 
   /**
    * Point reader
    */
-  private IPointReader reader;
+  protected IPointReader reader;
 
-  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
+  static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterBatchReaderWithoutTimeGenerator(
+  public ClusterSelectSeriesBatchReader(
       TSDataType dataType, IPointReader reader) {
     this.dataType = dataType;
     this.reader = reader;
@@ -80,7 +81,7 @@ public class ClusterBatchReaderWithoutTimeGenerator extends AbstractClusterBatch
   @Override
   public BatchData nextBatch(List<Long> batchTime) throws IOException {
     throw new IOException(
-        "nextBatch(List<Long> batchTime) in ClusterBatchReaderWithoutTimeGenerator is an empty method.");
+        "nextBatch(List<Long> batchTime) in ClusterSelectSeriesBatchReader is an empty method.");
   }
 
   public TSDataType getDataType() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
similarity index 90%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
index b8c36eb..72dce05 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterBatchReaderByTimestamp.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderByTimestamp.java
@@ -27,7 +27,8 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * BatchReader by timestamp for cluster which is used in query node.
  */
-public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
+public class ClusterSelectSeriesBatchReaderByTimestamp extends
+    AbstractClusterSelectSeriesBatchReader {
 
   /**
    * Reader
@@ -39,7 +40,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
    */
   private TSDataType dataType;
 
-  public ClusterBatchReaderByTimestamp(
+  public ClusterSelectSeriesBatchReaderByTimestamp(
       EngineReaderByTimeStamp readerByTimeStamp,
       TSDataType dataType) {
     this.readerByTimeStamp = readerByTimeStamp;
@@ -54,7 +55,7 @@ public class ClusterBatchReaderByTimestamp extends AbstractClusterBatchReader {
   @Override
   public BatchData nextBatch() throws IOException {
     throw new UnsupportedOperationException(
-        "nextBatch() in ClusterBatchReaderByTimestamp is an empty method.");
+        "nextBatch() in ClusterSelectSeriesBatchReaderByTimestamp is an empty method.");
   }
 
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
new file mode 100644
index 0000000..a0ee256
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
@@ -0,0 +1,26 @@
+package org.apache.iotdb.cluster.query.utils;
+
+import org.apache.iotdb.cluster.query.manager.common.FillBatchData;
+import org.apache.iotdb.db.utils.TimeValuePair;
+import org.apache.iotdb.db.utils.TimeValuePairUtils;
+import org.apache.iotdb.tsfile.read.common.BatchData;
+
+public class ClusterTimeValuePairUtils {
+
+  private ClusterTimeValuePairUtils() {
+  }
+
+  /**
+   * get given data's current (time,value) pair.
+   *
+   * @param data -batch data
+   * @return -given data's (time,value) pair
+   */
+  public static TimeValuePair getCurrentTimeValuePair(BatchData data) {
+    if (data instanceof FillBatchData){
+      return ((FillBatchData)data).getTimeValuePair();
+    }else{
+      return TimeValuePairUtils.getCurrentTimeValuePair(data);
+    }
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index be762e1..546282a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -19,6 +19,8 @@
 package org.apache.iotdb.cluster.query.utils;
 
 import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -27,8 +29,11 @@ import org.apache.iotdb.cluster.query.manager.coordinatornode.FilterGroupEntity;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
+import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
+import org.apache.iotdb.db.query.fill.IFill;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
@@ -43,12 +48,17 @@ public class QueryPlanPartitionUtils {
   }
 
   /**
-   * Split query plan with no filter or with only global time filter by group id
+   * Split query plan with no filter, with only global time filter by group id or fill query
    */
   public static void splitQueryPlanWithoutValueFilter(
       ClusterRpcSingleQueryManager singleQueryManager)
       throws PathErrorException {
-    splitQueryPlanBySelectPath(singleQueryManager);
+    QueryPlan queryPLan = singleQueryManager.getOriginQueryPlan();
+    if (queryPLan instanceof FillQueryPlan) {
+      splitFillPlan((FillQueryPlan)queryPLan, singleQueryManager);
+    } else {
+      splitQueryPlanBySelectPath(singleQueryManager);
+    }
   }
 
   /**
@@ -93,7 +103,7 @@ public class QueryPlanPartitionUtils {
     }
   }
 
-  private static void splitGroupByPlan(GroupByPlan queryPlan,
+  private static void splitGroupByPlan(GroupByPlan groupByPlan,
       ClusterRpcSingleQueryManager singleQueryManager) {
     throw new UnsupportedOperationException();
   }
@@ -103,6 +113,31 @@ public class QueryPlanPartitionUtils {
     throw new UnsupportedOperationException();
   }
 
+  private static void splitFillPlan(FillQueryPlan fillQueryPlan,
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    List<Path> selectPaths = fillQueryPlan.getPaths();
+    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
+    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    for (Path path : selectPaths) {
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectSeriesByGroupId.containsKey(groupId)) {
+        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectSeriesByGroupId.get(groupId).add(path);
+    }
+    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+      String groupId = entry.getKey();
+      List<Path> paths = entry.getValue();
+      FillQueryPlan subQueryPlan = new FillQueryPlan();
+      subQueryPlan.setProposer(fillQueryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(fillQueryPlan.getExpression());
+      subQueryPlan.setQueryTime(fillQueryPlan.getQueryTime());
+      subQueryPlan.setFillType(new EnumMap<>(fillQueryPlan.getFillType()));
+      selectPathPlans.put(groupId, subQueryPlan);
+    }
+  }
+
   private static void splitQueryPlan(QueryPlan queryPlan,
       ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
     splitQueryPlanBySelectPath(singleQueryManager);
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
new file mode 100644
index 0000000..f5bf17f
--- /dev/null
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBFillQueryIT.java
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.integration;
+
+import static org.apache.iotdb.cluster.utils.Utils.insertData;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
+import org.apache.iotdb.cluster.entity.Server;
+import org.apache.iotdb.cluster.utils.EnvironmentUtils;
+import org.apache.iotdb.cluster.utils.QPExecutorUtils;
+import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
+import org.apache.iotdb.jdbc.Config;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IoTDBFillQueryIT {
+
+
+  private Server server;
+  private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
+  private static final PhysicalNode localNode = new PhysicalNode(CLUSTER_CONFIG.getIp(),
+      CLUSTER_CONFIG.getPort());
+
+  private static String[] createSQLs = new String[]{
+      "SET STORAGE GROUP TO root.ln.wf01.wt01",
+      "CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=DOUBLE, ENCODING=PLAIN",
+      "CREATE TIMESERIES root.ln.wf01.wt01.hardware WITH DATATYPE=INT32, ENCODING=PLAIN"};
+  private static String[] insertSQLs = new String[]{
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(1, 1.1, false, 11)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(2, 2.2, true, 22)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(3, 3.3, false, 33 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(4, 4.4, false, 44)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(5, 5.5, false, 55)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(100, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(150, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(200, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(250, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(300, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(10, 10.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(20, 20.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(30, 30.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(40, 40.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(50, 50.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(500, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(510, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(520, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(530, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(540, 500.5, false, 550)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(580, 100.1, false, 110)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(590, 200.2, true, 220)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(600, 300.3, false, 330 )",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(610, 400.4, false, 440)",
+      "INSERT INTO root.ln.wf01.wt01(timestamp,temperature,status, hardware) "
+          + "values(620, 500.5, false, 550)",
+  };
+
+  private static final String TIMESTAMP_STR = "Time";
+  private static final String TEMPERATURE_STR = "root.ln.wf01.wt01.temperature";
+  private static final String STATUS_STR = "root.ln.wf01.wt01.status";
+  private static final String HARDWARE_STR = "root.ln.wf01.wt01.hardware";
+
+  @Before
+  public void setUp() throws Exception {
+    EnvironmentUtils.cleanEnv();
+    EnvironmentUtils.closeStatMonitor();
+    EnvironmentUtils.closeMemControl();
+    CLUSTER_CONFIG.createAllPath();
+    server = Server.getInstance();
+    server.start();
+    EnvironmentUtils.envSetUp();
+    Class.forName(Config.JDBC_DRIVER_NAME);
+    prepareData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    server.stop();
+    QPExecutorUtils.setLocalNodeAddr(localNode.getIp(), localNode.getPort());
+    EnvironmentUtils.cleanEnv();
+  }
+
+
+  @Test
+  public void LinearFillTest() throws SQLException {
+    String[] retArray1 = new String[]{
+        "3,3.3,false,33",
+        "70,70.34,false,374",
+        "70,null,null,null",
+        "625,null,false,null"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select temperature,status, hardware from "
+          + "root.ln.wf01.wt01 where time = 3 "
+          + "Fill(int32[linear, 5ms, 5ms], double[linear, 5ms, 5ms], boolean[previous, 5ms])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        System.out.println(ans);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 70 Fill(int32[linear, 500ms, 500ms], "
+          + "double[linear, 500ms, 500ms], boolean[previous, 500ms])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+        System.out.println(ans);
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 70 "
+          + "Fill(int32[linear, 25ms, 25ms], double[linear, 25ms, 25ms], boolean[previous, 5ms])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+        System.out.println(ans);
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 625 "
+          + "Fill(int32[linear, 25ms, 25ms], double[linear, 25ms, 25ms], boolean[previous, 5ms])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        System.out.println(cnt + " " + ans);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      statement.close();
+      Assert.assertEquals(retArray1.length, cnt);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  @Test
+  public void LinearFillRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    LinearFillTest();
+  }
+
+  @Test
+  public void PreviousFillTest() throws SQLException {
+    String[] retArray1 = new String[]{
+        "3,3.3,false,33",
+        "70,50.5,false,550",
+        "70,null,null,null"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 3 "
+          + "Fill(int32[previous, 5ms], double[previous, 5ms], boolean[previous, 5ms])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 70 "
+          + "Fill(int32[previous, 500ms], double[previous, 500ms], boolean[previous, 500ms])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+        System.out.println(ans);
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 70 "
+          + "Fill(int32[previous, 15ms], double[previous, 15ms], boolean[previous, 5ms])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+        System.out.println(ans);
+      }
+      statement.close();
+      Assert.assertEquals(retArray1.length, cnt);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  @Test
+  public void PreviousFillRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    PreviousFillTest();
+  }
+
+  @Test
+  public void EmptyTimeRangeFillTest() throws SQLException {
+    String[] retArray1 = new String[]{
+        "3,3.3,false,33",
+        "70,70.34,false,374"
+    };
+    Connection connection = null;
+    try {
+      connection = DriverManager.
+          getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+      Statement statement = connection.createStatement();
+      boolean hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 3 "
+          + "Fill(int32[linear], double[linear], boolean[previous])");
+
+      Assert.assertTrue(hasResultSet);
+      ResultSet resultSet = statement.getResultSet();
+      int cnt = 0;
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+      }
+      statement.close();
+
+      statement = connection.createStatement();
+      hasResultSet = statement.execute("select temperature,status, hardware "
+          + "from root.ln.wf01.wt01 where time = 70 "
+          + "Fill(int32[linear], double[linear], boolean[previous])");
+
+      Assert.assertTrue(hasResultSet);
+      resultSet = statement.getResultSet();
+      while (resultSet.next()) {
+        String ans = resultSet.getString(TIMESTAMP_STR) + "," + resultSet.getString(TEMPERATURE_STR)
+            + "," + resultSet.getString(STATUS_STR) + "," + resultSet.getString(HARDWARE_STR);
+        Assert.assertEquals(retArray1[cnt], ans);
+        cnt++;
+        System.out.println(ans);
+      }
+      statement.close();
+      Assert.assertEquals(retArray1.length, cnt);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.getMessage());
+    } finally {
+      if (connection != null) {
+        connection.close();
+      }
+    }
+  }
+
+  @Test
+  public void EmptyTimeRangeFillRemoteTest() throws SQLException {
+    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
+    EmptyTimeRangeFillTest();
+  }
+
+  private void prepareData() throws SQLException {
+    try (Connection connection = DriverManager
+        .getConnection(Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root",
+            "root")) {
+      insertData(connection, createSQLs, insertSQLs);
+    }
+  }
+}
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
similarity index 99%
rename from cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java
rename to cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
index f5cc295..90d4474 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryIT.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.query;
+package org.apache.iotdb.cluster.integration;
 
 import static org.apache.iotdb.cluster.utils.Utils.insertBatchData;
 import static org.apache.iotdb.cluster.utils.Utils.insertData;
@@ -43,7 +43,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class ClusterQueryTest {
+public class IoTDBQueryIT {
 
   private Server server;
   private static final ClusterConfig CLUSTER_CONFIG = ClusterDescriptor.getInstance().getConfig();
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
similarity index 99%
rename from cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java
rename to cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
index 223f0dc..926d8a7 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryLargeDataTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBQueryLargeDataIT.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.query;
+package org.apache.iotdb.cluster.integration;
 
 import static org.apache.iotdb.cluster.utils.Utils.insertData;
 import static org.junit.Assert.assertEquals;
@@ -41,7 +41,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-public class ClusterQueryLargeDataTest {
+public class IoTDBQueryLargeDataIT {
 
 
   private Server server;
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
index c09aaa5..b822831 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
@@ -39,10 +39,10 @@ import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.entity.Server;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalQueryManager;
 import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalSingleQueryManager;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterBatchReaderWithoutTimeGenerator;
+import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterBatchReader;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
@@ -220,17 +220,17 @@ public class ClusterLocalManagerTest {
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
         assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
-              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
         }
       }
 
@@ -247,17 +247,17 @@ public class ClusterLocalManagerTest {
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
         assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
-              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
         }
       }
 
@@ -274,17 +274,17 @@ public class ClusterLocalManagerTest {
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
         assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getReader());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
-              ((ClusterBatchReaderWithoutTimeGenerator) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
         }
       }
       statement.close();
@@ -317,18 +317,18 @@ public class ClusterLocalManagerTest {
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertNotNull(selectSeriesReaders);
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
-              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());
         }
       }
 
@@ -351,18 +351,18 @@ public class ClusterLocalManagerTest {
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertNotNull(selectSeriesReaders);
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
-              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());
         }
       }
 
@@ -385,18 +385,18 @@ public class ClusterLocalManagerTest {
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterBatchReader> selectSeriesReaders = singleQueryManager
+        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
             .getSelectSeriesReaders();
         assertNotNull(selectSeriesReaders);
         assertEquals(3, selectSeriesReaders.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterBatchReader> entry : selectSeriesReaders.entrySet()) {
+        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
           String path = entry.getKey();
           TSDataType dataType = typeMap.get(path);
-          AbstractClusterBatchReader clusterBatchReader = entry.getValue();
-          assertNotNull(((ClusterBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+          assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
-              ((ClusterBatchReaderByTimestamp) clusterBatchReader).getDataType());
+              ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());
         }
       }
       statement.close();
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
index 83c5fa9..904bc2d 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/FillEngineExecutor.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iotdb.db.query.executor;
 
 import java.io.IOException;
@@ -37,7 +36,7 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-public class FillEngineExecutor {
+public class FillEngineExecutor implements IFillEngineExecutor{
 
   private long jobId;
   private List<Path> selectedSeries;
@@ -52,11 +51,7 @@ public class FillEngineExecutor {
     this.typeIFillMap = typeIFillMap;
   }
 
-  /**
-   * execute fill.
-   *
-   * @param context query context
-   */
+  @Override
   public QueryDataSet execute(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException {
     QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedSeries);
@@ -68,7 +63,7 @@ public class FillEngineExecutor {
           .getQueryDataSource(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
       dataTypeList.add(dataType);
-      IFill fill = null;
+      IFill fill;
       if (!typeIFillMap.containsKey(dataType)) {
         fill = new PreviousFill(dataType, queryTime, 0);
       } else {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/IFillEngineExecutor.java
similarity index 60%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
rename to iotdb/src/main/java/org/apache/iotdb/db/query/executor/IFillEngineExecutor.java
index b0a86bd..9e207ae 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/AbstractClusterBatchReader.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/IFillEngineExecutor.java
@@ -16,24 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.query.reader.querynode;
+package org.apache.iotdb.db.query.executor;
 
 import java.io.IOException;
-import java.util.List;
-import org.apache.iotdb.db.query.reader.IBatchReader;
-import org.apache.iotdb.tsfile.read.common.BatchData;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
-/**
- * Cluster batch reader, which provides another method to get batch data by batch timestamp.
- */
-public abstract class AbstractClusterBatchReader implements IBatchReader {
+public interface IFillEngineExecutor {
 
   /**
-   * Get batch data by batch time
+   * execute fill.
    *
-   * @param batchTime valid batch timestamp
-   * @return corresponding batch data
+   * @param context query context
    */
-  public abstract BatchData nextBatch(List<Long> batchTime) throws IOException;
-
+  QueryDataSet execute(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException;
 }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/IFill.java b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/IFill.java
index d64b49a..9f9a050 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/IFill.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/IFill.java
@@ -16,10 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iotdb.db.query.fill;
 
 import java.io.IOException;
+import java.io.Serializable;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
@@ -33,12 +33,13 @@ import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.filter.TimeFilter;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
-public abstract class IFill {
+public abstract class IFill implements Serializable {
 
+  private static final long serialVersionUID = -357739398193527464L;
   long queryTime;
   TSDataType dataType;
 
-  IPointReader allDataReader;
+  transient IPointReader allDataReader;
 
   public IFill(TSDataType dataType, long queryTime) {
     this.dataType = dataType;
@@ -106,8 +107,11 @@ public abstract class IFill {
 
     @Override
     public TimeValuePair next() {
-      isUsed = true;
-      return pair;
+      if (!isUsed) {
+        isUsed = true;
+        return pair;
+      }
+      return null;
     }
 
     @Override
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/LinearFill.java b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/LinearFill.java
index dc46082..399e2b8 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/LinearFill.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/LinearFill.java
@@ -20,6 +20,7 @@
 package org.apache.iotdb.db.query.fill;
 
 import java.io.IOException;
+import java.io.Serializable;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.UnSupportedFillTypeException;
 import org.apache.iotdb.db.query.context.QueryContext;
@@ -29,8 +30,9 @@ import org.apache.iotdb.db.utils.TsPrimitiveType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
-public class LinearFill extends IFill {
+public class LinearFill extends IFill implements Serializable {
 
+  private static final long serialVersionUID = -1774599523110930574L;
   private long beforeRange;
   private long afterRange;
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/PreviousFill.java b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/PreviousFill.java
index b75fb4f..93c50a8 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/fill/PreviousFill.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/fill/PreviousFill.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.db.query.fill;
 
 import java.io.IOException;
+import java.io.Serializable;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -26,8 +27,9 @@ import org.apache.iotdb.db.utils.TimeValuePair;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
-public class PreviousFill extends IFill {
+public class PreviousFill extends IFill implements Serializable {
 
+  private static final long serialVersionUID = -7946089166912781464L;
   private long beforeRange;
 
   public PreviousFill(TSDataType dataType, long queryTime, long beforeRange) {
@@ -57,7 +59,7 @@ public class PreviousFill extends IFill {
   @Override
   public IPointReader getFillResult() throws IOException {
     TimeValuePair beforePair = null;
-    TimeValuePair cachedPair = null;
+    TimeValuePair cachedPair;
     while (allDataReader.hasNext()) {
       cachedPair = allDataReader.next();
       if (cachedPair.getTimestamp() <= queryTime) {


[incubator-iotdb] 07/11: add it test of aggregation function

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 9487dbfc0a68fb62efa5270cc4fb080ea4e2a3a0
Author: lta <li...@163.com>
AuthorDate: Mon May 20 17:27:57 2019 +0800

    add it test of aggregation function
---
 .../query/{manager => }/common/FillBatchData.java  |   2 +-
 .../executor/ClusterAggregateEngineExecutor.java   |   1 -
 .../ClusterRpcSingleQueryManager.java              |  26 ++---
 .../IClusterRpcSingleQueryManager.java             |   4 +-
 .../coordinatornode/SelectSeriesGroupEntity.java   |   1 -
 .../querynode/ClusterLocalSingleQueryManager.java  |  76 +++++++++------
 .../coordinatornode/ClusterFilterSeriesReader.java |   2 +-
 .../ClusterFillSelectSeriesBatchReader.java        |   2 +-
 ...a => ClusterFilterSeriesBatchReaderEntity.java} |   6 +-
 ...a => ClusterSelectSeriesBatchReaderEntity.java} |  42 +++++++--
 ... => IClusterFilterSeriesBatchReaderEntity.java} |   2 +-
 .../query/utils/ClusterTimeValuePairUtils.java     |   2 +-
 .../QuerySeriesDataByTimestampRequest.java         |  17 +---
 .../request/querydata/QuerySeriesDataRequest.java  |  16 ++--
 .../integration/IoTDBAggregationLargeDataIT.java   |   3 +-
 .../integration/IoTDBAggregationSmallDataIT.java   |  94 ------------------
 .../query/manager/ClusterLocalManagerTest.java     | 105 +++++++++++----------
 17 files changed, 160 insertions(+), 241 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/FillBatchData.java
similarity index 97%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/common/FillBatchData.java
index 3e128e3..2d17d0c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/common/FillBatchData.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/common/FillBatchData.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.iotdb.cluster.query.manager.common;
+package org.apache.iotdb.cluster.query.common;
 
 import org.apache.iotdb.db.utils.TimeValuePair;
 import org.apache.iotdb.tsfile.read.common.BatchData;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
index b34afa1..2cf4e87 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -69,7 +69,6 @@ import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
 public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
 
   private ClusterRpcSingleQueryManager queryManager;
-  private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
 
   public ClusterAggregateEngineExecutor(List<Path> selectedSeries, List<String> aggres,
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
index 905ce1b..05bf9df 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/ClusterRpcSingleQueryManager.java
@@ -211,29 +211,26 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
 
   @Override
   public void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException {
-    List<String> fetchDataSeries = new ArrayList<>();
-    List<Integer> selectSeriesIndexs = new ArrayList<>();
+    List<Integer> fetchDataSeriesIndexs = new ArrayList<>();
     List<Path> selectSeries = selectSeriesGroupEntityMap.get(groupId).getSelectPaths();
     List<ClusterSelectSeriesReader> seriesReaders = selectSeriesGroupEntityMap.get(groupId)
         .getSelectSeriesReaders();
     for (int i = 0; i < selectSeries.size(); i++) {
-      Path series = selectSeries.get(i);
       if (seriesReaders.get(i).enableFetchData()) {
-        fetchDataSeries.add(series.getFullPath());
-        selectSeriesIndexs.add(i);
+        fetchDataSeriesIndexs.add(i);
       }
     }
     BasicRequest request = QuerySeriesDataRequest
-        .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeries,
+        .createFetchDataRequest(groupId, taskId, PathType.SELECT_PATH, fetchDataSeriesIndexs,
             queryRounds++);
     QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
         .handleQueryRequest(request, queryNodes.get(groupId), 0);
 
-    handleFetchDataResponseForSelectPaths(groupId, selectSeriesIndexs, response);
+    handleFetchDataResponseForSelectPaths(groupId, fetchDataSeriesIndexs, response);
   }
 
   @Override
-  public void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException {
+  public void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException {
     BasicRequest request = QuerySeriesDataRequest
         .createFetchDataRequest(groupId, taskId, PathType.FILTER_PATH, null, queryRounds++);
     QuerySeriesDataResponse response = (QuerySeriesDataResponse) ClusterRpcReaderUtils
@@ -248,27 +245,22 @@ public class ClusterRpcSingleQueryManager implements IClusterRpcSingleQueryManag
       throws RaftConnectionException {
     for (Entry<String, SelectSeriesGroupEntity> entry : selectSeriesGroupEntityMap.entrySet()) {
       String groupId = entry.getKey();
-      List<String> fetchDataFilterSeries = new ArrayList<>();
-      entry.getValue().getSelectPaths()
-          .forEach(path -> fetchDataFilterSeries.add(path.getFullPath()));
       BasicRequest request = QuerySeriesDataByTimestampRequest
-          .createRequest(groupId, queryRounds++, taskId, batchTimestamp, fetchDataFilterSeries);
+          .createRequest(groupId, queryRounds++, taskId, batchTimestamp);
       QuerySeriesDataByTimestampResponse response = (QuerySeriesDataByTimestampResponse) ClusterRpcReaderUtils
           .handleQueryRequest(request, queryNodes.get(groupId), 0);
-      handleFetchDataByTimestampResponseForSelectPaths(groupId, fetchDataFilterSeries, response);
+      handleFetchDataByTimestampResponseForSelectPaths(groupId, response);
     }
   }
 
   /**
    * Handle response of fetching data, and add batch data to corresponding reader.
    */
-  private void handleFetchDataByTimestampResponseForSelectPaths(String groupId,
-      List<String> fetchDataSeries,
-      BasicQueryDataResponse response) {
+  private void handleFetchDataByTimestampResponseForSelectPaths(String groupId, BasicQueryDataResponse response) {
     List<BatchData> batchDataList = response.getSeriesBatchData();
     List<ClusterSelectSeriesReader> selectSeriesReaders = selectSeriesGroupEntityMap.get(groupId)
         .getSelectSeriesReaders();
-    for (int i = 0; i < fetchDataSeries.size(); i++) {
+    for (int i = 0; i < selectSeriesReaders.size(); i++) {
       BatchData batchData = batchDataList.get(i);
       selectSeriesReaders.get(i).addBatchData(batchData, true);
     }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
index d6ca0d7..19d8f25 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/IClusterRpcSingleQueryManager.java
@@ -53,11 +53,11 @@ public interface IClusterRpcSingleQueryManager {
   void fetchBatchDataForSelectPaths(String groupId) throws RaftConnectionException;
 
   /**
-   * Fetch data for filter path.
+   * Fetch data for all filter paths.
    *
    * @param groupId data group id
    */
-  void fetchBatchDataForFilterPaths(String groupId) throws RaftConnectionException;
+  void fetchBatchDataForAllFilterPaths(String groupId) throws RaftConnectionException;
 
   /**
    * Fetch batch data for all select paths by batch timestamp. If target data can be fetched, skip
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
index 9f35117..1de26bd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/coordinatornode/SelectSeriesGroupEntity.java
@@ -20,7 +20,6 @@ package org.apache.iotdb.cluster.query.manager.coordinatornode;
 
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterFilterSeriesReader;
 import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.tsfile.read.common.Path;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index 0f2cf62..8799be2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -30,10 +30,11 @@ import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterFillSelectSeriesBatchReader;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReaderEntity;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReaderEntity;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataRequest;
@@ -79,6 +80,11 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   private String groupId;
 
   /**
+   * Mark whether this manager has initialized or not.
+   */
+  private boolean isInit = false;
+
+  /**
    * Timer of Query, if the time is up, close query resource.
    */
   private ScheduledFuture<?> queryTimer;
@@ -94,14 +100,14 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   private long queryRound = -1;
 
   /**
-   * Key is series full path, value is reader of select series
+   * Select reader entity
    */
-  private Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = new HashMap<>();
+  private ClusterSelectSeriesBatchReaderEntity selectReaderEntity;
 
   /**
-   * Filter reader
+   * Filter reader entity
    */
-  private IClusterFilterSeriesBatchReader filterReader;
+  private IClusterFilterSeriesBatchReaderEntity filterReaderEntity;
 
   /**
    * Key is series full path, value is data type of series
@@ -127,11 +133,17 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   @Override
   public InitSeriesReaderResponse createSeriesReader(InitSeriesReaderRequest request)
       throws IOException, PathErrorException, FileNodeManagerException, ProcessorException, QueryFilterOptimizationException, ClassNotFoundException {
+    if (isInit) {
+      throw new IOException(String
+          .format("ClusterLocalSingleQueryManager has already initialized. Job id = %s", jobId));
+    }
+    isInit = true;
     this.groupId = request.getGroupID();
     InitSeriesReaderResponse response = new InitSeriesReaderResponse(groupId);
     QueryContext context = new QueryContext(jobId);
     Map<PathType, QueryPlan> queryPlanMap = request.getAllQueryPlan();
     if (queryPlanMap.containsKey(PathType.SELECT_PATH)) {
+      selectReaderEntity = new ClusterSelectSeriesBatchReaderEntity();
       QueryPlan plan = queryPlanMap.get(PathType.SELECT_PATH);
       if (plan instanceof GroupByPlan) {
         throw new UnsupportedOperationException();
@@ -179,8 +191,9 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       fill.setDataType(dataType);
       fill.setQueryTime(fillQueryPlan.getQueryTime());
       fill.constructReaders(queryDataSource, context);
-      selectSeriesReaders.put(path.getFullPath(),
-          new ClusterFillSelectSeriesBatchReader(dataType, fill.getFillResult()));
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterFillSelectSeriesBatchReader(dataType, fill.getFillResult()));
       dataTypeMap.put(path.getFullPath(), dataType);
     }
 
@@ -195,7 +208,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    */
   private void handleAggreSeriesReader(QueryPlan queryPlan, QueryContext context,
       InitSeriesReaderResponse response)
-      throws FileNodeManagerException, PathErrorException, IOException, QueryFilterOptimizationException, ProcessorException {
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
     if (queryPlan.getExpression() == null
         || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
       handleAggreSeriesReaderWithoutTimeGenerator(queryPlan, context, response);
@@ -227,7 +240,8 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
 
     for (int i = 0; i < selectedPaths.size(); i++) {
       Path path = selectedPaths.get(i);
-      selectSeriesReaders.put(path.getFullPath(),
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity.addReaders(
           new ClusterSelectSeriesBatchReader(dataTypes.get(i), readers.get(i)));
       dataTypeMap.put(path.getFullPath(), dataTypes.get(i));
     }
@@ -275,8 +289,8 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       String fullPath = paths.get(i).getFullPath();
       IPointReader reader = AbstractExecutorWithoutTimeGenerator
           .createSeriesReader(context, paths.get(i), dataTypes, timeFilter);
-      selectSeriesReaders
-          .put(fullPath, new ClusterSelectSeriesBatchReader(dataTypes.get(i), reader));
+      selectReaderEntity.addPath(fullPath);
+      selectReaderEntity.addReaders(new ClusterSelectSeriesBatchReader(dataTypes.get(i), reader));
       dataTypeMap.put(fullPath, dataTypes.get(i));
     }
     response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
@@ -298,7 +312,8 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       dataTypeMap.put(paths.get(i).getFullPath(), dataTypes.get(i));
     }
     response.getSeriesDataTypes().put(pathType, dataTypes);
-    filterReader = new ClusterFilterSeriesBatchReader(queryDataSet, paths, request.getFilterList());
+    filterReaderEntity = new ClusterFilterSeriesBatchReaderEntity(queryDataSet, paths,
+        request.getFilterList());
   }
 
   /**
@@ -318,9 +333,9 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       EngineReaderByTimeStamp readerByTimeStamp = ClusterSeriesReaderFactory
           .createReaderByTimeStamp(path, context);
       TSDataType dataType = MManager.getInstance().getSeriesType(path.getFullPath());
-      selectSeriesReaders
-          .put(path.getFullPath(),
-              new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
+      selectReaderEntity.addPath(path.getFullPath());
+      selectReaderEntity
+          .addReaders(new ClusterSelectSeriesBatchReaderByTimestamp(readerByTimeStamp, dataType));
       dataTypeMap.put(path.getFullPath(), dataType);
       dataTypeList.add(dataType);
     }
@@ -336,10 +351,9 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
       PathType pathType = request.getPathType();
-      List<String> paths = request.getSeriesPaths();
       List<BatchData> batchDataList;
       if (pathType == PathType.SELECT_PATH) {
-        batchDataList = readSelectSeriesBatchData(paths);
+        batchDataList = readSelectSeriesBatchData(request.getSeriesPathIndexs());
       } else {
         batchDataList = readFilterSeriesBatchData();
       }
@@ -355,13 +369,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       throws IOException {
     resetQueryTimer();
     QuerySeriesDataByTimestampResponse response = new QuerySeriesDataByTimestampResponse(groupId);
-    List<String> fetchDataSeries = request.getFetchDataSeries();
     long targetQueryRounds = request.getQueryRounds();
     if (targetQueryRounds != this.queryRound) {
       this.queryRound = targetQueryRounds;
+      List<AbstractClusterSelectSeriesBatchReader> readers = selectReaderEntity.getAllReaders();
       List<BatchData> batchDataList = new ArrayList<>();
-      for (String series : fetchDataSeries) {
-        AbstractClusterSelectSeriesBatchReader reader = selectSeriesReaders.get(series);
+      for (AbstractClusterSelectSeriesBatchReader reader : readers) {
         batchDataList.add(reader.nextBatch(request.getBatchTimestamp()));
       }
       cachedBatchDataResult = batchDataList;
@@ -378,14 +391,15 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
   }
 
   /**
-   * Read batch data of select series
+   * Read batch data of select series by series index
    *
-   * @param paths all series to query
+   * @param seriesIndexs all series index to query
    */
-  private List<BatchData> readSelectSeriesBatchData(List<String> paths) throws IOException {
+  private List<BatchData> readSelectSeriesBatchData(List<Integer> seriesIndexs) throws IOException {
     List<BatchData> batchDataList = new ArrayList<>();
-    for (String fullPath : paths) {
-      batchDataList.add(selectSeriesReaders.get(fullPath).nextBatch());
+    for (int index : seriesIndexs) {
+      AbstractClusterSelectSeriesBatchReader reader = selectReaderEntity.getReaderByIndex(index);
+      batchDataList.add(reader.nextBatch());
     }
     return batchDataList;
   }
@@ -396,7 +410,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    * @return batch data of all filter series
    */
   private List<BatchData> readFilterSeriesBatchData() throws IOException {
-    return filterReader.nextBatchList();
+    return filterReaderEntity.nextBatchList();
   }
 
   public String getGroupId() {
@@ -417,12 +431,12 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     return queryRound;
   }
 
-  public Map<String, AbstractClusterSelectSeriesBatchReader> getSelectSeriesReaders() {
-    return selectSeriesReaders;
+  public ClusterSelectSeriesBatchReaderEntity getSelectReaderEntity() {
+    return selectReaderEntity;
   }
 
-  public IClusterFilterSeriesBatchReader getFilterReader() {
-    return filterReader;
+  public IClusterFilterSeriesBatchReaderEntity getFilterReaderEntity() {
+    return filterReaderEntity;
   }
 
   public Map<String, TSDataType> getDataTypeMap() {
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
index 0c0287e..9d60ae2 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/coordinatornode/ClusterFilterSeriesReader.java
@@ -83,7 +83,7 @@ public class ClusterFilterSeriesReader extends AbstractClusterPointReader {
   @Override
   protected void updateCurrentBatchData() throws RaftConnectionException {
     if (batchDataList.isEmpty() && !remoteDataFinish) {
-      queryManager.fetchBatchDataForFilterPaths(groupId);
+      queryManager.fetchBatchDataForAllFilterPaths(groupId);
     }
     if (!batchDataList.isEmpty()) {
       currentBatchData = batchDataList.removeFirst();
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
index 55639a1..fadd92f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFillSelectSeriesBatchReader.java
@@ -19,7 +19,7 @@
 package org.apache.iotdb.cluster.query.reader.querynode;
 
 import java.io.IOException;
-import org.apache.iotdb.cluster.query.manager.common.FillBatchData;
+import org.apache.iotdb.cluster.query.common.FillBatchData;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
similarity index 93%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
index 1cd357e..65f8c1c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterFilterSeriesBatchReaderEntity.java
@@ -32,9 +32,9 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
 /**
- * Batch reader for all filter paths.
+ * Batch reader entity for all filter paths.
  */
-public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatchReader {
+public class ClusterFilterSeriesBatchReaderEntity implements IClusterFilterSeriesBatchReaderEntity {
 
   private List<Path> allFilterPath;
 
@@ -44,7 +44,7 @@ public class ClusterFilterSeriesBatchReader implements IClusterFilterSeriesBatch
 
   private static final ClusterConfig CLUSTER_CONF = ClusterDescriptor.getInstance().getConfig();
 
-  public ClusterFilterSeriesBatchReader(QueryDataSet queryDataSet, List<Path> allFilterPath,
+  public ClusterFilterSeriesBatchReaderEntity(QueryDataSet queryDataSet, List<Path> allFilterPath,
       List<Filter> filters) {
     this.queryDataSet = queryDataSet;
     this.allFilterPath = allFilterPath;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
similarity index 52%
copy from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
copy to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
index 218d68b..f0dea38 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/ClusterSelectSeriesBatchReaderEntity.java
@@ -18,19 +18,45 @@
  */
 package org.apache.iotdb.cluster.query.reader.querynode;
 
-import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
-import org.apache.iotdb.tsfile.read.common.BatchData;
 
 /**
- * Batch reader for filter series which is used in query node.
+ * Batch reader entity for all select paths.
  */
-public interface IClusterFilterSeriesBatchReader {
-
-  boolean hasNext() throws IOException;
+public class ClusterSelectSeriesBatchReaderEntity {
+  /**
+   * All select paths
+   */
+  List<String> paths;
 
   /**
-   * Get next batch data of all filter series.
+   * All select readers
    */
-  List<BatchData> nextBatchList() throws IOException;
+  List<AbstractClusterSelectSeriesBatchReader> readers;
+
+  public ClusterSelectSeriesBatchReaderEntity() {
+    paths = new ArrayList<>();
+    readers = new ArrayList<>();
+  }
+
+  public void addPath(String path) {
+    this.paths.add(path);
+  }
+
+  public void addReaders(AbstractClusterSelectSeriesBatchReader reader) {
+    this.readers.add(reader);
+  }
+
+  public List<AbstractClusterSelectSeriesBatchReader> getAllReaders() {
+    return readers;
+  }
+
+  public AbstractClusterSelectSeriesBatchReader getReaderByIndex(int index){
+    return readers.get(index);
+  }
+
+  public List<String> getAllPaths() {
+    return paths;
+  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReaderEntity.java
similarity index 95%
rename from cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
rename to cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReaderEntity.java
index 218d68b..a045e2a 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReader.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/querynode/IClusterFilterSeriesBatchReaderEntity.java
@@ -25,7 +25,7 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
 /**
  * Batch reader for filter series which is used in query node.
  */
-public interface IClusterFilterSeriesBatchReader {
+public interface IClusterFilterSeriesBatchReaderEntity {
 
   boolean hasNext() throws IOException;
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
index 0f05cf2..7525368 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/ClusterTimeValuePairUtils.java
@@ -18,7 +18,7 @@
  */
 package org.apache.iotdb.cluster.query.utils;
 
-import org.apache.iotdb.cluster.query.manager.common.FillBatchData;
+import org.apache.iotdb.cluster.query.common.FillBatchData;
 import org.apache.iotdb.db.utils.TimeValuePair;
 import org.apache.iotdb.db.utils.TimeValuePairUtils;
 import org.apache.iotdb.tsfile.read.common.BatchData;
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
index 351e6eb..cbcef15 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataByTimestampRequest.java
@@ -39,21 +39,16 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
    */
   private List<Long> batchTimestamp;
 
-  /**
-   * Series to fetch data from remote query node
-   */
-  private List<String> fetchDataSeries;
-
   private QuerySeriesDataByTimestampRequest(String groupID) {
     super(groupID);
   }
 
-  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds, String taskId, List<Long> batchTimestamp, List<String> fetchDataSeries){
+  public static QuerySeriesDataByTimestampRequest createRequest(String groupId, long queryRounds,
+      String taskId, List<Long> batchTimestamp) {
     QuerySeriesDataByTimestampRequest request = new QuerySeriesDataByTimestampRequest(groupId);
     request.queryRounds = queryRounds;
     request.taskId = taskId;
     request.batchTimestamp = batchTimestamp;
-    request.fetchDataSeries = fetchDataSeries;
     return request;
   }
 
@@ -80,12 +75,4 @@ public class QuerySeriesDataByTimestampRequest extends BasicQueryRequest {
   public void setBatchTimestamp(List<Long> batchTimestamp) {
     this.batchTimestamp = batchTimestamp;
   }
-
-  public List<String> getFetchDataSeries() {
-    return fetchDataSeries;
-  }
-
-  public void setFetchDataSeries(List<String> fetchDataSeries) {
-    this.fetchDataSeries = fetchDataSeries;
-  }
 }
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
index 554b8c1..e0fc23c 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/rpc/raft/request/querydata/QuerySeriesDataRequest.java
@@ -46,9 +46,9 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   private PathType pathType;
 
   /**
-   * Key is series type, value is series list
+   * list of series path index.
    */
-  private List<String> seriesPaths = new ArrayList<>();
+  private List<Integer> seriesPathIndexs = new ArrayList<>();
 
   private QuerySeriesDataRequest(String groupID, String taskId) {
     super(groupID);
@@ -56,10 +56,10 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
   }
 
   public static QuerySeriesDataRequest createFetchDataRequest(String groupId, String taskId,
-      PathType pathType, List<String> seriesPaths, long queryRounds) {
+      PathType pathType, List<Integer> seriesPathIndexs, long queryRounds) {
     QuerySeriesDataRequest request = new QuerySeriesDataRequest(groupId, taskId);
     request.pathType = pathType;
-    request.seriesPaths = seriesPaths;
+    request.seriesPathIndexs = seriesPathIndexs;
     request.queryRounds = queryRounds;
     return request;
   }
@@ -88,11 +88,7 @@ public class QuerySeriesDataRequest extends BasicQueryRequest {
     this.pathType = pathType;
   }
 
-  public List<String> getSeriesPaths() {
-    return seriesPaths;
-  }
-
-  public void setSeriesPaths(List<String> seriesPaths) {
-    this.seriesPaths = seriesPaths;
+  public List<Integer> getSeriesPathIndexs() {
+    return seriesPathIndexs;
   }
 }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
index 494029c..45ab923 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationLargeDataIT.java
@@ -163,7 +163,6 @@ public class IoTDBAggregationLargeDataIT {
   }
 
   @Test
-  @Ignore
   public void remoteTest() throws ClassNotFoundException, SQLException {
     QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
     insertSQL();
@@ -183,7 +182,7 @@ public class IoTDBAggregationLargeDataIT {
     maxTimeAggreWithMultiFilterTest();
     minValueAggreWithMultiFilterTest();
     maxValueAggreWithMultiFilterTest();
-//    meanAggreWithMultiFilterTest();
+    meanAggreWithMultiFilterTest();
     sumAggreWithMultiFilterTest();
     firstAggreWithMultiFilterTest();
   }
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java
index 02dc01f..162c5ac 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/integration/IoTDBAggregationSmallDataIT.java
@@ -181,13 +181,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void countOnlyTimeFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    countOnlyTimeFilterTest();
-  }
-
-  @Test
   public void functionsNoFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,4,0,6,1",
@@ -286,13 +279,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void functionsNoFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    functionsNoFilterTest();
-  }
-
-  @Test
   public void lastAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,22222,55555"
@@ -322,13 +308,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void lastAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    lastAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void firstAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,99,180"
@@ -358,13 +337,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void firstAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    firstAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void sumAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,22321.0,55934.0,1029"
@@ -394,13 +366,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void sumAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    sumAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void meanAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,11160.5,18645,206"
@@ -430,13 +395,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void meanAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    meanAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void countAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,2,3,5,1,0"
@@ -468,13 +426,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void countAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    countAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void minTimeAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,104,1,2,101,100"
@@ -507,13 +458,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void minTimeAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    minTimeAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void maxTimeAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,105,105,105,102,100"
@@ -546,13 +490,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void maxTimeAggreWithSingleFilterRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    maxTimeAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void minValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,90,180,2.22,ddddd,true"
@@ -588,14 +525,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void minValueAggreWithSingleFilterRemoteTest()
-      throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    minValueAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void maxValueAggreWithSingleFilterTest() throws ClassNotFoundException, SQLException {
     String[] retArray = new String[]{
         "0,99,50000,11.11,fffff,true"
@@ -630,14 +559,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void maxValueAggreWithSingleFilterRemoteTest()
-      throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    maxValueAggreWithSingleFilterTest();
-  }
-
-  @Test
   public void countAggreWithMultiMultiFilterTest() {
     String[] retArray = new String[]{
         "0,2",
@@ -667,14 +588,6 @@ public class IoTDBAggregationSmallDataIT {
   }
 
   @Test
-  @Ignore
-  public void countAggreWithMultiMultiFilterRemoteTest()
-      throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    countAggreWithMultiMultiFilterTest();
-  }
-
-  @Test
   public void selectAllSQLTest() throws ClassNotFoundException, SQLException {
     //d0s0,d0s1,d0s2,d0s3,d1s0
     String[] retArray = new String[]{
@@ -739,13 +652,6 @@ public class IoTDBAggregationSmallDataIT {
     }
   }
 
-  @Test
-  @Ignore
-  public void selectAllSQLRemoteTest() throws ClassNotFoundException, SQLException {
-    QPExecutorUtils.setLocalNodeAddr("0.0.0.0", 0);
-    selectAllSQLTest();
-  }
-
   private static void insertSQL() {
     try (Connection connection = DriverManager.getConnection
         (Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root")) {
diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
index b822831..e71e489 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/query/manager/ClusterLocalManagerTest.java
@@ -42,7 +42,8 @@ import org.apache.iotdb.cluster.query.manager.querynode.ClusterLocalSingleQueryM
 import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReaderEntity;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderEntity;
 import org.apache.iotdb.cluster.utils.EnvironmentUtils;
 import org.apache.iotdb.cluster.utils.QPExecutorUtils;
 import org.apache.iotdb.cluster.utils.hash.PhysicalNode;
@@ -219,15 +220,15 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
-        assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertEquals(3, selectSeriesReaders.size());
+        assertNull(singleQueryManager.getFilterReaderEntity());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        assertEquals(3, selectSeriesBatchReaderEntity.getAllReaders().size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
@@ -246,15 +247,15 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
-        assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertEquals(3, selectSeriesReaders.size());
+        assertNull(singleQueryManager.getFilterReaderEntity());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        assertEquals(3, selectSeriesBatchReaderEntity.getAllReaders().size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
@@ -273,15 +274,15 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(0, singleQueryManager.getQueryRound());
-        assertNull(singleQueryManager.getFilterReader());
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertEquals(3, selectSeriesReaders.size());
+        assertNull(singleQueryManager.getFilterReaderEntity());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        assertEquals(3, selectSeriesBatchReaderEntity.getAllReaders().size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReader) clusterBatchReader).getReader());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReader) clusterBatchReader).getDataType());
@@ -310,22 +311,22 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(3, singleQueryManager.getQueryRound());
-        ClusterFilterSeriesBatchReader filterReader = (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        ClusterFilterSeriesBatchReaderEntity filterReader = (ClusterFilterSeriesBatchReaderEntity) singleQueryManager.getFilterReaderEntity();
         assertNotNull(filterReader);
         List<Path> allFilterPaths = new ArrayList<>();
         allFilterPaths.add(new Path("root.vehicle.d0.s0"));
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertNotNull(selectSeriesReaders);
-        assertEquals(3, selectSeriesReaders.size());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        assertNotNull(selectSeriesBatchReaderEntity.getAllReaders());
+        assertEquals(3, selectSeriesBatchReaderEntity.getAllReaders().size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());
@@ -344,22 +345,22 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(3, singleQueryManager.getQueryRound());
-        ClusterFilterSeriesBatchReader filterReader = (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        ClusterFilterSeriesBatchReaderEntity filterReader = (ClusterFilterSeriesBatchReaderEntity) singleQueryManager.getFilterReaderEntity();
         assertNotNull(filterReader);
         List<Path> allFilterPaths = new ArrayList<>();
         allFilterPaths.add(new Path("root.vehicle.d0.s0"));
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertNotNull(selectSeriesReaders);
-        assertEquals(3, selectSeriesReaders.size());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        assertNotNull(readers);
+        assertEquals(3, readers.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());
@@ -378,22 +379,22 @@ public class ClusterLocalManagerTest {
         assertNotNull(singleQueryManager);
         assertEquals((long) map.get(taskId), singleQueryManager.getJobId());
         assertEquals(3, singleQueryManager.getQueryRound());
-        ClusterFilterSeriesBatchReader filterReader = (ClusterFilterSeriesBatchReader) singleQueryManager.getFilterReader();
+        ClusterFilterSeriesBatchReaderEntity filterReader = (ClusterFilterSeriesBatchReaderEntity) singleQueryManager.getFilterReaderEntity();
         assertNotNull(filterReader);
         List<Path> allFilterPaths = new ArrayList<>();
         allFilterPaths.add(new Path("root.vehicle.d0.s0"));
         assertTrue(allFilterPaths.containsAll(filterReader.getAllFilterPath()));
         assertNotNull(filterReader.getQueryDataSet());
 
-        Map<String, AbstractClusterSelectSeriesBatchReader> selectSeriesReaders = singleQueryManager
-            .getSelectSeriesReaders();
-        assertNotNull(selectSeriesReaders);
-        assertEquals(3, selectSeriesReaders.size());
+        ClusterSelectSeriesBatchReaderEntity selectSeriesBatchReaderEntity = singleQueryManager.getSelectReaderEntity();
+        List<AbstractClusterSelectSeriesBatchReader> readers = selectSeriesBatchReaderEntity.getAllReaders();
+        assertNotNull(readers);
+        assertEquals(3, readers.size());
         Map<String, TSDataType> typeMap = singleQueryManager.getDataTypeMap();
-        for (Entry<String, AbstractClusterSelectSeriesBatchReader> entry : selectSeriesReaders.entrySet()) {
-          String path = entry.getKey();
-          TSDataType dataType = typeMap.get(path);
-          AbstractClusterSelectSeriesBatchReader clusterBatchReader = entry.getValue();
+        List<String> paths = selectSeriesBatchReaderEntity.getAllPaths();
+        for (int i =0 ; i < readers.size(); i++) {
+          TSDataType dataType = typeMap.get(paths.get(i));
+          AbstractClusterSelectSeriesBatchReader clusterBatchReader = readers.get(i);
           assertNotNull(((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getReaderByTimeStamp());
           assertEquals(dataType,
               ((ClusterSelectSeriesBatchReaderByTimestamp) clusterBatchReader).getDataType());


[incubator-iotdb] 11/11: fix a bug

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 3577857d40b08fb45ab9803c94b5eb742aa29e87
Author: lta <li...@163.com>
AuthorDate: Tue May 21 09:48:56 2019 +0800

    fix a bug
---
 .../db/engine/modification/io/LocalTextModificationAccessor.java  | 1 +
 .../org/apache/iotdb/db/query/control/QueryResourceManager.java   | 1 +
 .../iotdb/db/query/dataset/EngineDataSetWithoutTimeGenerator.java | 4 ++++
 .../db/query/dataset/groupby/GroupByWithValueFilterDataSet.java   | 8 ++++----
 .../src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java  | 5 +++++
 iotdb/src/main/java/org/apache/iotdb/db/service/Utils.java        | 3 +++
 6 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/modification/io/LocalTextModificationAccessor.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/modification/io/LocalTextModificationAccessor.java
index 9e86400..b1a60cf 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/modification/io/LocalTextModificationAccessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/modification/io/LocalTextModificationAccessor.java
@@ -55,6 +55,7 @@ public class LocalTextModificationAccessor implements ModificationReader, Modifi
   public LocalTextModificationAccessor(String filePath) {
     this.filePath = filePath;
   }
+
   @Override
   public Collection<Modification> read() throws IOException {
     BufferedReader reader;
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java b/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
index c5f029e..612f71a 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
@@ -164,6 +164,7 @@ public class QueryResourceManager {
       throws FileNodeManagerException {
 
     SingleSeriesExpression singleSeriesExpression = new SingleSeriesExpression(selectedPath, null);
+    System.out.println("查询的path为:" + selectedPath);
     QueryDataSource queryDataSource = FileNodeManager.getInstance()
         .query(singleSeriesExpression, context);
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithoutTimeGenerator.java b/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithoutTimeGenerator.java
index bc9bb08..fd1d393 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithoutTimeGenerator.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/EngineDataSetWithoutTimeGenerator.java
@@ -60,10 +60,12 @@ public class EngineDataSetWithoutTimeGenerator extends QueryDataSet {
       throws IOException {
     super(paths, dataTypes);
     this.readers = readers;
+    System.out.println("EngineDataSetWithoutTimeGenerator inner set");
     initHeap();
   }
 
   private void initHeap() throws IOException {
+    System.out.println("Start init heap");
     timeSet = new HashSet<>();
     timeHeap = new PriorityQueue<>();
     cacheTimeValueList = new TimeValuePair[readers.size()];
@@ -72,10 +74,12 @@ public class EngineDataSetWithoutTimeGenerator extends QueryDataSet {
       IPointReader reader = readers.get(i);
       if (reader.hasNext()) {
         TimeValuePair timeValuePair = reader.next();
+        System.out.println(timeValuePair);
         cacheTimeValueList[i] = timeValuePair;
         timeHeapPut(timeValuePair.getTimestamp());
       }
     }
+    System.out.println("Stop init heap");
   }
 
   @Override
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java b/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
index f7ffa29..2991ff7 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
@@ -54,7 +54,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
   /**
    * group by batch calculation size.
    */
-  private int timeStampFetchSize;
+  private int timestampFetchSize;
 
   /**
    * constructor.
@@ -63,7 +63,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       List<Pair<Long, Long>> mergedIntervals) {
     super(jobId, paths, unit, origin, mergedIntervals);
     this.allDataReaderList = new ArrayList<>();
-    this.timeStampFetchSize = 10 * IoTDBDescriptor.getInstance().getConfig().getFetchSize();
+    this.timestampFetchSize = 10 * IoTDBDescriptor.getInstance().getConfig().getFetchSize();
   }
 
   /**
@@ -92,7 +92,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
       function.init();
     }
 
-    long[] timestampArray = new long[timeStampFetchSize];
+    long[] timestampArray = new long[timestampFetchSize];
     int timeArrayLength = 0;
     if (hasCachedTimestamp) {
       if (timestamp < endTime) {
@@ -140,7 +140,7 @@ public class GroupByWithValueFilterDataSet extends GroupByEngineDataSet {
    */
   private int constructTimeArrayForOneCal(long[] timestampArray, int timeArrayLength)
       throws IOException {
-    for (int cnt = 1; cnt < timeStampFetchSize && timestampGenerator.hasNext(); cnt++) {
+    for (int cnt = 1; cnt < timestampFetchSize && timestampGenerator.hasNext(); cnt++) {
       timestamp = timestampGenerator.next();
       if (timestamp < endTime) {
         timestampArray[timeArrayLength++] = timestamp;
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java b/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
index b02f1c2..0ff4995 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
@@ -673,12 +673,17 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
       int fetchSize = req.getFetch_size();
       QueryDataSet queryDataSet;
       if (!queryRet.get().containsKey(statement)) {
+        System.out.println("Create new data set");
         queryDataSet = createNewDataSet(statement, fetchSize, req);
+        System.out.println("Create new data set success");
+        LOGGER.error("Create new data set success.");
       } else {
         queryDataSet = queryRet.get().get(statement);
       }
+      System.out.println("Get next value");
       TSQueryDataSet result = Utils.convertQueryDataSetByFetchSize(queryDataSet, fetchSize);
       boolean hasResultSet = !result.getRecords().isEmpty();
+      System.out.println("hasResultSet : " + hasResultSet);
       if (!hasResultSet && queryRet.get() != null) {
         queryRet.get().remove(statement);
       }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/service/Utils.java b/iotdb/src/main/java/org/apache/iotdb/db/service/Utils.java
index 75826da..03a5a2f 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/Utils.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/Utils.java
@@ -47,8 +47,11 @@ public class Utils {
     tsQueryDataSet.setRecords(new ArrayList<>());
     for (int i = 0; i < fetchsize; i++) {
       if (queryDataSet.hasNext()) {
+        System.out.println("Get value in convertQueryDataSetByFetchSize");
         RowRecord rowRecord = queryDataSet.next();
+        System.out.println(rowRecord);
         tsQueryDataSet.getRecords().add(convertToTSRecord(rowRecord));
+        System.out.println("Get value success in convertQueryDataSetByFetchSize");
       } else {
         break;
       }


[incubator-iotdb] 02/11: add aggre feature without timegenerator

Posted by lt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lta pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit fe9937a8cfe6db48dd4b4d2cff31b10ebbc91f77
Author: lta <li...@163.com>
AuthorDate: Thu May 16 18:41:26 2019 +0800

    add aggre feature without timegenerator
---
 .../executor/ClusterAggregateEngineExecutor.java   | 114 ++++++++++++++++
 .../cluster/query/executor/ClusterQueryRouter.java |  29 +++-
 .../querynode/ClusterLocalSingleQueryManager.java  |  85 ++++++++++--
 .../query/utils/QueryPlanPartitionUtils.java       | 146 +++++++++++++++------
 .../db/qp/executor/IQueryProcessExecutor.java      |   4 +-
 .../db/query/executor/AggregateEngineExecutor.java |  75 +++++++----
 .../iotdb/db/query/executor/EngineQueryRouter.java |   4 +-
 7 files changed, 378 insertions(+), 79 deletions(-)

diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
new file mode 100644
index 0000000..b63b311
--- /dev/null
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterAggregateEngineExecutor.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.cluster.query.executor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.iotdb.cluster.query.manager.coordinatornode.ClusterRpcSingleQueryManager;
+import org.apache.iotdb.cluster.query.reader.coordinatornode.ClusterSelectSeriesReader;
+import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
+import org.apache.iotdb.db.exception.FileNodeManagerException;
+import org.apache.iotdb.db.exception.PathErrorException;
+import org.apache.iotdb.db.exception.ProcessorException;
+import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.query.aggregation.AggreResultData;
+import org.apache.iotdb.db.query.aggregation.AggregateFunction;
+import org.apache.iotdb.db.query.aggregation.impl.LastAggrFunc;
+import org.apache.iotdb.db.query.aggregation.impl.MaxTimeAggrFunc;
+import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.AggreResultDataPointReader;
+import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
+import org.apache.iotdb.db.query.factory.AggreFuncFactory;
+import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
+import org.apache.iotdb.db.query.reader.IPointReader;
+import org.apache.iotdb.db.query.reader.merge.PriorityMergeReader;
+import org.apache.iotdb.db.query.reader.sequence.SequenceDataReader;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+
+public class ClusterAggregateEngineExecutor extends AggregateEngineExecutor {
+
+  private ClusterRpcSingleQueryManager queryManager;
+
+  public ClusterAggregateEngineExecutor(List<Path> selectedSeries, List<String> aggres,
+      IExpression expression, ClusterRpcSingleQueryManager queryManager) {
+    super(selectedSeries, aggres, expression);
+    this.queryManager = queryManager;
+  }
+
+  @Override
+  public QueryDataSet executeWithoutTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, IOException, PathErrorException, ProcessorException {
+    Filter timeFilter = expression != null ? ((GlobalTimeExpression) expression).getFilter() : null;
+    Map<Path, ClusterSelectSeriesReader> selectPathReaders = queryManager.getSelectSeriesReaders();
+
+    List<Path> paths = new ArrayList<>();
+    List<IPointReader> readers = new ArrayList<>();
+    List<TSDataType> dataTypes = new ArrayList<>();
+    for (int i = 0; i < selectedSeries.size(); i++) {
+      Path path = selectedSeries.get(i);
+
+      if (selectPathReaders.containsKey(path)) {
+        ClusterSelectSeriesReader reader = selectPathReaders.get(path);
+        readers.add(reader);
+        dataTypes.add(reader.getDataType());
+      } else {
+        paths.add(path);
+        // construct AggregateFunction
+        TSDataType tsDataType = MManager.getInstance()
+            .getSeriesType(selectedSeries.get(i).getFullPath());
+        AggregateFunction function = AggreFuncFactory.getAggrFuncByName(aggres.get(i), tsDataType);
+        function.init();
+
+        QueryDataSource queryDataSource = QueryResourceManager.getInstance()
+            .getQueryDataSource(selectedSeries.get(i), context);
+
+        // sequence reader for sealed tsfile, unsealed tsfile, memory
+        SequenceDataReader sequenceReader;
+        if (function instanceof MaxTimeAggrFunc || function instanceof LastAggrFunc) {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, true);
+        } else {
+          sequenceReader = new SequenceDataReader(queryDataSource.getSeqDataSource(), timeFilter,
+              context, false);
+        }
+
+        // unseq reader for all chunk groups in unSeqFile, memory
+        PriorityMergeReader unSeqMergeReader = SeriesReaderFactory.getInstance()
+            .createUnSeqMergeReader(queryDataSource.getOverflowSeriesDataSource(), timeFilter);
+
+        AggreResultData aggreResultData = aggregateWithoutTimeGenerator(function,
+            sequenceReader, unSeqMergeReader, timeFilter);
+        readers.add(new AggreResultDataPointReader(aggreResultData));
+      }
+    }
+    QueryResourceManager.getInstance()
+        .beginQueryOfGivenQueryPaths(context.getJobId(), paths);
+
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, readers);
+  }
+}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
index 2fa4576..672ca9d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/executor/ClusterQueryRouter.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.executor.FillEngineExecutor;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
 import org.apache.iotdb.db.query.executor.IEngineQueryRouter;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -95,7 +95,32 @@ public class ClusterQueryRouter implements IEngineQueryRouter {
   public QueryDataSet aggregate(List<Path> selectedSeries, List<String> aggres,
       IExpression expression, QueryContext context)
       throws QueryFilterOptimizationException, FileNodeManagerException, IOException, PathErrorException, ProcessorException {
-    throw new UnsupportedOperationException();
+
+    ClusterRpcSingleQueryManager queryManager = ClusterRpcQueryManager.getInstance()
+        .getSingleQuery(context.getJobId());
+
+    try {
+      if (expression != null) {
+        IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+            .optimize(expression, selectedSeries);
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, optimizedExpression, queryManager);
+        if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
+          queryManager.initQueryResource(QueryType.GLOBAL_TIME, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithoutTimeGenerator(context);
+        } else {
+          queryManager.initQueryResource(QueryType.FILTER, getReadDataConsistencyLevel());
+          return engineExecutor.executeWithTimeGenerator(context);
+        }
+      } else {
+        AggregateEngineExecutor engineExecutor = new ClusterAggregateEngineExecutor(
+            selectedSeries, aggres, null, queryManager);
+        queryManager.initQueryResource(QueryType.NO_FILTER, getReadDataConsistencyLevel());
+        return engineExecutor.executeWithoutTimeGenerator(context);
+      }
+    } catch (QueryFilterOptimizationException | IOException | RaftConnectionException e) {
+      throw new FileNodeManagerException(e);
+    }
   }
 
   @Override
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
index f776477..76a141e 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/manager/querynode/ClusterLocalSingleQueryManager.java
@@ -30,9 +30,9 @@ import org.apache.iotdb.cluster.query.PathType;
 import org.apache.iotdb.cluster.query.factory.ClusterSeriesReaderFactory;
 import org.apache.iotdb.cluster.query.reader.querynode.AbstractClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterFillSelectSeriesBatchReader;
+import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.ClusterSelectSeriesBatchReaderByTimestamp;
-import org.apache.iotdb.cluster.query.reader.querynode.ClusterFilterSeriesBatchReader;
 import org.apache.iotdb.cluster.query.reader.querynode.IClusterFilterSeriesBatchReader;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.InitSeriesReaderRequest;
 import org.apache.iotdb.cluster.rpc.raft.request.querydata.QuerySeriesDataByTimestampRequest;
@@ -54,6 +54,7 @@ import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.executor.AbstractExecutorWithoutTimeGenerator;
+import org.apache.iotdb.db.query.executor.AggregateEngineExecutor;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.db.query.fill.PreviousFill;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -63,7 +64,9 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.BatchData;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
+import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
+import org.apache.iotdb.tsfile.read.expression.util.ExpressionOptimizer;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.slf4j.Logger;
@@ -134,16 +137,11 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
       if (plan instanceof GroupByPlan) {
         throw new UnsupportedOperationException();
       } else if (plan instanceof AggregationPlan) {
-        throw new UnsupportedOperationException();
+        handleAggreSeriesReader(plan, context, response);
       } else if (plan instanceof FillQueryPlan) {
-        handleFillSeriesRerader(plan, context, response);
+        handleFillSeriesReader(plan, context, response);
       } else {
-        if (plan.getExpression() == null
-            || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
-          handleSelectReaderWithoutTimeGenerator(plan, context, response);
-        } else {
-          handleSelectReaderWithTimeGenerator(plan, context, response);
-        }
+        handleSelectSeriesReader(plan, context, response);
       }
     }
     if (queryPlanMap.containsKey(PathType.FILTER_PATH)) {
@@ -158,7 +156,7 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
    *
    * @param queryPlan fill query plan
    */
-  private void handleFillSeriesRerader(QueryPlan queryPlan, QueryContext context,
+  private void handleFillSeriesReader(QueryPlan queryPlan, QueryContext context,
       InitSeriesReaderResponse response)
       throws FileNodeManagerException, PathErrorException, IOException {
     FillQueryPlan fillQueryPlan = (FillQueryPlan) queryPlan;
@@ -190,6 +188,73 @@ public class ClusterLocalSingleQueryManager implements IClusterLocalSingleQueryM
     response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
   }
 
+
+  /**
+   * Handle aggregation series reader: dispatch on the plan's expression type.
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReader(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, QueryFilterOptimizationException, ProcessorException {
+    if (queryPlan.getExpression() == null
+        || queryPlan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleAggreSeriesReaderWithoutTimeGenerator(queryPlan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(queryPlan, context, response);
+    }
+  }
+
+  /**
+   * Handle aggregation series reader with no filter or only a global time filter.
+   *
+   * @param queryPlan aggregation query plan
+   */
+  private void handleAggreSeriesReaderWithoutTimeGenerator(QueryPlan queryPlan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, PathErrorException, IOException, QueryFilterOptimizationException, ProcessorException {
+    AggregationPlan aggregationPlan = (AggregationPlan) queryPlan;
+
+    List<Path> selectedPaths = aggregationPlan.getPaths();
+    QueryResourceManager.getInstance().beginQueryOfGivenQueryPaths(jobId, selectedPaths);
+
+    IExpression optimizedExpression = ExpressionOptimizer.getInstance()
+        .optimize(aggregationPlan.getExpression(), selectedPaths); // NOTE(review): expression may be null on the no-filter path — confirm optimize(null, ...) is safe
+    AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
+        selectedPaths, aggregationPlan.getAggregations(), optimizedExpression);
+
+    List<IPointReader> readers = engineExecutor.constructAggreReadersWithoutTimeGenerator(context);
+
+    List<TSDataType> dataTypes = engineExecutor.getDataTypes();
+
+    for (int i = 0; i < selectedPaths.size(); i++) {
+      Path path = selectedPaths.get(i);
+      selectSeriesReaders.put(path.getFullPath(),
+          new ClusterSelectSeriesBatchReader(dataTypes.get(i), readers.get(i)));
+      dataTypeMap.put(path.getFullPath(), dataTypes.get(i));
+    }
+
+    response.getSeriesDataTypes().put(PathType.SELECT_PATH, dataTypes);
+  }
+
+  /**
+   * Handle select series query
+   *
+   * @param plan query plan
+   * @param context query context
+   * @param response response for coordinator node
+   */
+  private void handleSelectSeriesReader(QueryPlan plan, QueryContext context,
+      InitSeriesReaderResponse response)
+      throws FileNodeManagerException, IOException, PathErrorException {
+    if (plan.getExpression() == null
+        || plan.getExpression().getType() == ExpressionType.GLOBAL_TIME) {
+      handleSelectReaderWithoutTimeGenerator(plan, context, response);
+    } else {
+      handleSelectReaderWithTimeGenerator(plan, context, response);
+    }
+  }
+
   /**
    * Handle select series query with no filter or only global time filter
    *
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
index 546282a..fc0d401 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/utils/QueryPlanPartitionUtils.java
@@ -32,8 +32,6 @@ import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
 import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
 import org.apache.iotdb.db.qp.physical.crud.GroupByPlan;
 import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
-import org.apache.iotdb.db.query.fill.IFill;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.expression.ExpressionType;
 import org.apache.iotdb.tsfile.read.expression.IExpression;
@@ -55,13 +53,32 @@ public class QueryPlanPartitionUtils {
       throws PathErrorException {
     QueryPlan queryPLan = singleQueryManager.getOriginQueryPlan();
     if (queryPLan instanceof FillQueryPlan) {
-      splitFillPlan((FillQueryPlan)queryPLan, singleQueryManager);
+      splitFillPlan(singleQueryManager);
+    } else if (queryPLan instanceof AggregationPlan) {
+      splitAggregationPlanBySelectPath(singleQueryManager);
+    } else if (queryPLan instanceof GroupByPlan) {
+      splitGroupByPlanBySelectPath(singleQueryManager);
     } else {
       splitQueryPlanBySelectPath(singleQueryManager);
     }
   }
 
   /**
+   * Split query plan with filter.
+   */
+  public static void splitQueryPlanWithValueFilter(
+      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+    QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
+    if (queryPlan instanceof GroupByPlan) {
+      splitGroupByPlanWithFilter(singleQueryManager);
+    } else if (queryPlan instanceof AggregationPlan) {
+      splitAggregationPlanWithFilter(singleQueryManager);
+    } else {
+      splitQueryPlanWithFilter(singleQueryManager);
+    }
+  }
+
+  /**
    * Split query plan by select paths
    */
   private static void splitQueryPlanBySelectPath(ClusterRpcSingleQueryManager singleQueryManager)
@@ -88,33 +105,100 @@ public class QueryPlanPartitionUtils {
     }
   }
 
+
   /**
-   * Split query plan with filter.
+   * Split query plan by filter paths
    */
-  public static void splitQueryPlanWithValueFilter(
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+  private static void splitQueryPlanByFilterPath(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
     QueryPlan queryPlan = singleQueryManager.getOriginQueryPlan();
-    if (queryPlan instanceof GroupByPlan) {
-      splitGroupByPlan((GroupByPlan) queryPlan, singleQueryManager);
-    } else if (queryPlan instanceof AggregationPlan) {
-      splitAggregationPlan((AggregationPlan) queryPlan, singleQueryManager);
-    } else {
-      splitQueryPlan(queryPlan, singleQueryManager);
+    // split query plan by filter path
+    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager
+        .getFilterGroupEntityMap();
+    IExpression expression = queryPlan.getExpression();
+    ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
+    for (FilterGroupEntity filterGroupEntity : filterGroupEntityMap.values()) {
+      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
+      // create filter sub query plan
+      QueryPlan subQueryPlan = new QueryPlan();
+      subQueryPlan.setPaths(filterSeriesList);
+      IExpression subExpression = ExpressionUtils
+          .pruneFilterTree(expression.clone(), filterSeriesList);
+      if (subExpression.getType() != ExpressionType.TRUE) {
+        subQueryPlan.setExpression(subExpression);
+      }
+      filterGroupEntity.setQueryPlan(subQueryPlan);
     }
   }
 
-  private static void splitGroupByPlan(GroupByPlan groupByPlan,
+  /**
+   * Split group by plan by select path
+   */
+  private static void splitGroupByPlanBySelectPath(
       ClusterRpcSingleQueryManager singleQueryManager) {
     throw new UnsupportedOperationException();
   }
 
-  private static void splitAggregationPlan(AggregationPlan aggregationPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) {
-    throw new UnsupportedOperationException();
+  /**
+   * Split group by plan with filter path
+   */
+  private static void splitGroupByPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitGroupByPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
   }
 
-  private static void splitFillPlan(FillQueryPlan fillQueryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+  /**
+   * Split aggregation plan by select path, grouping paths and their aggregations by group id.
+   */
+  private static void splitAggregationPlanBySelectPath(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    AggregationPlan queryPlan = (AggregationPlan) singleQueryManager.getOriginQueryPlan();
+    List<Path> selectPaths = queryPlan.getPaths();
+    List<String> aggregations = queryPlan.getAggregations(); // fix: was `new ArrayList<>()`, so aggregations.get(i) below always threw IndexOutOfBoundsException
+    Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
+    Map<String, List<String>> selectAggregationByGroupId = new HashMap<>();
+    Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
+    for (int i = 0; i < selectPaths.size(); i++) {
+      Path path = selectPaths.get(i);
+      String aggregation = aggregations.get(i);
+      String groupId = QPExecutorUtils.getGroupIdByDevice(path.getDevice());
+      if (!selectSeriesByGroupId.containsKey(groupId)) {
+        selectSeriesByGroupId.put(groupId, new ArrayList<>());
+        selectAggregationByGroupId.put(groupId, new ArrayList<>());
+      }
+      selectAggregationByGroupId.get(groupId).add(aggregation);
+      selectSeriesByGroupId.get(groupId).add(path);
+    }
+    for (Entry<String, List<Path>> entry : selectSeriesByGroupId.entrySet()) {
+      String groupId = entry.getKey();
+      List<Path> paths = entry.getValue();
+      AggregationPlan subQueryPlan = new AggregationPlan();
+      subQueryPlan.setProposer(queryPlan.getProposer());
+      subQueryPlan.setPaths(paths);
+      subQueryPlan.setExpression(queryPlan.getExpression());
+      subQueryPlan.setAggregations(selectAggregationByGroupId.get(groupId));
+      selectPathPlans.put(groupId, subQueryPlan);
+    }
+  }
+
+  /**
+   * Split aggregation plan with filter path
+   */
+  private static void splitAggregationPlanWithFilter(
+      ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    splitAggregationPlanBySelectPath(singleQueryManager);
+    splitQueryPlanByFilterPath(singleQueryManager);
+  }
+
+  /**
+   * Split fill plan which only contain select paths.
+   */
+  private static void splitFillPlan(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
+    FillQueryPlan fillQueryPlan = (FillQueryPlan) singleQueryManager.getOriginQueryPlan();
     List<Path> selectPaths = fillQueryPlan.getPaths();
     Map<String, List<Path>> selectSeriesByGroupId = singleQueryManager.getSelectSeriesByGroupId();
     Map<String, QueryPlan> selectPathPlans = singleQueryManager.getSelectPathPlans();
@@ -138,25 +222,13 @@ public class QueryPlanPartitionUtils {
     }
   }
 
-  private static void splitQueryPlan(QueryPlan queryPlan,
-      ClusterRpcSingleQueryManager singleQueryManager) throws PathErrorException {
+  /**
+   * Split query plan with filter
+   */
+  private static void splitQueryPlanWithFilter(ClusterRpcSingleQueryManager singleQueryManager)
+      throws PathErrorException {
     splitQueryPlanBySelectPath(singleQueryManager);
-    // split query plan by filter path
-    Map<String, FilterGroupEntity> filterGroupEntityMap = singleQueryManager
-        .getFilterGroupEntityMap();
-    IExpression expression = queryPlan.getExpression();
-    ExpressionUtils.getAllExpressionSeries(expression, filterGroupEntityMap);
-    for (FilterGroupEntity filterGroupEntity : filterGroupEntityMap.values()) {
-      List<Path> filterSeriesList = filterGroupEntity.getFilterPaths();
-      // create filter sub query plan
-      QueryPlan subQueryPlan = new QueryPlan();
-      subQueryPlan.setPaths(filterSeriesList);
-      IExpression subExpression = ExpressionUtils
-          .pruneFilterTree(expression.clone(), filterSeriesList);
-      if (subExpression.getType() != ExpressionType.TRUE) {
-        subQueryPlan.setExpression(subExpression);
-      }
-      filterGroupEntity.setQueryPlan(subQueryPlan);
-    }
+    splitQueryPlanByFilterPath(singleQueryManager);
   }
+
 }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
index 42a2e8b..920aeef 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
@@ -56,9 +56,7 @@ public interface IQueryProcessExecutor {
       QueryFilterOptimizationException, ProcessorException;
 
   /**
-   * process aggregate plan of qp layer, construct queryDataSet. <<<<<<< HEAD
-   *
-   * ======= >>>>>>> master
+   * process aggregate plan of qp layer, construct queryDataSet.
    */
   QueryDataSet aggregate(List<Path> paths, List<String> aggres, IExpression expression,
       QueryContext context)
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
index 508a787..6c458a5 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/AggregateEngineExecutor.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iotdb.db.query.executor;
 
 import java.io.IOException;
@@ -28,7 +27,6 @@ import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.metadata.MManager;
-import org.apache.iotdb.db.query.factory.AggreFuncFactory;
 import org.apache.iotdb.db.query.aggregation.AggreResultData;
 import org.apache.iotdb.db.query.aggregation.AggregateFunction;
 import org.apache.iotdb.db.query.aggregation.impl.LastAggrFunc;
@@ -37,6 +35,7 @@ import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.control.QueryResourceManager;
 import org.apache.iotdb.db.query.dataset.AggreResultDataPointReader;
 import org.apache.iotdb.db.query.dataset.EngineDataSetWithoutTimeGenerator;
+import org.apache.iotdb.db.query.factory.AggreFuncFactory;
 import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
 import org.apache.iotdb.db.query.reader.IPointReader;
 import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
@@ -54,9 +53,10 @@ import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 
 public class AggregateEngineExecutor {
 
-  private List<Path> selectedSeries;
-  private List<String> aggres;
-  private IExpression expression;
+  protected List<Path> selectedSeries;
+  protected List<String> aggres;
+  protected IExpression expression;
+  protected List<TSDataType> dataTypes;
 
   /**
    * aggregation batch calculation size.
@@ -72,6 +72,7 @@ public class AggregateEngineExecutor {
     this.aggres = aggres;
     this.expression = expression;
     this.aggregateFetchSize = 10 * IoTDBDescriptor.getInstance().getConfig().getFetchSize();
+    this.dataTypes = new ArrayList<>();
   }
 
   /**
@@ -79,8 +80,19 @@ public class AggregateEngineExecutor {
    *
    * @param context query context
    */
-  public QueryDataSet executeWithOutTimeGenerator(QueryContext context)
+  public QueryDataSet executeWithoutTimeGenerator(QueryContext context)
       throws FileNodeManagerException, IOException, PathErrorException, ProcessorException {
+    List<IPointReader> resultDataPointReaders = constructAggreReadersWithoutTimeGenerator(context);
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  }
+
+  /**
+   * Construct aggregate readers with only time filter or no filter.
+   *
+   * @param context query context
+   */
+  public List<IPointReader> constructAggreReadersWithoutTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
     Filter timeFilter = null;
     if (expression != null) {
       timeFilter = ((GlobalTimeExpression) expression).getFilter();
@@ -121,11 +133,17 @@ public class AggregateEngineExecutor {
     List<AggreResultData> aggreResultDataList = new ArrayList<>();
     //TODO use multi-thread
     for (int i = 0; i < selectedSeries.size(); i++) {
-      AggreResultData aggreResultData = aggregateWithOutTimeGenerator(aggregateFunctions.get(i),
+      AggreResultData aggreResultData = aggregateWithoutTimeGenerator(aggregateFunctions.get(i),
           readersOfSequenceData.get(i), readersOfUnSequenceData.get(i), timeFilter);
       aggreResultDataList.add(aggreResultData);
     }
-    return constructDataSet(aggreResultDataList);
+
+    List<IPointReader> resultDataPointReaders = new ArrayList<>();
+    for (AggreResultData resultData : aggreResultDataList) {
+      dataTypes.add(resultData.getDataType());
+      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
+    }
+    return resultDataPointReaders;
   }
 
   /**
@@ -137,7 +155,7 @@ public class AggregateEngineExecutor {
    * @param filter time filter or null
    * @return one series aggregate result data
    */
-  private AggreResultData aggregateWithOutTimeGenerator(AggregateFunction function,
+  protected AggreResultData aggregateWithoutTimeGenerator(AggregateFunction function,
       SequenceDataReader sequenceReader, IPointReader unSequenceReader, Filter filter)
       throws IOException, ProcessorException {
     if (function instanceof MaxTimeAggrFunc || function instanceof LastAggrFunc) {
@@ -256,6 +274,18 @@ public class AggregateEngineExecutor {
    */
   public QueryDataSet executeWithTimeGenerator(QueryContext context)
       throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
+    List<IPointReader> resultDataPointReaders = constructAggreReadersWithTimeGenerator(context);
+    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  }
+
+
+  /**
+   * Construct aggregate readers with value filter.
+   *
+   * @param context query context
+   */
+  private List<IPointReader> constructAggreReadersWithTimeGenerator(QueryContext context)
+      throws FileNodeManagerException, PathErrorException, IOException, ProcessorException {
     QueryResourceManager
         .getInstance().beginQueryOfGivenQueryPaths(context.getJobId(), selectedSeries);
     QueryResourceManager.getInstance().beginQueryOfGivenExpression(context.getJobId(), expression);
@@ -271,12 +301,19 @@ public class AggregateEngineExecutor {
       function.init();
       aggregateFunctions.add(function);
     }
-    List<AggreResultData> batchDataList = aggregateWithTimeGenerator(aggregateFunctions,
+    List<AggreResultData> aggreResultDataList = aggregateWithTimeGenerator(aggregateFunctions,
         timestampGenerator,
         readersOfSelectedSeries);
-    return constructDataSet(batchDataList);
+
+    List<IPointReader> resultDataPointReaders = new ArrayList<>();
+    for (AggreResultData resultData : aggreResultDataList) {
+      dataTypes.add(resultData.getDataType());
+      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
+    }
+    return resultDataPointReaders;
   }
 
+
   /**
    * calculation aggregate result with value filter.
    */
@@ -312,19 +349,7 @@ public class AggregateEngineExecutor {
     return aggreResultDataArrayList;
   }
 
-  /**
-   * using aggregate result data list construct QueryDataSet.
-   *
-   * @param aggreResultDataList aggregate result data list
-   */
-  private QueryDataSet constructDataSet(List<AggreResultData> aggreResultDataList)
-      throws IOException {
-    List<TSDataType> dataTypes = new ArrayList<>();
-    List<IPointReader> resultDataPointReaders = new ArrayList<>();
-    for (AggreResultData resultData : aggreResultDataList) {
-      dataTypes.add(resultData.getDataType());
-      resultDataPointReaders.add(new AggreResultDataPointReader(resultData));
-    }
-    return new EngineDataSetWithoutTimeGenerator(selectedSeries, dataTypes, resultDataPointReaders);
+  public List<TSDataType> getDataTypes() {
+    return dataTypes;
   }
 }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
index 03c600d..32fcbf7 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
@@ -90,14 +90,14 @@ public class EngineQueryRouter implements IEngineQueryRouter{
       AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
           selectedSeries, aggres, optimizedExpression);
       if (optimizedExpression.getType() == ExpressionType.GLOBAL_TIME) {
-        return engineExecutor.executeWithOutTimeGenerator(context);
+        return engineExecutor.executeWithoutTimeGenerator(context);
       } else {
         return engineExecutor.executeWithTimeGenerator(context);
       }
     } else {
       AggregateEngineExecutor engineExecutor = new AggregateEngineExecutor(
           selectedSeries, aggres, null);
-      return engineExecutor.executeWithOutTimeGenerator(context);
+      return engineExecutor.executeWithoutTimeGenerator(context);
     }
   }