You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by hu...@apache.org on 2022/12/18 15:58:43 UTC

[iotdb] branch lmh/addQueryMetrics created (now 7bf9d103a1)

This is an automated email from the ASF dual-hosted git repository.

hui pushed a change to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git


      at 7bf9d103a1 add metrics: series_scan_cost

This branch includes the following new commits:

     new 08d478707b add metrics: query_plan_cost
     new 868a7586c1 add metrics: operator_execution
     new 3ad3ed1d12 replace Operation with OperationType in ClientRPCServiceImpl
     new c35ccc4b9a rename QueryPlanCostMetrics to QueryPlanCostMetricSet
     new 7bf9d103a1 add metrics: series_scan_cost

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[iotdb] 02/05: add metrics: operator_execution

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 868a7586c18124034f409d00069ecedf67ee57a4
Author: Minghui Liu <li...@foxmail.com>
AuthorDate: Thu Dec 15 16:46:44 2022 +0800

    add metrics: operator_execution
---
 .../apache/iotdb/metrics/config/MetricConfig.java  |  5 ++--
 .../iotdb/commons/service/metric/enums/Metric.java |  4 ++-
 .../iotdb/db/mpp/execution/driver/Driver.java      | 19 ++++++++++--
 .../iotdb/db/mpp/execution/operator/Operator.java  | 23 ++++++++++++++
 .../db/mpp/execution/operator/OperatorContext.java | 19 ++++++++++++
 .../operator/process/AbstractIntoOperator.java     |  4 +--
 .../operator/process/AggregationOperator.java      |  4 +--
 .../operator/process/DeviceMergeOperator.java      |  6 ++--
 .../operator/process/DeviceViewOperator.java       |  6 ++--
 .../execution/operator/process/FillOperator.java   |  4 +--
 .../operator/process/FilterAndProjectOperator.java |  4 +--
 .../execution/operator/process/LimitOperator.java  |  4 +--
 .../operator/process/LinearFillOperator.java       |  6 ++--
 .../operator/process/MergeSortOperator.java        |  6 ++--
 .../execution/operator/process/OffsetOperator.java |  4 +--
 .../process/RawDataAggregationOperator.java        |  8 ++---
 .../operator/process/SingleDeviceViewOperator.java |  6 ++--
 .../process/SingleInputAggregationOperator.java    |  2 +-
 .../process/SlidingWindowAggregationOperator.java  |  6 ++--
 .../operator/process/TagAggregationOperator.java   |  6 ++--
 .../operator/process/TransformOperator.java        |  2 +-
 .../process/join/RowBasedTimeJoinOperator.java     |  6 ++--
 .../operator/process/join/TimeJoinOperator.java    |  6 ++--
 .../process/join/VerticallyConcatOperator.java     |  6 ++--
 .../last/AbstractUpdateLastCacheOperator.java      |  2 +-
 .../last/AlignedUpdateLastCacheOperator.java       |  2 +-
 .../process/last/LastQueryCollectOperator.java     |  6 ++--
 .../process/last/LastQueryMergeOperator.java       |  6 ++--
 .../operator/process/last/LastQueryOperator.java   |  6 ++--
 .../process/last/LastQuerySortOperator.java        |  6 ++--
 .../process/last/UpdateLastCacheOperator.java      |  2 +-
 .../operator/schema/CountMergeOperator.java        |  6 ++--
 .../schema/LevelTimeSeriesCountOperator.java       |  2 +-
 .../schema/NodeManageMemoryMergeOperator.java      |  4 +--
 .../operator/schema/NodePathsConvertOperator.java  |  4 +--
 .../operator/schema/NodePathsCountOperator.java    |  4 +--
 .../operator/schema/SchemaFetchMergeOperator.java  |  6 ++--
 .../operator/schema/SchemaQueryMergeOperator.java  |  6 ++--
 .../schema/SchemaQueryOrderByHeatOperator.java     |  6 ++--
 .../operator/schema/SchemaQueryScanOperator.java   |  2 +-
 .../AbstractSeriesAggregationScanOperator.java     |  2 +-
 .../operator/source/LastCacheScanOperator.java     |  2 +-
 .../iotdb/db/mpp/metric/QueryMetricsManager.java   | 35 ++++++++++++++++++----
 .../iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java  |  5 ++--
 .../apache/iotdb/db/mpp/plan/analyze/Analyzer.java |  2 +-
 .../db/mpp/plan/execution/QueryExecution.java      |  2 +-
 .../db/mpp/plan/parser/StatementGenerator.java     |  2 +-
 .../iotdb/db/mpp/plan/planner/LogicalPlanner.java  |  3 +-
 .../dag/input/TsBlockInputDataSet.java             |  6 ++--
 49 files changed, 190 insertions(+), 105 deletions(-)

diff --git a/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java b/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
index b17410e12d..3d50eac41d 100644
--- a/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
+++ b/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
@@ -25,7 +25,7 @@ import org.apache.iotdb.metrics.utils.MetricLevel;
 import org.apache.iotdb.metrics.utils.ReporterType;
 
 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
 
@@ -34,7 +34,8 @@ public class MetricConfig {
   private MetricFrameType metricFrameType = MetricFrameType.MICROMETER;
 
   /** The list of reporters provide metrics for external tool */
-  private List<ReporterType> metricReporterList = Collections.singletonList(ReporterType.JMX);
+  private List<ReporterType> metricReporterList =
+      Arrays.asList(ReporterType.JMX, ReporterType.PROMETHEUS);
 
   /** The level of metric service */
   private MetricLevel metricLevel = MetricLevel.IMPORTANT;
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index cc9b017328..1c0ab35d31 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -62,7 +62,9 @@ public enum Metric {
   THRIFT_ACTIVE_THREADS,
   IOT_CONSENSUS,
   STAGE,
-  QUERY_PLAN_COST;
+  QUERY_PLAN_COST,
+  OPERATOR_EXECUTION_COST,
+  OPERATOR_EXECUTION_COUNT;
 
   @Override
   public String toString() {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/Driver.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/Driver.java
index 086234fa12..92d8574294 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/Driver.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/Driver.java
@@ -21,6 +21,8 @@ package org.apache.iotdb.db.mpp.execution.driver;
 import org.apache.iotdb.db.mpp.common.FragmentInstanceId;
 import org.apache.iotdb.db.mpp.execution.exchange.ISinkHandle;
 import org.apache.iotdb.db.mpp.execution.operator.Operator;
+import org.apache.iotdb.db.mpp.execution.operator.OperatorContext;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.tsfile.read.common.block.TsBlock;
 
 import com.google.common.collect.ImmutableList;
@@ -58,6 +60,8 @@ public abstract class Driver implements IDriver {
 
   protected final DriverLock exclusiveLock = new DriverLock();
 
+  protected final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   protected enum State {
     ALIVE,
     NEED_DESTRUCTION,
@@ -182,8 +186,8 @@ public abstract class Driver implements IDriver {
       if (!blocked.isDone()) {
         return blocked;
       }
-      if (root.hasNext()) {
-        TsBlock tsBlock = root.next();
+      if (root.hasNextWithTimer()) {
+        TsBlock tsBlock = root.nextWithTimer();
         if (tsBlock != null && !tsBlock.isEmpty()) {
           sinkHandle.send(tsBlock);
         }
@@ -332,6 +336,17 @@ public abstract class Driver implements IDriver {
     try {
       root.close();
       sinkHandle.setNoMoreTsBlocks();
+
+      // record operator execution statistics to metrics
+      List<OperatorContext> operatorContexts =
+          driverContext.getFragmentInstanceContext().getOperatorContexts();
+      for (OperatorContext operatorContext : operatorContexts) {
+        String operatorType = operatorContext.getOperatorType();
+        QUERY_METRICS.recordOperatorExecutionCost(
+            operatorType, operatorContext.getTotalExecutionTimeInNanos());
+        QUERY_METRICS.recordOperatorExecutionCount(
+            operatorType, operatorContext.getNextCalledCount());
+      }
     } catch (InterruptedException t) {
       // don't record the stack
       wasInterrupted = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/Operator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/Operator.java
index b7b05a9289..aaf404d467 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/Operator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/Operator.java
@@ -38,9 +38,32 @@ public interface Operator extends AutoCloseable {
     return NOT_BLOCKED;
   }
 
+  default TsBlock nextWithTimer() {
+    OperatorContext context = getOperatorContext();
+    long startTime = System.nanoTime();
+
+    try {
+      return next();
+    } finally {
+      context.recordExecutionTime(System.nanoTime() - startTime);
+      context.recordNextCalled();
+    }
+  }
+
   /** Gets next tsBlock from this operator. If no data is currently available, return null. */
   TsBlock next();
 
+  default boolean hasNextWithTimer() {
+    OperatorContext context = getOperatorContext();
+    long startTime = System.nanoTime();
+
+    try {
+      return hasNext();
+    } finally {
+      context.recordExecutionTime(System.nanoTime() - startTime);
+    }
+  }
+
   /** @return true if the operator has more data, otherwise false */
   boolean hasNext();
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/OperatorContext.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/OperatorContext.java
index 447066f4c8..3e9620a1b2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/OperatorContext.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/OperatorContext.java
@@ -40,6 +40,9 @@ public class OperatorContext {
 
   private Duration maxRunTime;
 
+  private long totalExecutionTimeInNanos = 0L;
+  private long nextCalledCount = 0L;
+
   public OperatorContext(
       int operatorId,
       PlanNodeId planNodeId,
@@ -75,6 +78,22 @@ public class OperatorContext {
     return instanceContext.getSessionInfo();
   }
 
+  public void recordExecutionTime(long executionTimeInNanos) {
+    this.totalExecutionTimeInNanos += executionTimeInNanos;
+  }
+
+  public void recordNextCalled() {
+    this.nextCalledCount++;
+  }
+
+  public long getTotalExecutionTimeInNanos() {
+    return totalExecutionTimeInNanos;
+  }
+
+  public long getNextCalledCount() {
+    return nextCalledCount;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AbstractIntoOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AbstractIntoOperator.java
index 5f4b8dda34..ad24a16ade 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AbstractIntoOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AbstractIntoOperator.java
@@ -139,8 +139,8 @@ public abstract class AbstractIntoOperator implements ProcessOperator {
     }
     cachedTsBlock = null;
 
-    if (child.hasNext()) {
-      TsBlock inputTsBlock = child.next();
+    if (child.hasNextWithTimer()) {
+      TsBlock inputTsBlock = child.nextWithTimer();
       processTsBlock(inputTsBlock);
 
       // call child.next only once
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AggregationOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AggregationOperator.java
index 0d7a6c89f7..aa86c6585a 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AggregationOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/AggregationOperator.java
@@ -175,7 +175,7 @@ public class AggregationOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !this.hasNext();
+    return !this.hasNextWithTimer();
   }
 
   @Override
@@ -194,7 +194,7 @@ public class AggregationOperator implements ProcessOperator {
         return false;
       }
 
-      inputTsBlocks[i] = children.get(i).next();
+      inputTsBlocks[i] = children.get(i).nextWithTimer();
       canCallNext[i] = false;
       if (inputTsBlocks[i] == null) {
         return false;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceMergeOperator.java
index 511a75991b..c08895ceaa 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceMergeOperator.java
@@ -115,8 +115,8 @@ public class DeviceMergeOperator implements ProcessOperator {
   public TsBlock next() {
     // get new input TsBlock
     for (int i = 0; i < inputOperatorsCount; i++) {
-      if (!noMoreTsBlocks[i] && isTsBlockEmpty(i) && deviceOperators.get(i).hasNext()) {
-        inputTsBlocks[i] = deviceOperators.get(i).next();
+      if (!noMoreTsBlocks[i] && isTsBlockEmpty(i) && deviceOperators.get(i).hasNextWithTimer()) {
+        inputTsBlocks[i] = deviceOperators.get(i).nextWithTimer();
         if (inputTsBlocks[i] == null || inputTsBlocks[i].isEmpty()) {
           return null;
         }
@@ -204,7 +204,7 @@ public class DeviceMergeOperator implements ProcessOperator {
       if (!isTsBlockEmpty(i)) {
         return true;
       } else if (!noMoreTsBlocks[i]) {
-        if (deviceOperators.get(i).hasNext()) {
+        if (deviceOperators.get(i).hasNextWithTimer()) {
           return true;
         } else {
           noMoreTsBlocks[i] = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceViewOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceViewOperator.java
index 9a3a5e6634..242586a78e 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceViewOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/DeviceViewOperator.java
@@ -108,11 +108,11 @@ public class DeviceViewOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    if (!getCurDeviceOperator().hasNext()) {
+    if (!getCurDeviceOperator().hasNextWithTimer()) {
       deviceIndex++;
       return null;
     }
-    TsBlock tsBlock = getCurDeviceOperator().next();
+    TsBlock tsBlock = getCurDeviceOperator().nextWithTimer();
     if (tsBlock == null) {
       return null;
     }
@@ -151,7 +151,7 @@ public class DeviceViewOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !this.hasNext();
+    return !this.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FillOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FillOperator.java
index c182168bd3..92ec771f32 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FillOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FillOperator.java
@@ -58,7 +58,7 @@ public class FillOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock block = child.next();
+    TsBlock block = child.nextWithTimer();
     if (block == null) {
       return null;
     }
@@ -79,7 +79,7 @@ public class FillOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return child.hasNext();
+    return child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FilterAndProjectOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FilterAndProjectOperator.java
index fcb03660a8..c08fcfd27d 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FilterAndProjectOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/FilterAndProjectOperator.java
@@ -95,7 +95,7 @@ public class FilterAndProjectOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock input = inputOperator.next();
+    TsBlock input = inputOperator.nextWithTimer();
     if (input == null) {
       return null;
     }
@@ -193,7 +193,7 @@ public class FilterAndProjectOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return inputOperator.hasNext();
+    return inputOperator.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LimitOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LimitOperator.java
index 726f5e7ecf..68604cd1ce 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LimitOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LimitOperator.java
@@ -52,7 +52,7 @@ public class LimitOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock block = child.next();
+    TsBlock block = child.nextWithTimer();
     if (block == null) {
       return null;
     }
@@ -68,7 +68,7 @@ public class LimitOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return remainingLimit > 0 && child.hasNext();
+    return remainingLimit > 0 && child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LinearFillOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LinearFillOperator.java
index bcbb92a932..3792829196 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LinearFillOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/LinearFillOperator.java
@@ -87,7 +87,7 @@ public class LinearFillOperator implements ProcessOperator {
     // make sure we call child.next() at most once
     if (cachedTsBlock.isEmpty()) {
       canCallNext = false;
-      TsBlock nextTsBlock = child.next();
+      TsBlock nextTsBlock = child.nextWithTimer();
       // child operator's calculation is not finished, so we just return null
       if (nextTsBlock == null || nextTsBlock.isEmpty()) {
         return nextTsBlock;
@@ -144,7 +144,7 @@ public class LinearFillOperator implements ProcessOperator {
   @Override
   public boolean hasNext() {
     // if child.hasNext() return false, it means that there is no more tsBlocks
-    noMoreTsBlock = !child.hasNext();
+    noMoreTsBlock = !child.hasNextWithTimer();
     // if there is more tsBlock, we can call child.next() once
     canCallNext = !noMoreTsBlock;
     return !cachedTsBlock.isEmpty() || !noMoreTsBlock;
@@ -212,7 +212,7 @@ public class LinearFillOperator implements ProcessOperator {
     if (canCallNext) { // if we can call child.next(), we call that and cache it in
       // cachedTsBlock
       canCallNext = false;
-      TsBlock nextTsBlock = child.next();
+      TsBlock nextTsBlock = child.nextWithTimer();
       // child operator's calculation is not finished, so we just return null
       if (nextTsBlock == null || nextTsBlock.isEmpty()) {
         return false;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
index 372d01cb23..db11810fa2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/MergeSortOperator.java
@@ -96,8 +96,8 @@ public class MergeSortOperator implements ProcessOperator {
   public TsBlock next() {
     // 1. fill consumed up TsBlock
     for (int i = 0; i < inputOperatorsCount; i++) {
-      if (!noMoreTsBlocks[i] && isTsBlockEmpty(i) && inputOperators.get(i).hasNext()) {
-        inputTsBlocks[i] = inputOperators.get(i).next();
+      if (!noMoreTsBlocks[i] && isTsBlockEmpty(i) && inputOperators.get(i).hasNextWithTimer()) {
+        inputTsBlocks[i] = inputOperators.get(i).nextWithTimer();
         if (inputTsBlocks[i] == null || inputTsBlocks[i].isEmpty()) {
           return null;
         }
@@ -160,7 +160,7 @@ public class MergeSortOperator implements ProcessOperator {
       if (!isTsBlockEmpty(i)) {
         return true;
       } else if (!noMoreTsBlocks[i]) {
-        if (inputOperators.get(i).hasNext()) {
+        if (inputOperators.get(i).hasNextWithTimer()) {
           return true;
         } else {
           noMoreTsBlocks[i] = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/OffsetOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/OffsetOperator.java
index 572738d081..e6f146eac1 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/OffsetOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/OffsetOperator.java
@@ -52,7 +52,7 @@ public class OffsetOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock block = child.next();
+    TsBlock block = child.nextWithTimer();
     if (block == null) {
       return null;
     }
@@ -67,7 +67,7 @@ public class OffsetOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return child.hasNext();
+    return child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/RawDataAggregationOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/RawDataAggregationOperator.java
index 30488c18d3..b5ce5ef823 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/RawDataAggregationOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/RawDataAggregationOperator.java
@@ -58,7 +58,7 @@ public class RawDataAggregationOperator extends SingleInputAggregationOperator {
   }
 
   private boolean hasMoreData() {
-    return inputTsBlock != null || child.hasNext();
+    return inputTsBlock != null || child.hasNextWithTimer();
   }
 
   @Override
@@ -72,10 +72,10 @@ public class RawDataAggregationOperator extends SingleInputAggregationOperator {
       inputTsBlock = null;
 
       // NOTE: child.next() can only be invoked once
-      if (child.hasNext() && canCallNext) {
-        inputTsBlock = child.next();
+      if (child.hasNextWithTimer() && canCallNext) {
+        inputTsBlock = child.nextWithTimer();
         canCallNext = false;
-      } else if (child.hasNext()) {
+      } else if (child.hasNextWithTimer()) {
         // if child still has next but can't be invoked now
         return false;
       } else {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleDeviceViewOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleDeviceViewOperator.java
index 0107af8121..a22d52c9e2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleDeviceViewOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleDeviceViewOperator.java
@@ -82,7 +82,7 @@ public class SingleDeviceViewOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock tsBlock = deviceOperator.next();
+    TsBlock tsBlock = deviceOperator.nextWithTimer();
     if (tsBlock == null) {
       return null;
     }
@@ -104,7 +104,7 @@ public class SingleDeviceViewOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return deviceOperator.hasNext();
+    return deviceOperator.hasNextWithTimer();
   }
 
   @Override
@@ -114,7 +114,7 @@ public class SingleDeviceViewOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !this.hasNext();
+    return !this.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleInputAggregationOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleInputAggregationOperator.java
index 16071aea1e..8953755d5e 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleInputAggregationOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SingleInputAggregationOperator.java
@@ -108,7 +108,7 @@ public abstract class SingleInputAggregationOperator implements ProcessOperator
 
   @Override
   public boolean isFinished() {
-    return !this.hasNext();
+    return !this.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SlidingWindowAggregationOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SlidingWindowAggregationOperator.java
index 76499849cb..805b183677 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SlidingWindowAggregationOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/SlidingWindowAggregationOperator.java
@@ -78,10 +78,10 @@ public class SlidingWindowAggregationOperator extends SingleInputAggregationOper
     while (!isCalculationDone()) {
       if (inputTsBlock == null) {
         // NOTE: child.next() can only be invoked once
-        if (child.hasNext() && canCallNext) {
-          inputTsBlock = child.next();
+        if (child.hasNextWithTimer() && canCallNext) {
+          inputTsBlock = child.nextWithTimer();
           canCallNext = false;
-        } else if (child.hasNext()) {
+        } else if (child.hasNextWithTimer()) {
           // if child still has next but can't be invoked now
           return false;
         } else {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TagAggregationOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TagAggregationOperator.java
index cf18c26b74..778e0d28c5 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TagAggregationOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TagAggregationOperator.java
@@ -127,7 +127,7 @@ public class TagAggregationOperator implements ProcessOperator {
       }
 
       // If the data is unavailable first, try to find next tsblock of the child.
-      inputTsBlocks[i] = children.get(i).next();
+      inputTsBlocks[i] = children.get(i).nextWithTimer();
       consumedIndices[i] = 0;
       canCallNext[i] = false;
 
@@ -186,7 +186,7 @@ public class TagAggregationOperator implements ProcessOperator {
   @Override
   public boolean hasNext() {
     for (int i = 0; i < children.size(); i++) {
-      if (dataUnavailable(i) && !children.get(i).hasNext()) {
+      if (dataUnavailable(i) && !children.get(i).hasNextWithTimer()) {
         return false;
       }
     }
@@ -195,7 +195,7 @@ public class TagAggregationOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !this.hasNext();
+    return !this.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TransformOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TransformOperator.java
index 4346ed231e..2e80c93139 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TransformOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/TransformOperator.java
@@ -342,7 +342,7 @@ public class TransformOperator implements ProcessOperator {
   @Override
   public boolean isFinished() {
     // call hasNext first, or data of inputOperator could be missing
-    boolean flag = !hasNext();
+    boolean flag = !hasNextWithTimer();
     return timeHeap.isEmpty() && (flag || inputOperator.isFinished());
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/RowBasedTimeJoinOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/RowBasedTimeJoinOperator.java
index 695893d400..1c4211d003 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/RowBasedTimeJoinOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/RowBasedTimeJoinOperator.java
@@ -137,9 +137,9 @@ public class RowBasedTimeJoinOperator implements ProcessOperator {
     // among all the input TsBlock as the current output TsBlock's endTime.
     for (int i = 0; i < inputOperatorsCount; i++) {
       if (!noMoreTsBlocks[i] && empty(i)) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           inputIndex[i] = 0;
-          inputTsBlocks[i] = children.get(i).next();
+          inputTsBlocks[i] = children.get(i).nextWithTimer();
           if (!empty(i)) {
             updateTimeSelector(i);
           } else {
@@ -210,7 +210,7 @@ public class RowBasedTimeJoinOperator implements ProcessOperator {
       if (!empty(i)) {
         return true;
       } else if (!noMoreTsBlocks[i]) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           return true;
         } else {
           noMoreTsBlocks[i] = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/TimeJoinOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/TimeJoinOperator.java
index 3170838986..18bcd8836a 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/TimeJoinOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/TimeJoinOperator.java
@@ -137,9 +137,9 @@ public class TimeJoinOperator implements ProcessOperator {
     // among all the input TsBlock as the current output TsBlock's endTime.
     for (int i = 0; i < inputOperatorsCount; i++) {
       if (!noMoreTsBlocks[i] && empty(i)) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           inputIndex[i] = 0;
-          inputTsBlocks[i] = children.get(i).next();
+          inputTsBlocks[i] = children.get(i).nextWithTimer();
           if (!empty(i)) {
             int rowSize = inputTsBlocks[i].getPositionCount();
             for (int row = 0; row < rowSize; row++) {
@@ -209,7 +209,7 @@ public class TimeJoinOperator implements ProcessOperator {
       if (!empty(i)) {
         return true;
       } else if (!noMoreTsBlocks[i]) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           return true;
         } else {
           noMoreTsBlocks[i] = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/VerticallyConcatOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/VerticallyConcatOperator.java
index 2a4ffd58c5..1fe4390082 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/VerticallyConcatOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/join/VerticallyConcatOperator.java
@@ -99,7 +99,7 @@ public class VerticallyConcatOperator implements ProcessOperator {
     for (int i = 0; i < inputOperatorsCount; i++) {
       if (empty(i)) {
         inputIndex[i] = 0;
-        inputTsBlocks[i] = children.get(i).next();
+        inputTsBlocks[i] = children.get(i).nextWithTimer();
         if (empty(i)) {
           // child operator has not prepared TsBlock well
           return null;
@@ -144,7 +144,7 @@ public class VerticallyConcatOperator implements ProcessOperator {
     if (finished) {
       return false;
     }
-    return !empty(0) || children.get(0).hasNext();
+    return !empty(0) || children.get(0).hasNextWithTimer();
   }
 
   @Override
@@ -159,7 +159,7 @@ public class VerticallyConcatOperator implements ProcessOperator {
     if (finished) {
       return true;
     }
-    return finished = empty(0) && !children.get(0).hasNext();
+    return finished = empty(0) && !children.get(0).hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
index 4850fb45c4..3c182e0668 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AbstractUpdateLastCacheOperator.java
@@ -82,7 +82,7 @@ public abstract class AbstractUpdateLastCacheOperator implements ProcessOperator
 
   @Override
   public boolean hasNext() {
-    return child.hasNext();
+    return child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AlignedUpdateLastCacheOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AlignedUpdateLastCacheOperator.java
index 14354cbc3b..f78c19e4fe 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AlignedUpdateLastCacheOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/AlignedUpdateLastCacheOperator.java
@@ -51,7 +51,7 @@ public class AlignedUpdateLastCacheOperator extends AbstractUpdateLastCacheOpera
 
   @Override
   public TsBlock next() {
-    TsBlock res = child.next();
+    TsBlock res = child.nextWithTimer();
     if (res == null) {
       return null;
     }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryCollectOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryCollectOperator.java
index b91e346372..b69ecfa388 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryCollectOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryCollectOperator.java
@@ -61,8 +61,8 @@ public class LastQueryCollectOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    if (children.get(currentIndex).hasNext()) {
-      return children.get(currentIndex).next();
+    if (children.get(currentIndex).hasNextWithTimer()) {
+      return children.get(currentIndex).nextWithTimer();
     } else {
       currentIndex++;
       return null;
@@ -83,7 +83,7 @@ public class LastQueryCollectOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryMergeOperator.java
index 5ad04bdc2c..fcc074b024 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryMergeOperator.java
@@ -116,9 +116,9 @@ public class LastQueryMergeOperator implements ProcessOperator {
     // among all the input TsBlock as the current output TsBlock's endTimeSeries.
     for (int i = 0; i < inputOperatorsCount; i++) {
       if (!noMoreTsBlocks[i] && empty(i)) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           inputIndex[i] = 0;
-          inputTsBlocks[i] = children.get(i).next();
+          inputTsBlocks[i] = children.get(i).nextWithTimer();
           if (!empty(i)) {
             int rowSize = inputTsBlocks[i].getPositionCount();
             for (int row = 0; row < rowSize; row++) {
@@ -187,7 +187,7 @@ public class LastQueryMergeOperator implements ProcessOperator {
       if (!empty(i)) {
         return true;
       } else if (!noMoreTsBlocks[i]) {
-        if (children.get(i).hasNext()) {
+        if (children.get(i).hasNextWithTimer()) {
           return true;
         } else {
           noMoreTsBlocks[i] = true;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryOperator.java
index 7a689d4aa0..c0f825e02a 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQueryOperator.java
@@ -104,8 +104,8 @@ public class LastQueryOperator implements ProcessOperator {
     while ((System.nanoTime() - start < maxRuntime)
         && (currentIndex < endIndex)
         && !tsBlockBuilder.isFull()) {
-      if (children.get(currentIndex).hasNext()) {
-        TsBlock tsBlock = children.get(currentIndex).next();
+      if (children.get(currentIndex).hasNextWithTimer()) {
+        TsBlock tsBlock = children.get(currentIndex).nextWithTimer();
         if (tsBlock == null) {
           return null;
         } else if (!tsBlock.isEmpty()) {
@@ -127,7 +127,7 @@ public class LastQueryOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQuerySortOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQuerySortOperator.java
index aeb38b6b7d..3e16a8e6d2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQuerySortOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/LastQuerySortOperator.java
@@ -148,8 +148,8 @@ public class LastQuerySortOperator implements ProcessOperator {
           previousTsBlock = null;
         }
       } else {
-        if (children.get(currentIndex).hasNext()) {
-          TsBlock tsBlock = children.get(currentIndex).next();
+        if (children.get(currentIndex).hasNextWithTimer()) {
+          TsBlock tsBlock = children.get(currentIndex).nextWithTimer();
           if (tsBlock == null) {
             return null;
           } else if (!tsBlock.isEmpty()) {
@@ -189,7 +189,7 @@ public class LastQuerySortOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/UpdateLastCacheOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/UpdateLastCacheOperator.java
index 7315c81f46..69972a40e2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/UpdateLastCacheOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/process/last/UpdateLastCacheOperator.java
@@ -53,7 +53,7 @@ public class UpdateLastCacheOperator extends AbstractUpdateLastCacheOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock res = child.next();
+    TsBlock res = child.nextWithTimer();
     if (res == null) {
       return null;
     }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/CountMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/CountMergeOperator.java
index 61a8758770..32f215716e 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/CountMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/CountMergeOperator.java
@@ -96,8 +96,8 @@ public class CountMergeOperator implements ProcessOperator {
       if (childrenTsBlocks[i] == null) {
         // when this operator is not blocked, it means all children that have not return TsBlock is
         // not blocked.
-        if (children.get(i).hasNext()) {
-          TsBlock tsBlock = children.get(i).next();
+        if (children.get(i).hasNextWithTimer()) {
+          TsBlock tsBlock = children.get(i).nextWithTimer();
           if (tsBlock == null || tsBlock.isEmpty()) {
             allChildrenReady = false;
           } else {
@@ -164,7 +164,7 @@ public class CountMergeOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/LevelTimeSeriesCountOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/LevelTimeSeriesCountOperator.java
index 9a4f471eda..ab33978776 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/LevelTimeSeriesCountOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/LevelTimeSeriesCountOperator.java
@@ -140,7 +140,7 @@ public class LevelTimeSeriesCountOperator implements SourceOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodeManageMemoryMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodeManageMemoryMergeOperator.java
index 781ba46695..d9315fcdbc 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodeManageMemoryMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodeManageMemoryMergeOperator.java
@@ -79,7 +79,7 @@ public class NodeManageMemoryMergeOperator implements ProcessOperator {
       isReadingMemory = false;
       return transferToTsBlock(data);
     } else {
-      TsBlock block = child.next();
+      TsBlock block = child.nextWithTimer();
       if (block == null) {
         return null;
       }
@@ -126,7 +126,7 @@ public class NodeManageMemoryMergeOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return isReadingMemory || child.hasNext();
+    return isReadingMemory || child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsConvertOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsConvertOperator.java
index 217a40bd17..4d04a62e86 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsConvertOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsConvertOperator.java
@@ -70,7 +70,7 @@ public class NodePathsConvertOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    TsBlock block = child.next();
+    TsBlock block = child.nextWithTimer();
     if (block == null || block.isEmpty()) {
       return null;
     }
@@ -95,7 +95,7 @@ public class NodePathsConvertOperator implements ProcessOperator {
 
   @Override
   public boolean hasNext() {
-    return child.hasNext();
+    return child.hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsCountOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsCountOperator.java
index cf026777fa..3a662afe13 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsCountOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/NodePathsCountOperator.java
@@ -71,8 +71,8 @@ public class NodePathsCountOperator implements ProcessOperator {
       if (!blocked.isDone()) {
         return null;
       }
-      if (child.hasNext()) {
-        TsBlock tsBlock = child.next();
+      if (child.hasNextWithTimer()) {
+        TsBlock tsBlock = child.nextWithTimer();
         if (null != tsBlock && !tsBlock.isEmpty()) {
           for (int i = 0; i < tsBlock.getPositionCount(); i++) {
             String path = tsBlock.getColumn(0).getBinary(i).toString();
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaFetchMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaFetchMergeOperator.java
index 07a531e80c..977cd388df 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaFetchMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaFetchMergeOperator.java
@@ -72,8 +72,8 @@ public class SchemaFetchMergeOperator implements ProcessOperator {
       return generateStorageGroupInfo();
     }
 
-    if (children.get(currentIndex).hasNext()) {
-      return children.get(currentIndex).next();
+    if (children.get(currentIndex).hasNextWithTimer()) {
+      return children.get(currentIndex).nextWithTimer();
     } else {
       currentIndex++;
       return null;
@@ -94,7 +94,7 @@ public class SchemaFetchMergeOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryMergeOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryMergeOperator.java
index f2671034e8..0ad36268a5 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryMergeOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryMergeOperator.java
@@ -51,8 +51,8 @@ public class SchemaQueryMergeOperator implements ProcessOperator {
 
   @Override
   public TsBlock next() {
-    if (children.get(currentIndex).hasNext()) {
-      return children.get(currentIndex).next();
+    if (children.get(currentIndex).hasNextWithTimer()) {
+      return children.get(currentIndex).nextWithTimer();
     } else {
       currentIndex++;
       return null;
@@ -71,7 +71,7 @@ public class SchemaQueryMergeOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryOrderByHeatOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryOrderByHeatOperator.java
index 68419ec464..945961f828 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryOrderByHeatOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryOrderByHeatOperator.java
@@ -87,8 +87,8 @@ public class SchemaQueryOrderByHeatOperator implements ProcessOperator {
         if (operator.isFinished()) {
           noMoreTsBlocks[i] = true;
         } else {
-          if (operator.hasNext()) {
-            TsBlock tsBlock = operator.next();
+          if (operator.hasNextWithTimer()) {
+            TsBlock tsBlock = operator.nextWithTimer();
             if (null != tsBlock && !tsBlock.isEmpty()) {
               if (isShowTimeSeriesBlock(tsBlock)) {
                 showTimeSeriesResult.add(tsBlock);
@@ -203,7 +203,7 @@ public class SchemaQueryOrderByHeatOperator implements ProcessOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryScanOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryScanOperator.java
index 23bbcd4366..77bdc7b6cd 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryScanOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/schema/SchemaQueryScanOperator.java
@@ -107,7 +107,7 @@ public abstract class SchemaQueryScanOperator implements SourceOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AbstractSeriesAggregationScanOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AbstractSeriesAggregationScanOperator.java
index 15ad856245..ed6c92c199 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AbstractSeriesAggregationScanOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AbstractSeriesAggregationScanOperator.java
@@ -168,7 +168,7 @@ public abstract class AbstractSeriesAggregationScanOperator implements DataSourc
 
   @Override
   public boolean isFinished() {
-    return finished || (finished = !hasNext());
+    return finished || (finished = !hasNextWithTimer());
   }
 
   protected void calculateNextAggregationResult() {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/LastCacheScanOperator.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/LastCacheScanOperator.java
index 974758f8a5..6c5185fbf2 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/LastCacheScanOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/LastCacheScanOperator.java
@@ -54,7 +54,7 @@ public class LastCacheScanOperator implements SourceOperator {
 
   @Override
   public boolean isFinished() {
-    return !hasNext();
+    return !hasNextWithTimer();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
index 8a4e878733..dac48b1680 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
@@ -22,18 +22,41 @@ package org.apache.iotdb.db.mpp.metric;
 import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.commons.service.metric.enums.Metric;
 import org.apache.iotdb.commons.service.metric.enums.Tag;
-import org.apache.iotdb.metrics.type.Timer;
 import org.apache.iotdb.metrics.utils.MetricLevel;
 
+import java.util.concurrent.TimeUnit;
+
 public class QueryMetricsManager {
 
   private final MetricService metricService = MetricService.getInstance();
 
-  public void addPlanCost(String stage, long costTimeInNanos) {
-    Timer timer =
-        metricService.getOrCreateTimer(
-            Metric.QUERY_PLAN_COST.toString(), MetricLevel.IMPORTANT, Tag.STAGE.toString(), stage);
-    timer.updateNanos(costTimeInNanos);
+  public void recordPlanCost(String stage, long costTimeInNanos) {
+    metricService.timer(
+        costTimeInNanos,
+        TimeUnit.NANOSECONDS,
+        Metric.QUERY_PLAN_COST.toString(),
+        MetricLevel.IMPORTANT,
+        Tag.STAGE.toString(),
+        stage);
+  }
+
+  public void recordOperatorExecutionCost(String operatorType, long costTimeInNanos) {
+    metricService.timer(
+        costTimeInNanos,
+        TimeUnit.NANOSECONDS,
+        Metric.OPERATOR_EXECUTION_COST.toString(),
+        MetricLevel.IMPORTANT,
+        Tag.NAME.toString(),
+        operatorType);
+  }
+
+  public void recordOperatorExecutionCount(String operatorType, long count) {
+    metricService.count(
+        count,
+        Metric.OPERATOR_EXECUTION_COUNT.toString(),
+        MetricLevel.IMPORTANT,
+        Tag.NAME.toString(),
+        operatorType);
   }
 
   public static QueryMetricsManager getInstance() {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
index 8b582fb05a..00d63837ef 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
@@ -211,7 +211,8 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
       } else {
         schemaTree = schemaFetcher.fetchSchema(patternTree);
       }
-      QueryMetricsManager.getInstance().addPlanCost(SCHEMA_FETCHER, System.nanoTime() - startTime);
+      QueryMetricsManager.getInstance()
+          .recordPlanCost(SCHEMA_FETCHER, System.nanoTime() - startTime);
       logger.debug("[EndFetchSchema]");
 
       // If there is no leaf node in the schema tree, the query should be completed immediately
@@ -1175,7 +1176,7 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
       }
     } finally {
       QueryMetricsManager.getInstance()
-          .addPlanCost(PARTITION_FETCHER, System.nanoTime() - startTime);
+          .recordPlanCost(PARTITION_FETCHER, System.nanoTime() - startTime);
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
index 679a71627f..54d6bc0e84 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
@@ -47,7 +47,7 @@ public class Analyzer {
         new AnalyzeVisitor(partitionFetcher, schemaFetcher).process(statement, context);
 
     if (statement.isQuery()) {
-      QueryMetricsManager.getInstance().addPlanCost(ANALYZER, System.nanoTime() - startTime);
+      QueryMetricsManager.getInstance().recordPlanCost(ANALYZER, System.nanoTime() - startTime);
     }
     return analysis;
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
index c150183527..23e8c75029 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
@@ -308,7 +308,7 @@ public class QueryExecution implements IQueryExecution {
 
     if (rawStatement.isQuery()) {
       QueryMetricsManager.getInstance()
-          .addPlanCost(DISTRIBUTION_PLANNER, System.nanoTime() - startTime);
+          .recordPlanCost(DISTRIBUTION_PLANNER, System.nanoTime() - startTime);
     }
     if (isQuery() && logger.isDebugEnabled()) {
       logger.debug(
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
index 604442462e..f69362f66a 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
@@ -520,7 +520,7 @@ public class StatementGenerator {
       }
       return astVisitor.visit(tree);
     } finally {
-      QueryMetricsManager.getInstance().addPlanCost(SQL_PARSER, System.nanoTime() - startTime);
+      QueryMetricsManager.getInstance().recordPlanCost(SQL_PARSER, System.nanoTime() - startTime);
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
index cc25e475ac..47d9b63e10 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
@@ -46,7 +46,8 @@ public class LogicalPlanner {
 
     // optimize the query logical plan
     if (analysis.getStatement().isQuery()) {
-      QueryMetricsManager.getInstance().addPlanCost(LOGICAL_PLANNER, System.nanoTime() - startTime);
+      QueryMetricsManager.getInstance()
+          .recordPlanCost(LOGICAL_PLANNER, System.nanoTime() - startTime);
 
       for (PlanOptimizer optimizer : optimizers) {
         rootNode = optimizer.optimize(rootNode, context);
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/input/TsBlockInputDataSet.java b/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/input/TsBlockInputDataSet.java
index b7790a2953..8f6629aba3 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/input/TsBlockInputDataSet.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/transformation/dag/input/TsBlockInputDataSet.java
@@ -55,10 +55,10 @@ public class TsBlockInputDataSet implements IUDFInputDataSet {
       if (operator.isBlocked() != Operator.NOT_BLOCKED) {
         return YieldableState.NOT_YIELDABLE_WAITING_FOR_DATA;
       }
-      if (!operator.hasNext()) {
+      if (!operator.hasNextWithTimer()) {
         return YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
       }
-      final TsBlock tsBlock = operator.next();
+      final TsBlock tsBlock = operator.nextWithTimer();
       if (tsBlock == null) {
         return YieldableState.NOT_YIELDABLE_WAITING_FOR_DATA;
       }
@@ -72,7 +72,7 @@ public class TsBlockInputDataSet implements IUDFInputDataSet {
       if (operator.isBlocked() != Operator.NOT_BLOCKED) {
         return YieldableState.NOT_YIELDABLE_WAITING_FOR_DATA;
       }
-      return operator.hasNext()
+      return operator.hasNextWithTimer()
           ? YieldableState.NOT_YIELDABLE_WAITING_FOR_DATA
           : YieldableState.NOT_YIELDABLE_NO_MORE_DATA;
     }


[iotdb] 04/05: rename QueryPlanCostMetrics to QueryPlanCostMetricSet

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit c35ccc4b9a9cd0d46174503827a39f5186195615
Author: Minghui Liu <li...@foxmail.com>
AuthorDate: Fri Dec 16 11:36:12 2022 +0800

    rename QueryPlanCostMetrics to QueryPlanCostMetricSet
---
 .../metric/{QueryPlanCostMetrics.java => QueryPlanCostMetricSet.java} | 2 +-
 .../java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java     | 4 ++--
 .../src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java  | 2 +-
 .../java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java   | 2 +-
 .../java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java  | 2 +-
 .../java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java     | 2 +-
 .../org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java    | 4 ++--
 7 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetricSet.java
similarity index 97%
rename from server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java
rename to server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetricSet.java
index 122ba3ea23..d7abbd4428 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetricSet.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.metrics.utils.MetricType;
 import java.util.Arrays;
 import java.util.List;
 
-public class QueryPlanCostMetrics implements IMetricSet {
+public class QueryPlanCostMetricSet implements IMetricSet {
 
   public static final String SQL_PARSER = "sql_parser";
   public static final String ANALYZER = "analyzer";
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
index 00d63837ef..5529692452 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
@@ -154,8 +154,8 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.LOSS;
 import static org.apache.iotdb.commons.conf.IoTDBConstant.ONE_LEVEL_PATH_WILDCARD;
 import static org.apache.iotdb.db.metadata.MetadataConstant.ALL_RESULT_NODES;
 import static org.apache.iotdb.db.mpp.common.header.ColumnHeaderConstant.DEVICE;
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.PARTITION_FETCHER;
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.SCHEMA_FETCHER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.PARTITION_FETCHER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.SCHEMA_FETCHER;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetDevice;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetMeasurement;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetPath;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
index 54d6bc0e84..1a6782a5f3 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
@@ -25,7 +25,7 @@ import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.statement.Statement;
 
 import static org.apache.iotdb.db.mpp.common.QueryId.mockQueryId;
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.ANALYZER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.ANALYZER;
 
 /** Analyze the statement and generate Analysis. */
 public class Analyzer {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
index 23e8c75029..f621a4681d 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
@@ -82,7 +82,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Throwables.throwIfUnchecked;
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.DISTRIBUTION_PLANNER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.DISTRIBUTION_PLANNER;
 import static org.apache.iotdb.db.mpp.plan.constant.DataNodeEndPoints.isSameNode;
 
 /**
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
index f69362f66a..d41b03e458 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
@@ -102,7 +102,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.SQL_PARSER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.SQL_PARSER;
 
 /** Convert SQL and RPC requests to {@link Statement}. */
 public class StatementGenerator {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
index 47d9b63e10..b3f65eec7c 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
@@ -27,7 +27,7 @@ import org.apache.iotdb.db.mpp.plan.planner.plan.node.PlanNode;
 
 import java.util.List;
 
-import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.LOGICAL_PLANNER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet.LOGICAL_PLANNER;
 
 /** Generate a logical plan for the statement. */
 public class LogicalPlanner {
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
index be59c2b288..920080ec39 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
@@ -20,7 +20,7 @@
 package org.apache.iotdb.db.service.metrics;
 
 import org.apache.iotdb.commons.service.metric.MetricService;
-import org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics;
+import org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet;
 import org.apache.iotdb.metrics.metricsets.jvm.JvmMetrics;
 import org.apache.iotdb.metrics.metricsets.logback.LogbackMetrics;
 
@@ -34,6 +34,6 @@ public class DataNodeMetricsHelper {
     MetricService.getInstance().addMetricSet(new SystemMetrics(true));
 
     // bind query related metrics
-    MetricService.getInstance().addMetricSet(new QueryPlanCostMetrics());
+    MetricService.getInstance().addMetricSet(new QueryPlanCostMetricSet());
   }
 }


[iotdb] 03/05: replace Operation with OperationType in ClientRPCServiceImpl

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 3ad3ed1d12d18efbccb70932860dceb8add6a1ca
Author: Minghui Liu <li...@foxmail.com>
AuthorDate: Fri Dec 16 10:36:43 2022 +0800

    replace Operation with OperationType in ClientRPCServiceImpl
---
 .../service/thrift/impl/ClientRPCServiceImpl.java  | 35 +++++++++++-----------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/ClientRPCServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/ClientRPCServiceImpl.java
index f3f9d4e5ba..367d81207e 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/ClientRPCServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/ClientRPCServiceImpl.java
@@ -25,7 +25,6 @@ import org.apache.iotdb.commons.exception.IllegalPathException;
 import org.apache.iotdb.commons.exception.IoTDBException;
 import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.commons.service.metric.enums.Metric;
-import org.apache.iotdb.commons.service.metric.enums.Operation;
 import org.apache.iotdb.commons.utils.PathUtils;
 import org.apache.iotdb.db.auth.AuthorityChecker;
 import org.apache.iotdb.db.conf.IoTDBConfig;
@@ -244,7 +243,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return RpcUtils.getTSExecuteStatementResp(
           onQueryException(e, "\"" + statement + "\". " + OperationType.EXECUTE_STATEMENT));
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.EXECUTE_STATEMENT, startTime);
       if (finished) {
         COORDINATOR.cleanupQueryExecution(queryId);
       }
@@ -306,7 +305,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return RpcUtils.getTSExecuteStatementResp(
           onQueryException(e, "\"" + req + "\". " + OperationType.EXECUTE_RAW_DATA_QUERY));
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.EXECUTE_RAW_DATA_QUERY, startTime);
       if (finished) {
         COORDINATOR.cleanupQueryExecution(queryId);
       }
@@ -367,7 +366,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return RpcUtils.getTSExecuteStatementResp(
           onQueryException(e, "\"" + req + "\". " + OperationType.EXECUTE_LAST_DATA_QUERY));
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.EXECUTE_LAST_DATA_QUERY, startTime);
       if (finished) {
         COORDINATOR.cleanupQueryExecution(queryId);
       }
@@ -433,7 +432,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       finished = true;
       return RpcUtils.getTSFetchResultsResp(onQueryException(e, OperationType.FETCH_RESULTS));
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.FETCH_RESULTS, startTime);
       if (finished) {
         COORDINATOR.cleanupQueryExecution(req.queryId);
       }
@@ -866,7 +865,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
                 PARTITION_FETCHER,
                 SCHEMA_FETCHER,
                 config.getQueryTimeoutThreshold());
-        addOperationLatency(Operation.EXECUTE_ONE_SQL_IN_BATCH, t2);
+        addOperationLatency(OperationType.EXECUTE_STATEMENT, t2);
         results.add(result.status);
       } catch (Exception e) {
         LOGGER.warn("Error occurred when executing executeBatchStatement: ", e);
@@ -878,7 +877,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
         results.add(status);
       }
     }
-    addOperationLatency(Operation.EXECUTE_JDBC_BATCH, t1);
+    addOperationLatency(OperationType.EXECUTE_BATCH_STATEMENT, t1);
     return isAllSuccessful
         ? RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Execute batch statements successfully")
         : RpcUtils.getStatus(results);
@@ -928,7 +927,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       finished = true;
       return RpcUtils.getTSFetchResultsResp(onQueryException(e, OperationType.FETCH_RESULTS));
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.FETCH_RESULTS, startTime);
       if (finished) {
         COORDINATOR.cleanupQueryExecution(req.queryId);
       }
@@ -987,7 +986,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_RECORDS, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_RECORDS, t1);
     }
   }
 
@@ -1044,7 +1043,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_RECORDS_OF_ONE_DEVICE, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_RECORDS_OF_ONE_DEVICE, t1);
     }
   }
 
@@ -1104,7 +1103,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
           OperationType.INSERT_STRING_RECORDS_OF_ONE_DEVICE,
           TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_STRING_RECORDS_OF_ONE_DEVICE, t1);
     }
   }
 
@@ -1156,7 +1155,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_RECORD, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_RECORD, t1);
     }
   }
 
@@ -1204,7 +1203,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_TABLETS, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_TABLETS, t1);
     }
   }
 
@@ -1251,7 +1250,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_TABLET, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_TABLET, t1);
     }
   }
 
@@ -1305,7 +1304,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_STRING_RECORDS, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_STRING_RECORDS, t1);
     }
   }
 
@@ -1557,7 +1556,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
           onQueryException(e, "\"" + statement + "\". " + OperationType.EXECUTE_STATEMENT));
       return null;
     } finally {
-      addOperationLatency(Operation.EXECUTE_QUERY, startTime);
+      addOperationLatency(OperationType.EXECUTE_STATEMENT, startTime);
     }
   }
 
@@ -1766,7 +1765,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
       return onNPEOrUnexpectedException(
           e, OperationType.INSERT_STRING_RECORD, TSStatusCode.EXECUTE_STATEMENT_ERROR);
     } finally {
-      addOperationLatency(Operation.EXECUTE_RPC_BATCH_INSERT, t1);
+      addOperationLatency(OperationType.INSERT_STRING_RECORD, t1);
     }
   }
 
@@ -1790,7 +1789,7 @@ public class ClientRPCServiceImpl implements IClientRPCServiceWithHandler {
   }
 
   /** Add stat of operation into metrics */
-  private void addOperationLatency(Operation operation, long startTime) {
+  private void addOperationLatency(OperationType operation, long startTime) {
     MetricService.getInstance()
         .histogram(
             System.currentTimeMillis() - startTime,


[iotdb] 05/05: add metrics: series_scan_cost

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 7bf9d103a118406335d89fb5a84e4dc4db7842d1
Author: Minghui Liu <li...@foxmail.com>
AuthorDate: Sun Dec 18 23:52:44 2022 +0800

    add metrics: series_scan_cost
---
 .../iotdb/commons/service/metric/enums/Metric.java |   3 +-
 .../iotdb/commons/service/metric/enums/Tag.java    |   3 +-
 .../apache/iotdb/db/engine/cache/ChunkCache.java   |  49 +-
 .../db/engine/cache/TimeSeriesMetadataCache.java   | 154 +++---
 .../operator/source/AlignedSeriesScanUtil.java     |   1 +
 .../execution/operator/source/SeriesScanUtil.java  | 359 +++++++-------
 .../iotdb/db/mpp/metric/QueryMetricsManager.java   |  11 +
 .../db/mpp/metric/SeriesScanCostMetricSet.java     | 542 +++++++++++++++++++++
 .../query/reader/chunk/DiskAlignedChunkLoader.java |  38 +-
 .../db/query/reader/chunk/DiskChunkLoader.java     |  23 +-
 .../query/reader/chunk/MemAlignedChunkLoader.java  |  15 +-
 .../db/query/reader/chunk/MemChunkLoader.java      |  15 +-
 .../metadata/DiskAlignedChunkMetadataLoader.java   | 112 +++--
 .../chunk/metadata/DiskChunkMetadataLoader.java    | 118 +++--
 .../metadata/MemAlignedChunkMetadataLoader.java    |  75 +--
 .../chunk/metadata/MemChunkMetadataLoader.java     |  76 +--
 .../db/service/metrics/DataNodeMetricsHelper.java  |   2 +
 .../org/apache/iotdb/db/utils/FileLoaderUtils.java | 267 +++++-----
 .../read/reader/chunk/AlignedChunkReader.java      |  34 +-
 19 files changed, 1344 insertions(+), 553 deletions(-)

diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index 1c0ab35d31..e025895606 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -64,7 +64,8 @@ public enum Metric {
   STAGE,
   QUERY_PLAN_COST,
   OPERATOR_EXECUTION_COST,
-  OPERATOR_EXECUTION_COUNT;
+  OPERATOR_EXECUTION_COUNT,
+  SERIES_SCAN_COST;
 
   @Override
   public String toString() {
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
index 65280a22a2..178937bec4 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
@@ -24,7 +24,8 @@ public enum Tag {
   NAME,
   REGION,
   STATUS,
-  STAGE;
+  STAGE,
+  FROM;
 
   @Override
   public String toString() {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index 0fa63a5fdf..3cb94b4491 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -23,6 +23,7 @@ import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.commons.utils.TestOnly;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
@@ -38,6 +39,9 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.READ_CHUNK_ALL;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.READ_CHUNK_FILE;
+
 /**
  * This class is used to cache <code>Chunk</code> of <code>ChunkMetaData</code> in IoTDB. The
  * caching strategy is LRU.
@@ -51,6 +55,8 @@ public class ChunkCache {
       config.getAllocateMemoryForChunkCache();
   private static final boolean CACHE_ENABLE = config.isMetaDataCacheEnable();
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   private final LoadingCache<ChunkMetadata, Chunk> lruCache;
 
   private final AtomicLong entryAverageSize = new AtomicLong(0);
@@ -71,6 +77,7 @@ public class ChunkCache {
             .recordStats()
             .build(
                 chunkMetadata -> {
+                  long startTime = System.nanoTime();
                   try {
                     TsFileSequenceReader reader =
                         FileReaderManager.getInstance()
@@ -79,6 +86,9 @@ public class ChunkCache {
                   } catch (IOException e) {
                     logger.error("Something wrong happened in reading {}", chunkMetadata, e);
                     throw e;
+                  } finally {
+                    QUERY_METRICS.recordSeriesScanCost(
+                        READ_CHUNK_FILE, System.nanoTime() - startTime);
                   }
                 });
 
@@ -99,29 +109,34 @@ public class ChunkCache {
   }
 
   public Chunk get(ChunkMetadata chunkMetaData, boolean debug) throws IOException {
-    if (!CACHE_ENABLE) {
-      TsFileSequenceReader reader =
-          FileReaderManager.getInstance()
-              .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed());
-      Chunk chunk = reader.readMemChunk(chunkMetaData);
+    long startTime = System.nanoTime();
+    try {
+      if (!CACHE_ENABLE) {
+        TsFileSequenceReader reader =
+            FileReaderManager.getInstance()
+                .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed());
+        Chunk chunk = reader.readMemChunk(chunkMetaData);
+        return new Chunk(
+            chunk.getHeader(),
+            chunk.getData().duplicate(),
+            chunkMetaData.getDeleteIntervalList(),
+            chunkMetaData.getStatistics());
+      }
+
+      Chunk chunk = lruCache.get(chunkMetaData);
+
+      if (debug) {
+        DEBUG_LOGGER.info("get chunk from cache whose meta data is: " + chunkMetaData);
+      }
+
       return new Chunk(
           chunk.getHeader(),
           chunk.getData().duplicate(),
           chunkMetaData.getDeleteIntervalList(),
           chunkMetaData.getStatistics());
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(READ_CHUNK_ALL, System.nanoTime() - startTime);
     }
-
-    Chunk chunk = lruCache.get(chunkMetaData);
-
-    if (debug) {
-      DEBUG_LOGGER.info("get chunk from cache whose meta data is: " + chunkMetaData);
-    }
-
-    return new Chunk(
-        chunk.getHeader(),
-        chunk.getData().duplicate(),
-        chunkMetaData.getDeleteIntervalList(),
-        chunkMetaData.getStatistics());
   }
 
   public double calculateChunkHitRatio() {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index d70b2faf41..936958dff4 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -24,6 +24,7 @@ import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.commons.utils.TestOnly;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
@@ -50,6 +51,9 @@ import java.util.Set;
 import java.util.WeakHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.READ_TIMESERIES_METADATA_CACHE;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.READ_TIMESERIES_METADATA_FILE;
+
 /**
  * This class is used to cache <code>TimeSeriesMetadata</code> in IoTDB. The caching strategy is
  * LRU.
@@ -63,6 +67,8 @@ public class TimeSeriesMetadataCache {
       config.getAllocateMemoryForTimeSeriesMetaDataCache();
   private static final boolean CACHE_ENABLE = config.isMetaDataCacheEnable();
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   private final Cache<TimeSeriesMetadataCacheKey, TimeseriesMetadata> lruCache;
 
   private final AtomicLong entryAverageSize = new AtomicLong(0);
@@ -118,84 +124,96 @@ public class TimeSeriesMetadataCache {
       boolean ignoreNotExists,
       boolean debug)
       throws IOException {
-    if (!CACHE_ENABLE) {
-      // bloom filter part
-      TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
-      BloomFilter bloomFilter = reader.readBloomFilter();
-      if (bloomFilter != null
-          && !bloomFilter.contains(key.device + IoTDBConstant.PATH_SEPARATOR + key.measurement)) {
-        return null;
+    long startTime = System.nanoTime();
+    boolean cacheHit = true;
+    try {
+      if (!CACHE_ENABLE) {
+        cacheHit = false;
+
+        // bloom filter part
+        TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
+        BloomFilter bloomFilter = reader.readBloomFilter();
+        if (bloomFilter != null
+            && !bloomFilter.contains(key.device + IoTDBConstant.PATH_SEPARATOR + key.measurement)) {
+          return null;
+        }
+        TimeseriesMetadata timeseriesMetadata =
+            reader.readTimeseriesMetadata(
+                new Path(key.device, key.measurement, true), ignoreNotExists);
+        return (timeseriesMetadata == null || timeseriesMetadata.getStatistics().getCount() == 0)
+            ? null
+            : timeseriesMetadata;
       }
-      TimeseriesMetadata timeseriesMetadata =
-          reader.readTimeseriesMetadata(
-              new Path(key.device, key.measurement, true), ignoreNotExists);
-      return (timeseriesMetadata == null || timeseriesMetadata.getStatistics().getCount() == 0)
-          ? null
-          : timeseriesMetadata;
-    }
 
-    TimeseriesMetadata timeseriesMetadata = lruCache.getIfPresent(key);
+      TimeseriesMetadata timeseriesMetadata = lruCache.getIfPresent(key);
 
-    if (timeseriesMetadata == null) {
-      if (debug) {
-        DEBUG_LOGGER.info(
-            "Cache miss: {}.{} in file: {}", key.device, key.measurement, key.filePath);
-        DEBUG_LOGGER.info("Device: {}, all sensors: {}", key.device, allSensors);
-      }
-      // allow for the parallelism of different devices
-      synchronized (
-          devices.computeIfAbsent(key.device + SEPARATOR + key.filePath, WeakReference::new)) {
-        // double check
-        timeseriesMetadata = lruCache.getIfPresent(key);
-        if (timeseriesMetadata == null) {
-          Path path = new Path(key.device, key.measurement, true);
-          // bloom filter part
-          BloomFilter bloomFilter =
-              BloomFilterCache.getInstance()
-                  .get(new BloomFilterCache.BloomFilterCacheKey(key.filePath), debug);
-          if (bloomFilter != null) {
-            bloomFilterRequestCount.incrementAndGet();
-            if (!bloomFilter.contains(path.getFullPath())) {
-              bloomFilterPreventCount.incrementAndGet();
-              if (debug) {
-                DEBUG_LOGGER.info("TimeSeries meta data {} is filter by bloomFilter!", key);
+      if (timeseriesMetadata == null) {
+        if (debug) {
+          DEBUG_LOGGER.info(
+              "Cache miss: {}.{} in file: {}", key.device, key.measurement, key.filePath);
+          DEBUG_LOGGER.info("Device: {}, all sensors: {}", key.device, allSensors);
+        }
+        // allow for the parallelism of different devices
+        synchronized (
+            devices.computeIfAbsent(key.device + SEPARATOR + key.filePath, WeakReference::new)) {
+          // double check
+          timeseriesMetadata = lruCache.getIfPresent(key);
+          if (timeseriesMetadata == null) {
+            cacheHit = false;
+
+            Path path = new Path(key.device, key.measurement, true);
+            // bloom filter part
+            BloomFilter bloomFilter =
+                BloomFilterCache.getInstance()
+                    .get(new BloomFilterCache.BloomFilterCacheKey(key.filePath), debug);
+            if (bloomFilter != null) {
+              bloomFilterRequestCount.incrementAndGet();
+              if (!bloomFilter.contains(path.getFullPath())) {
+                bloomFilterPreventCount.incrementAndGet();
+                if (debug) {
+                  DEBUG_LOGGER.info("TimeSeries meta data {} is filter by bloomFilter!", key);
+                }
+                return null;
               }
-              return null;
             }
-          }
-          TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
-          List<TimeseriesMetadata> timeSeriesMetadataList =
-              reader.readTimeseriesMetadata(path, allSensors);
-          // put TimeSeriesMetadata of all sensors used in this query into cache
-          for (TimeseriesMetadata metadata : timeSeriesMetadataList) {
-            TimeSeriesMetadataCacheKey k =
-                new TimeSeriesMetadataCacheKey(
-                    key.filePath, key.device, metadata.getMeasurementId());
-            if (metadata.getStatistics().getCount() != 0) {
-              lruCache.put(k, metadata);
-            }
-            if (metadata.getMeasurementId().equals(key.measurement)) {
-              timeseriesMetadata = metadata.getStatistics().getCount() == 0 ? null : metadata;
+            TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
+            List<TimeseriesMetadata> timeSeriesMetadataList =
+                reader.readTimeseriesMetadata(path, allSensors);
+            // put TimeSeriesMetadata of all sensors used in this query into cache
+            for (TimeseriesMetadata metadata : timeSeriesMetadataList) {
+              TimeSeriesMetadataCacheKey k =
+                  new TimeSeriesMetadataCacheKey(
+                      key.filePath, key.device, metadata.getMeasurementId());
+              if (metadata.getStatistics().getCount() != 0) {
+                lruCache.put(k, metadata);
+              }
+              if (metadata.getMeasurementId().equals(key.measurement)) {
+                timeseriesMetadata = metadata.getStatistics().getCount() == 0 ? null : metadata;
+              }
             }
           }
         }
       }
-    }
-    if (timeseriesMetadata == null) {
-      if (debug) {
-        DEBUG_LOGGER.info("The file doesn't have this time series {}.", key);
-      }
-      return null;
-    } else {
-      if (debug) {
-        DEBUG_LOGGER.info(
-            "Get timeseries: {}.{}  metadata in file: {}  from cache: {}.",
-            key.device,
-            key.measurement,
-            key.filePath,
-            timeseriesMetadata);
+      if (timeseriesMetadata == null) {
+        if (debug) {
+          DEBUG_LOGGER.info("The file doesn't have this time series {}.", key);
+        }
+        return null;
+      } else {
+        if (debug) {
+          DEBUG_LOGGER.info(
+              "Get timeseries: {}.{}  metadata in file: {}  from cache: {}.",
+              key.device,
+              key.measurement,
+              key.filePath,
+              timeseriesMetadata);
+        }
+        return new TimeseriesMetadata(timeseriesMetadata);
       }
-      return new TimeseriesMetadata(timeseriesMetadata);
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          cacheHit ? READ_TIMESERIES_METADATA_CACHE : READ_TIMESERIES_METADATA_FILE,
+          System.nanoTime() - startTime);
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AlignedSeriesScanUtil.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AlignedSeriesScanUtil.java
index f41456b063..52e514eee4 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AlignedSeriesScanUtil.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/AlignedSeriesScanUtil.java
@@ -57,6 +57,7 @@ public class AlignedSeriesScanUtil extends SeriesScanUtil {
     dataTypes =
         ((AlignedPath) seriesPath)
             .getSchemaList().stream().map(IMeasurementSchema::getType).collect(Collectors.toList());
+    isAligned = true;
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/SeriesScanUtil.java b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/SeriesScanUtil.java
index 22fd2cbabc..35927976d5 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/SeriesScanUtil.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/execution/operator/source/SeriesScanUtil.java
@@ -24,7 +24,10 @@ import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.metadata.idtable.IDTable;
 import org.apache.iotdb.db.mpp.execution.fragment.FragmentInstanceContext;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.reader.chunk.MemAlignedPageReader;
+import org.apache.iotdb.db.query.reader.chunk.MemPageReader;
 import org.apache.iotdb.db.query.reader.universal.DescPriorityMergeReader;
 import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader;
 import org.apache.iotdb.db.utils.FileLoaderUtils;
@@ -58,19 +61,26 @@ import java.util.function.ToLongFunction;
 import java.util.stream.Collectors;
 
 import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_MERGE_READER_ALIGNED;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_MERGE_READER_NONALIGNED;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_MEM;
 
 public class SeriesScanUtil {
   private final FragmentInstanceContext context;
 
   // The path of the target series which will be scanned.
   private final PartialPath seriesPath;
+  protected boolean isAligned = false;
 
   // all the sensors in this device;
   protected final Set<String> allSensors;
   protected final TSDataType dataType;
 
   // inner class of SeriesReader for order purpose
-  private TimeOrderUtils orderUtils;
+  private final TimeOrderUtils orderUtils;
 
   /*
    * There is at most one is not null between timeFilter and valueFilter
@@ -121,6 +131,8 @@ public class SeriesScanUtil {
   protected boolean hasCachedNextOverlappedPage;
   protected TsBlock cachedTsBlock;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public SeriesScanUtil(
       PartialPath seriesPath,
       Set<String> allSensors,
@@ -642,177 +654,187 @@ public class SeriesScanUtil {
    */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
   private boolean hasNextOverlappedPage() throws IOException {
+    long startTime = System.nanoTime();
+    try {
+      if (hasCachedNextOverlappedPage) {
+        return true;
+      }
 
-    if (hasCachedNextOverlappedPage) {
-      return true;
-    }
-
-    tryToPutAllDirectlyOverlappedUnseqPageReadersIntoMergeReader();
-
-    while (true) {
+      tryToPutAllDirectlyOverlappedUnseqPageReadersIntoMergeReader();
 
-      // may has overlapped data
-      if (mergeReader.hasNextTimeValuePair()) {
+      while (true) {
 
-        // TODO we still need to consider data type, ascending and descending here
-        TsBlockBuilder builder = new TsBlockBuilder(getTsDataTypeList());
-        TimeColumnBuilder timeBuilder = builder.getTimeColumnBuilder();
-        long currentPageEndPointTime = mergeReader.getCurrentReadStopTime();
-        while (mergeReader.hasNextTimeValuePair()) {
+        // may have overlapped data
+        if (mergeReader.hasNextTimeValuePair()) {
 
-          /*
-           * get current first point in mergeReader, this maybe overlapped later
-           */
-          TimeValuePair timeValuePair = mergeReader.currentTimeValuePair();
+          // TODO we still need to consider data type, ascending and descending here
+          TsBlockBuilder builder = new TsBlockBuilder(getTsDataTypeList());
+          TimeColumnBuilder timeBuilder = builder.getTimeColumnBuilder();
+          long currentPageEndPointTime = mergeReader.getCurrentReadStopTime();
+          while (mergeReader.hasNextTimeValuePair()) {
 
-          if (orderUtils.isExcessEndpoint(timeValuePair.getTimestamp(), currentPageEndPointTime)) {
             /*
-             * when the merged point excesses the currentPageEndPointTime, we have read all overlapped data before currentPageEndPointTime
-             * 1. has cached batch data, we don't need to read more data, just use the cached data later
-             * 2. has first page reader, which means first page reader last endTime < currentTimeValuePair.getTimestamp(),
-             * we could just use the first page reader later
-             * 3. sequence page reader is not empty, which means first page reader last endTime < currentTimeValuePair.getTimestamp(),
-             * we could use the first sequence page reader later
+             * get current first point in mergeReader, this maybe overlapped later
              */
-            if (!builder.isEmpty() || firstPageReader != null || !seqPageReaders.isEmpty()) {
-              break;
+            TimeValuePair timeValuePair = mergeReader.currentTimeValuePair();
+
+            if (orderUtils.isExcessEndpoint(
+                timeValuePair.getTimestamp(), currentPageEndPointTime)) {
+              /*
+               * when the merged point exceeds the currentPageEndPointTime, we have read all overlapped data before currentPageEndPointTime
+               * 1. has cached batch data, we don't need to read more data, just use the cached data later
+               * 2. has first page reader, which means first page reader last endTime < currentTimeValuePair.getTimestamp(),
+               * we could just use the first page reader later
+               * 3. sequence page reader is not empty, which means first page reader last endTime < currentTimeValuePair.getTimestamp(),
+               * we could use the first sequence page reader later
+               */
+              if (!builder.isEmpty() || firstPageReader != null || !seqPageReaders.isEmpty()) {
+                break;
+              }
+              // so, we don't have other data except mergeReader
+              currentPageEndPointTime = mergeReader.getCurrentReadStopTime();
             }
-            // so, we don't have other data except mergeReader
-            currentPageEndPointTime = mergeReader.getCurrentReadStopTime();
-          }
 
-          // unpack all overlapped data for the first timeValuePair
-          unpackAllOverlappedTsFilesToTimeSeriesMetadata(timeValuePair.getTimestamp());
-          unpackAllOverlappedTimeSeriesMetadataToCachedChunkMetadata(
-              timeValuePair.getTimestamp(), false);
-          unpackAllOverlappedChunkMetadataToPageReaders(timeValuePair.getTimestamp(), false);
-          unpackAllOverlappedUnseqPageReadersToMergeReader(timeValuePair.getTimestamp());
-
-          // update if there are unpacked unSeqPageReaders
-          timeValuePair = mergeReader.currentTimeValuePair();
-
-          // from now, the unsequence reader is all unpacked, so we don't need to consider it
-          // we has first page reader now
-          if (firstPageReader != null) {
-            // if current timeValuePair excesses the first page reader's end time, we just use the
-            // cached data
-            if ((orderUtils.getAscending()
-                    && timeValuePair.getTimestamp() > firstPageReader.getStatistics().getEndTime())
-                || (!orderUtils.getAscending()
-                    && timeValuePair.getTimestamp()
-                        < firstPageReader.getStatistics().getStartTime())) {
-              hasCachedNextOverlappedPage = !builder.isEmpty();
-              cachedTsBlock = builder.build();
-              return hasCachedNextOverlappedPage;
-            } else if (orderUtils.isOverlapped(
-                timeValuePair.getTimestamp(), firstPageReader.getStatistics())) {
-              // current timeValuePair is overlapped with firstPageReader, add it to merged reader
-              // and update endTime to the max end time
-              mergeReader.addReader(
-                  getPointReader(
-                      firstPageReader.getAllSatisfiedPageData(orderUtils.getAscending())),
-                  firstPageReader.version,
-                  orderUtils.getOverlapCheckTime(firstPageReader.getStatistics()),
-                  context);
-              currentPageEndPointTime =
-                  updateEndPointTime(currentPageEndPointTime, firstPageReader);
-              firstPageReader = null;
+            // unpack all overlapped data for the first timeValuePair
+            unpackAllOverlappedTsFilesToTimeSeriesMetadata(timeValuePair.getTimestamp());
+            unpackAllOverlappedTimeSeriesMetadataToCachedChunkMetadata(
+                timeValuePair.getTimestamp(), false);
+            unpackAllOverlappedChunkMetadataToPageReaders(timeValuePair.getTimestamp(), false);
+            unpackAllOverlappedUnseqPageReadersToMergeReader(timeValuePair.getTimestamp());
+
+            // update if there are unpacked unSeqPageReaders
+            timeValuePair = mergeReader.currentTimeValuePair();
+
+            // from now, the unsequence reader is all unpacked, so we don't need to consider it
+            // we have the first page reader now
+            if (firstPageReader != null) {
+              // if current timeValuePair exceeds the first page reader's end time, we just use the
+              // cached data
+              if ((orderUtils.getAscending()
+                      && timeValuePair.getTimestamp()
+                          > firstPageReader.getStatistics().getEndTime())
+                  || (!orderUtils.getAscending()
+                      && timeValuePair.getTimestamp()
+                          < firstPageReader.getStatistics().getStartTime())) {
+                hasCachedNextOverlappedPage = !builder.isEmpty();
+                cachedTsBlock = builder.build();
+                return hasCachedNextOverlappedPage;
+              } else if (orderUtils.isOverlapped(
+                  timeValuePair.getTimestamp(), firstPageReader.getStatistics())) {
+                // current timeValuePair is overlapped with firstPageReader, add it to merged reader
+                // and update endTime to the max end time
+                mergeReader.addReader(
+                    getPointReader(
+                        firstPageReader.getAllSatisfiedPageData(orderUtils.getAscending())),
+                    firstPageReader.version,
+                    orderUtils.getOverlapCheckTime(firstPageReader.getStatistics()),
+                    context);
+                currentPageEndPointTime =
+                    updateEndPointTime(currentPageEndPointTime, firstPageReader);
+                firstPageReader = null;
+              }
             }
-          }
 
-          // the seq page readers is not empty, just like first page reader
-          if (!seqPageReaders.isEmpty()) {
-            if ((orderUtils.getAscending()
-                    && timeValuePair.getTimestamp()
-                        > seqPageReaders.get(0).getStatistics().getEndTime())
-                || (!orderUtils.getAscending()
-                    && timeValuePair.getTimestamp()
-                        < seqPageReaders.get(0).getStatistics().getStartTime())) {
-              hasCachedNextOverlappedPage = !builder.isEmpty();
-              cachedTsBlock = builder.build();
-              return hasCachedNextOverlappedPage;
-            } else if (orderUtils.isOverlapped(
-                timeValuePair.getTimestamp(), seqPageReaders.get(0).getStatistics())) {
-              VersionPageReader pageReader = seqPageReaders.remove(0);
-              mergeReader.addReader(
-                  getPointReader(pageReader.getAllSatisfiedPageData(orderUtils.getAscending())),
-                  pageReader.version,
-                  orderUtils.getOverlapCheckTime(pageReader.getStatistics()),
-                  context);
-              currentPageEndPointTime = updateEndPointTime(currentPageEndPointTime, pageReader);
+            // the seq page readers is not empty, just like first page reader
+            if (!seqPageReaders.isEmpty()) {
+              if ((orderUtils.getAscending()
+                      && timeValuePair.getTimestamp()
+                          > seqPageReaders.get(0).getStatistics().getEndTime())
+                  || (!orderUtils.getAscending()
+                      && timeValuePair.getTimestamp()
+                          < seqPageReaders.get(0).getStatistics().getStartTime())) {
+                hasCachedNextOverlappedPage = !builder.isEmpty();
+                cachedTsBlock = builder.build();
+                return hasCachedNextOverlappedPage;
+              } else if (orderUtils.isOverlapped(
+                  timeValuePair.getTimestamp(), seqPageReaders.get(0).getStatistics())) {
+                VersionPageReader pageReader = seqPageReaders.remove(0);
+                mergeReader.addReader(
+                    getPointReader(pageReader.getAllSatisfiedPageData(orderUtils.getAscending())),
+                    pageReader.version,
+                    orderUtils.getOverlapCheckTime(pageReader.getStatistics()),
+                    context);
+                currentPageEndPointTime = updateEndPointTime(currentPageEndPointTime, pageReader);
+              }
             }
-          }
 
-          /*
-           * get the latest first point in mergeReader
-           */
-          timeValuePair = mergeReader.nextTimeValuePair();
+            /*
+             * get the latest first point in mergeReader
+             */
+            timeValuePair = mergeReader.nextTimeValuePair();
 
-          Object valueForFilter = timeValuePair.getValue().getValue();
+            Object valueForFilter = timeValuePair.getValue().getValue();
 
-          // TODO fix value filter firstNotNullObject, currently, if it's a value filter, it will
-          // only accept AlignedPath with only one sub sensor
-          if (timeValuePair.getValue().getDataType() == TSDataType.VECTOR) {
-            for (TsPrimitiveType tsPrimitiveType : timeValuePair.getValue().getVector()) {
-              if (tsPrimitiveType != null) {
-                valueForFilter = tsPrimitiveType.getValue();
-                break;
+            // TODO fix value filter firstNotNullObject, currently, if it's a value filter, it will
+            // only accept AlignedPath with only one sub sensor
+            if (timeValuePair.getValue().getDataType() == TSDataType.VECTOR) {
+              for (TsPrimitiveType tsPrimitiveType : timeValuePair.getValue().getVector()) {
+                if (tsPrimitiveType != null) {
+                  valueForFilter = tsPrimitiveType.getValue();
+                  break;
+                }
               }
             }
-          }
 
-          if (valueFilter == null
-              || valueFilter.satisfy(timeValuePair.getTimestamp(), valueForFilter)) {
-            timeBuilder.writeLong(timeValuePair.getTimestamp());
-            switch (dataType) {
-              case BOOLEAN:
-                builder.getColumnBuilder(0).writeBoolean(timeValuePair.getValue().getBoolean());
-                break;
-              case INT32:
-                builder.getColumnBuilder(0).writeInt(timeValuePair.getValue().getInt());
-                break;
-              case INT64:
-                builder.getColumnBuilder(0).writeLong(timeValuePair.getValue().getLong());
-                break;
-              case FLOAT:
-                builder.getColumnBuilder(0).writeFloat(timeValuePair.getValue().getFloat());
-                break;
-              case DOUBLE:
-                builder.getColumnBuilder(0).writeDouble(timeValuePair.getValue().getDouble());
-                break;
-              case TEXT:
-                builder.getColumnBuilder(0).writeBinary(timeValuePair.getValue().getBinary());
-                break;
-              case VECTOR:
-                TsPrimitiveType[] values = timeValuePair.getValue().getVector();
-                for (int i = 0; i < values.length; i++) {
-                  if (values[i] == null) {
-                    builder.getColumnBuilder(i).appendNull();
-                  } else {
-                    builder.getColumnBuilder(i).writeTsPrimitiveType(values[i]);
+            if (valueFilter == null
+                || valueFilter.satisfy(timeValuePair.getTimestamp(), valueForFilter)) {
+              timeBuilder.writeLong(timeValuePair.getTimestamp());
+              switch (dataType) {
+                case BOOLEAN:
+                  builder.getColumnBuilder(0).writeBoolean(timeValuePair.getValue().getBoolean());
+                  break;
+                case INT32:
+                  builder.getColumnBuilder(0).writeInt(timeValuePair.getValue().getInt());
+                  break;
+                case INT64:
+                  builder.getColumnBuilder(0).writeLong(timeValuePair.getValue().getLong());
+                  break;
+                case FLOAT:
+                  builder.getColumnBuilder(0).writeFloat(timeValuePair.getValue().getFloat());
+                  break;
+                case DOUBLE:
+                  builder.getColumnBuilder(0).writeDouble(timeValuePair.getValue().getDouble());
+                  break;
+                case TEXT:
+                  builder.getColumnBuilder(0).writeBinary(timeValuePair.getValue().getBinary());
+                  break;
+                case VECTOR:
+                  TsPrimitiveType[] values = timeValuePair.getValue().getVector();
+                  for (int i = 0; i < values.length; i++) {
+                    if (values[i] == null) {
+                      builder.getColumnBuilder(i).appendNull();
+                    } else {
+                      builder.getColumnBuilder(i).writeTsPrimitiveType(values[i]);
+                    }
                   }
-                }
-                break;
-              default:
-                throw new UnSupportedDataTypeException(String.valueOf(dataType));
+                  break;
+                default:
+                  throw new UnSupportedDataTypeException(String.valueOf(dataType));
+              }
+              builder.declarePosition();
             }
-            builder.declarePosition();
           }
-        }
-        hasCachedNextOverlappedPage = !builder.isEmpty();
-        cachedTsBlock = builder.build();
-        /*
-         * if current overlapped page has valid data, return, otherwise read next overlapped page
-         */
-        if (hasCachedNextOverlappedPage) {
-          return true;
-        } else if (mergeReader.hasNextTimeValuePair()) {
-          // condition: seqPage.endTime < mergeReader.currentTime
+          hasCachedNextOverlappedPage = !builder.isEmpty();
+          cachedTsBlock = builder.build();
+          /*
+           * if current overlapped page has valid data, return, otherwise read next overlapped page
+           */
+          if (hasCachedNextOverlappedPage) {
+            return true;
+          } else if (mergeReader.hasNextTimeValuePair()) {
+            // condition: seqPage.endTime < mergeReader.currentTime
+            return false;
+          }
+        } else {
           return false;
         }
-      } else {
-        return false;
       }
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          isAligned
+              ? BUILD_TSBLOCK_FROM_MERGE_READER_ALIGNED
+              : BUILD_TSBLOCK_FROM_MERGE_READER_NONALIGNED,
+          System.nanoTime() - startTime);
     }
   }
 
@@ -1084,25 +1106,21 @@ public class SeriesScanUtil {
     return timeFilter;
   }
 
-  public TimeOrderUtils getOrderUtils() {
-    return orderUtils;
-  }
+  protected static class VersionPageReader {
 
-  protected class VersionPageReader {
+    private final PriorityMergeReader.MergeReaderPriority version;
+    private final IPageReader data;
 
-    protected PriorityMergeReader.MergeReaderPriority version;
-    protected IPageReader data;
-
-    protected boolean isSeq;
+    private final boolean isSeq;
+    private final boolean isAligned;
+    private final boolean isMem;
 
     VersionPageReader(long version, long offset, IPageReader data, boolean isSeq) {
       this.version = new PriorityMergeReader.MergeReaderPriority(version, offset);
       this.data = data;
       this.isSeq = isSeq;
-    }
-
-    public boolean isAlignedPageReader() {
-      return data instanceof IAlignedPageReader;
+      this.isAligned = data instanceof IAlignedPageReader;
+      this.isMem = data instanceof MemPageReader || data instanceof MemAlignedPageReader;
     }
 
     Statistics getStatistics() {
@@ -1124,11 +1142,24 @@ public class SeriesScanUtil {
     }
 
     TsBlock getAllSatisfiedPageData(boolean ascending) throws IOException {
-      TsBlock tsBlock = data.getAllSatisfiedData();
-      if (!ascending) {
-        tsBlock.reverse();
+      long startTime = System.nanoTime();
+      try {
+        TsBlock tsBlock = data.getAllSatisfiedData();
+        if (!ascending) {
+          tsBlock.reverse();
+        }
+        return tsBlock;
+      } finally {
+        QUERY_METRICS.recordSeriesScanCost(
+            isAligned
+                ? (isMem
+                    ? BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_MEM
+                    : BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_DISK)
+                : (isMem
+                    ? BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_MEM
+                    : BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_DISK),
+            System.nanoTime() - startTime);
       }
-      return tsBlock;
     }
 
     void setFilter(Filter filter) {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
index dac48b1680..9292af0885 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
@@ -22,6 +22,7 @@ package org.apache.iotdb.db.mpp.metric;
 import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.commons.service.metric.enums.Metric;
 import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.metrics.utils.MetricInfo;
 import org.apache.iotdb.metrics.utils.MetricLevel;
 
 import java.util.concurrent.TimeUnit;
@@ -59,6 +60,16 @@ public class QueryMetricsManager {
         operatorType);
   }
 
+  public void recordSeriesScanCost(String stage, long costTimeInNanos) {
+    MetricInfo metricInfo = SeriesScanCostMetricSet.metricInfoMap.get(stage);
+    metricService.timer(
+        costTimeInNanos,
+        TimeUnit.NANOSECONDS,
+        metricInfo.getName(),
+        MetricLevel.IMPORTANT,
+        metricInfo.getTagsInArray());
+  }
+
   public static QueryMetricsManager getInstance() {
     return QueryMetricsManager.QueryMetricsManagerHolder.INSTANCE;
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/metric/SeriesScanCostMetricSet.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/SeriesScanCostMetricSet.java
new file mode 100644
index 0000000000..06920a69ab
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/SeriesScanCostMetricSet.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.mpp.metric;
+
+import org.apache.iotdb.commons.service.metric.enums.Metric;
+import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.metrics.AbstractMetricService;
+import org.apache.iotdb.metrics.metricsets.IMetricSet;
+import org.apache.iotdb.metrics.utils.MetricInfo;
+import org.apache.iotdb.metrics.utils.MetricLevel;
+import org.apache.iotdb.metrics.utils.MetricType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class SeriesScanCostMetricSet implements IMetricSet {
+
+  private static final String metric = Metric.SERIES_SCAN_COST.toString();
+
+  public static final Map<String, MetricInfo> metricInfoMap = new HashMap<>();
+
+  public static final String LOAD_TIMESERIES_METADATA_ALIGNED_MEM =
+      "load_timeseries_metadata_aligned_mem";
+  public static final String LOAD_TIMESERIES_METADATA_ALIGNED_DISK =
+      "load_timeseries_metadata_aligned_disk";
+  public static final String LOAD_TIMESERIES_METADATA_NONALIGNED_MEM =
+      "load_timeseries_metadata_nonaligned_mem";
+  public static final String LOAD_TIMESERIES_METADATA_NONALIGNED_DISK =
+      "load_timeseries_metadata_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        LOAD_TIMESERIES_METADATA_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_timeseries_metadata",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        LOAD_TIMESERIES_METADATA_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_timeseries_metadata",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        LOAD_TIMESERIES_METADATA_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_timeseries_metadata",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        LOAD_TIMESERIES_METADATA_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_timeseries_metadata",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String READ_TIMESERIES_METADATA_CACHE = "read_timeseries_metadata_cache";
+  public static final String READ_TIMESERIES_METADATA_FILE = "read_timeseries_metadata_file";
+
+  static {
+    metricInfoMap.put(
+        READ_TIMESERIES_METADATA_CACHE,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "read_timeseries_metadata",
+            Tag.FROM.toString(),
+            "cache"));
+    metricInfoMap.put(
+        READ_TIMESERIES_METADATA_FILE,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "read_timeseries_metadata",
+            Tag.FROM.toString(),
+            "file"));
+  }
+
+  public static final String TIMESERIES_METADATA_MODIFICATION_ALIGNED =
+      "timeseries_metadata_modification_aligned";
+  public static final String TIMESERIES_METADATA_MODIFICATION_NONALIGNED =
+      "timeseries_metadata_modification_nonaligned";
+
+  static {
+    metricInfoMap.put(
+        TIMESERIES_METADATA_MODIFICATION_ALIGNED,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "timeseries_metadata_modification",
+            Tag.TYPE.toString(),
+            "aligned"));
+    metricInfoMap.put(
+        TIMESERIES_METADATA_MODIFICATION_NONALIGNED,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "timeseries_metadata_modification",
+            Tag.TYPE.toString(),
+            "non_aligned"));
+  }
+
+  public static final String LOAD_CHUNK_METADATA_LIST_ALIGNED_MEM =
+      "load_chunk_metadata_list_aligned_mem";
+  public static final String LOAD_CHUNK_METADATA_LIST_ALIGNED_DISK =
+      "load_chunk_metadata_list_aligned_disk";
+  public static final String LOAD_CHUNK_METADATA_LIST_NONALIGNED_MEM =
+      "load_chunk_metadata_list_nonaligned_mem";
+  public static final String LOAD_CHUNK_METADATA_LIST_NONALIGNED_DISK =
+      "load_chunk_metadata_list_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        LOAD_CHUNK_METADATA_LIST_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_chunk_metadata_list",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        LOAD_CHUNK_METADATA_LIST_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_chunk_metadata_list",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        LOAD_CHUNK_METADATA_LIST_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_chunk_metadata_list",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        LOAD_CHUNK_METADATA_LIST_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "load_chunk_metadata_list",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String CHUNK_METADATA_MODIFICATION_ALIGNED_MEM =
+      "chunk_metadata_modification_aligned_mem";
+  public static final String CHUNK_METADATA_MODIFICATION_ALIGNED_DISK =
+      "chunk_metadata_modification_aligned_disk";
+  public static final String CHUNK_METADATA_MODIFICATION_NONALIGNED_MEM =
+      "chunk_metadata_modification_nonaligned_mem";
+  public static final String CHUNK_METADATA_MODIFICATION_NONALIGNED_DISK =
+      "chunk_metadata_modification_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        CHUNK_METADATA_MODIFICATION_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_modification",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CHUNK_METADATA_MODIFICATION_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_modification",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        CHUNK_METADATA_MODIFICATION_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_modification",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CHUNK_METADATA_MODIFICATION_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_modification",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String CHUNK_METADATA_FILTER_ALIGNED_MEM =
+      "chunk_metadata_filter_aligned_mem";
+  public static final String CHUNK_METADATA_FILTER_ALIGNED_DISK =
+      "chunk_metadata_filter_aligned_disk";
+  public static final String CHUNK_METADATA_FILTER_NONALIGNED_MEM =
+      "chunk_metadata_filter_nonaligned_mem";
+  public static final String CHUNK_METADATA_FILTER_NONALIGNED_DISK =
+      "chunk_metadata_filter_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        CHUNK_METADATA_FILTER_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_filter",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CHUNK_METADATA_FILTER_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_filter",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        CHUNK_METADATA_FILTER_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_filter",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CHUNK_METADATA_FILTER_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "chunk_metadata_filter",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String CONSTRUCT_CHUNK_READER_ALIGNED_MEM =
+      "construct_chunk_reader_aligned_mem";
+  public static final String CONSTRUCT_CHUNK_READER_ALIGNED_DISK =
+      "construct_chunk_reader_aligned_disk";
+  public static final String CONSTRUCT_CHUNK_READER_NONALIGNED_MEM =
+      "construct_chunk_reader_nonaligned_mem";
+  public static final String CONSTRUCT_CHUNK_READER_NONALIGNED_DISK =
+      "construct_chunk_reader_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        CONSTRUCT_CHUNK_READER_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "construct_chunk_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CONSTRUCT_CHUNK_READER_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "construct_chunk_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        CONSTRUCT_CHUNK_READER_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "construct_chunk_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        CONSTRUCT_CHUNK_READER_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "construct_chunk_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String READ_CHUNK_ALL = "read_chunk_all";
+  public static final String READ_CHUNK_FILE = "read_chunk_file";
+
+  static {
+    metricInfoMap.put(
+        READ_CHUNK_ALL,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "read_chunk",
+            Tag.FROM.toString(),
+            "all"));
+    metricInfoMap.put(
+        READ_CHUNK_FILE,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "read_chunk",
+            Tag.FROM.toString(),
+            "file"));
+  }
+
+  public static final String INIT_CHUNK_READER_ALIGNED_MEM = "init_chunk_reader_aligned_mem";
+  public static final String INIT_CHUNK_READER_ALIGNED_DISK = "init_chunk_reader_aligned_disk";
+  public static final String INIT_CHUNK_READER_NONALIGNED_MEM = "init_chunk_reader_nonaligned_mem";
+  public static final String INIT_CHUNK_READER_NONALIGNED_DISK =
+      "init_chunk_reader_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        INIT_CHUNK_READER_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "init_chunk_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        INIT_CHUNK_READER_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "init_chunk_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        INIT_CHUNK_READER_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "init_chunk_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        INIT_CHUNK_READER_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "init_chunk_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_MEM =
+      "build_tsblock_from_page_reader_aligned_mem";
+  public static final String BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_DISK =
+      "build_tsblock_from_page_reader_aligned_disk";
+  public static final String BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_MEM =
+      "build_tsblock_from_page_reader_nonaligned_mem";
+  public static final String BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_DISK =
+      "build_tsblock_from_page_reader_nonaligned_disk";
+
+  static {
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_page_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_PAGE_READER_ALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_page_reader",
+            Tag.TYPE.toString(),
+            "aligned",
+            Tag.FROM.toString(),
+            "disk"));
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_MEM,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_page_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "mem"));
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_PAGE_READER_NONALIGNED_DISK,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_page_reader",
+            Tag.TYPE.toString(),
+            "non_aligned",
+            Tag.FROM.toString(),
+            "disk"));
+  }
+
+  public static final String BUILD_TSBLOCK_FROM_MERGE_READER_ALIGNED =
+      "build_tsblock_from_merge_reader_aligned";
+  public static final String BUILD_TSBLOCK_FROM_MERGE_READER_NONALIGNED =
+      "build_tsblock_from_merge_reader_nonaligned";
+
+  static {
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_MERGE_READER_ALIGNED,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_merge_reader",
+            Tag.TYPE.toString(),
+            "aligned"));
+    metricInfoMap.put(
+        BUILD_TSBLOCK_FROM_MERGE_READER_NONALIGNED,
+        new MetricInfo(
+            MetricType.TIMER,
+            metric,
+            Tag.STAGE.toString(),
+            "build_tsblock_from_merge_reader",
+            Tag.TYPE.toString(),
+            "non_aligned"));
+  }
+
+  @Override
+  public void bindTo(AbstractMetricService metricService) {
+    for (MetricInfo metricInfo : metricInfoMap.values()) {
+      metricService.getOrCreateTimer(
+          metricInfo.getName(), MetricLevel.IMPORTANT, metricInfo.getTagsInArray());
+    }
+  }
+
+  @Override
+  public void unbindFrom(AbstractMetricService metricService) {
+    for (MetricInfo metricInfo : metricInfoMap.values()) {
+      metricService.remove(MetricType.TIMER, metric, metricInfo.getTagsInArray());
+    }
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskAlignedChunkLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskAlignedChunkLoader.java
index 3487682a2e..b433641af0 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskAlignedChunkLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskAlignedChunkLoader.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.db.query.reader.chunk;
 
 import org.apache.iotdb.db.engine.cache.ChunkCache;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.tsfile.file.metadata.AlignedChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
@@ -32,9 +33,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CONSTRUCT_CHUNK_READER_ALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.INIT_CHUNK_READER_ALIGNED_DISK;
+
 public class DiskAlignedChunkLoader implements IChunkLoader {
 
   private final boolean debug;
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
 
   public DiskAlignedChunkLoader(boolean debug) {
     this.debug = debug;
@@ -51,17 +56,28 @@ public class DiskAlignedChunkLoader implements IChunkLoader {
   @Override
   public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter timeFilter)
       throws IOException {
-    AlignedChunkMetadata alignedChunkMetadata = (AlignedChunkMetadata) chunkMetaData;
-    Chunk timeChunk =
-        ChunkCache.getInstance()
-            .get((ChunkMetadata) alignedChunkMetadata.getTimeChunkMetadata(), debug);
-    List<Chunk> valueChunkList = new ArrayList<>();
-    for (IChunkMetadata valueChunkMetadata : alignedChunkMetadata.getValueChunkMetadataList()) {
-      valueChunkList.add(
-          valueChunkMetadata == null
-              ? null
-              : ChunkCache.getInstance().get((ChunkMetadata) valueChunkMetadata, debug));
+    long t1 = System.nanoTime();
+    try {
+      AlignedChunkMetadata alignedChunkMetadata = (AlignedChunkMetadata) chunkMetaData;
+      Chunk timeChunk =
+          ChunkCache.getInstance()
+              .get((ChunkMetadata) alignedChunkMetadata.getTimeChunkMetadata(), debug);
+      List<Chunk> valueChunkList = new ArrayList<>();
+      for (IChunkMetadata valueChunkMetadata : alignedChunkMetadata.getValueChunkMetadataList()) {
+        valueChunkList.add(
+            valueChunkMetadata == null
+                ? null
+                : ChunkCache.getInstance().get((ChunkMetadata) valueChunkMetadata, debug));
+      }
+
+      long t2 = System.nanoTime();
+      IChunkReader chunkReader = new AlignedChunkReader(timeChunk, valueChunkList, timeFilter);
+      QUERY_METRICS.recordSeriesScanCost(INIT_CHUNK_READER_ALIGNED_DISK, System.nanoTime() - t2);
+
+      return chunkReader;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          CONSTRUCT_CHUNK_READER_ALIGNED_DISK, System.nanoTime() - t1);
     }
-    return new AlignedChunkReader(timeChunk, valueChunkList, timeFilter);
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
index 2f926a7ce0..85ee83bee4 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
@@ -20,6 +20,7 @@
 package org.apache.iotdb.db.query.reader.chunk;
 
 import org.apache.iotdb.db.engine.cache.ChunkCache;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.read.common.Chunk;
@@ -30,11 +31,16 @@ import org.apache.iotdb.tsfile.read.reader.chunk.ChunkReader;
 
 import java.io.IOException;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CONSTRUCT_CHUNK_READER_NONALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.INIT_CHUNK_READER_NONALIGNED_DISK;
+
 /** To read one chunk from disk, and only used in iotdb server module */
 public class DiskChunkLoader implements IChunkLoader {
 
   private final boolean debug;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public DiskChunkLoader(boolean debug) {
     this.debug = debug;
   }
@@ -52,8 +58,19 @@ public class DiskChunkLoader implements IChunkLoader {
   @Override
   public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter timeFilter)
       throws IOException {
-    Chunk chunk = ChunkCache.getInstance().get((ChunkMetadata) chunkMetaData, debug);
-    chunk.setFromOldFile(chunkMetaData.isFromOldTsFile());
-    return new ChunkReader(chunk, timeFilter);
+    long t1 = System.nanoTime();
+    try {
+      Chunk chunk = ChunkCache.getInstance().get((ChunkMetadata) chunkMetaData, debug);
+      chunk.setFromOldFile(chunkMetaData.isFromOldTsFile());
+
+      long t2 = System.nanoTime();
+      IChunkReader chunkReader = new ChunkReader(chunk, timeFilter);
+      QUERY_METRICS.recordSeriesScanCost(INIT_CHUNK_READER_NONALIGNED_DISK, System.nanoTime() - t2);
+
+      return chunkReader;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          CONSTRUCT_CHUNK_READER_NONALIGNED_DISK, System.nanoTime() - t1);
+    }
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemAlignedChunkLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemAlignedChunkLoader.java
index b307af1b06..f134618025 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemAlignedChunkLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemAlignedChunkLoader.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.db.query.reader.chunk;
 
 import org.apache.iotdb.db.engine.querycontext.AlignedReadOnlyMemChunk;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.read.common.Chunk;
@@ -26,11 +27,16 @@ import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.reader.IChunkReader;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CONSTRUCT_CHUNK_READER_ALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.INIT_CHUNK_READER_ALIGNED_MEM;
+
 /** To read one aligned chunk from memory, and only used in iotdb server module */
 public class MemAlignedChunkLoader implements IChunkLoader {
 
   private final AlignedReadOnlyMemChunk chunk;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public MemAlignedChunkLoader(AlignedReadOnlyMemChunk chunk) {
     this.chunk = chunk;
   }
@@ -47,6 +53,13 @@ public class MemAlignedChunkLoader implements IChunkLoader {
 
   @Override
   public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter timeFilter) {
-    return new MemAlignedChunkReader(chunk, timeFilter);
+    long startTime = System.nanoTime();
+    try {
+      return new MemAlignedChunkReader(chunk, timeFilter);
+    } finally {
+      long duration = System.nanoTime() - startTime;
+      QUERY_METRICS.recordSeriesScanCost(CONSTRUCT_CHUNK_READER_ALIGNED_MEM, duration);
+      QUERY_METRICS.recordSeriesScanCost(INIT_CHUNK_READER_ALIGNED_MEM, duration);
+    }
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemChunkLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemChunkLoader.java
index eff76f62db..842d2bd394 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemChunkLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/MemChunkLoader.java
@@ -20,6 +20,7 @@
 package org.apache.iotdb.db.query.reader.chunk;
 
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.read.common.Chunk;
@@ -27,11 +28,16 @@ import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 import org.apache.iotdb.tsfile.read.reader.IChunkReader;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CONSTRUCT_CHUNK_READER_NONALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.INIT_CHUNK_READER_NONALIGNED_MEM;
+
 /** To read one chunk from memory, and only used in iotdb server module */
 public class MemChunkLoader implements IChunkLoader {
 
   private final ReadOnlyMemChunk chunk;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public MemChunkLoader(ReadOnlyMemChunk chunk) {
     this.chunk = chunk;
   }
@@ -48,6 +54,13 @@ public class MemChunkLoader implements IChunkLoader {
 
   @Override
   public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter timeFilter) {
-    return new MemChunkReader(chunk, timeFilter);
+    long startTime = System.nanoTime();
+    try {
+      return new MemChunkReader(chunk, timeFilter);
+    } finally {
+      long duration = System.nanoTime() - startTime;
+      QUERY_METRICS.recordSeriesScanCost(CONSTRUCT_CHUNK_READER_NONALIGNED_MEM, duration);
+      QUERY_METRICS.recordSeriesScanCost(INIT_CHUNK_READER_NONALIGNED_MEM, duration);
+    }
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskAlignedChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskAlignedChunkMetadataLoader.java
index 58d4a1c98a..9e8ec7ae4e 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskAlignedChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskAlignedChunkMetadataLoader.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.query.reader.chunk.metadata;
 import org.apache.iotdb.commons.path.AlignedPath;
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.chunk.DiskAlignedChunkLoader;
 import org.apache.iotdb.db.utils.QueryUtils;
@@ -37,6 +38,10 @@ import org.slf4j.LoggerFactory;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_FILTER_ALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_MODIFICATION_ALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_CHUNK_METADATA_LIST_ALIGNED_DISK;
+
 public class DiskAlignedChunkMetadataLoader implements IChunkMetadataLoader {
 
   private final TsFileResource resource;
@@ -46,6 +51,7 @@ public class DiskAlignedChunkMetadataLoader implements IChunkMetadataLoader {
   private final Filter filter;
 
   private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
 
   public DiskAlignedChunkMetadataLoader(
       TsFileResource resource, AlignedPath seriesPath, QueryContext context, Filter filter) {
@@ -57,53 +63,65 @@ public class DiskAlignedChunkMetadataLoader implements IChunkMetadataLoader {
 
   @Override
   public List<IChunkMetadata> loadChunkMetadataList(ITimeSeriesMetadata timeSeriesMetadata) {
-    List<AlignedChunkMetadata> alignedChunkMetadataList =
-        ((AlignedTimeSeriesMetadata) timeSeriesMetadata).getCopiedChunkMetadataList();
-
-    // get all sub sensors' modifications
-    List<List<Modification>> pathModifications =
-        context.getPathModifications(resource.getModFile(), seriesPath);
-
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info(
-          "Modifications size is {} for file Path: {} ",
-          pathModifications.size(),
-          resource.getTsFilePath());
-      pathModifications.forEach(c -> DEBUG_LOGGER.info(c.toString()));
-    }
-
-    // remove ChunkMetadata that have been deleted
-    QueryUtils.modifyAlignedChunkMetaData(alignedChunkMetadataList, pathModifications);
-
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info("After modification Chunk meta data list is: ");
-      alignedChunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+    long t1 = System.nanoTime();
+    try {
+      List<AlignedChunkMetadata> alignedChunkMetadataList =
+          ((AlignedTimeSeriesMetadata) timeSeriesMetadata).getCopiedChunkMetadataList();
+
+      long t2 = System.nanoTime();
+      // get all sub sensors' modifications
+      List<List<Modification>> pathModifications =
+          context.getPathModifications(resource.getModFile(), seriesPath);
+
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info(
+            "Modifications size is {} for file Path: {} ",
+            pathModifications.size(),
+            resource.getTsFilePath());
+        pathModifications.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      }
+
+      // remove ChunkMetadata that have been deleted
+      QueryUtils.modifyAlignedChunkMetaData(alignedChunkMetadataList, pathModifications);
+
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info("After modification Chunk meta data list is: ");
+        alignedChunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      }
+      QUERY_METRICS.recordSeriesScanCost(
+          CHUNK_METADATA_MODIFICATION_ALIGNED_DISK, System.nanoTime() - t2);
+
+      // remove not satisfied ChunkMetaData
+      long t3 = System.nanoTime();
+      alignedChunkMetadataList.removeIf(
+          alignedChunkMetaData ->
+              (filter != null
+                      && !filter.satisfyStartEndTime(
+                          alignedChunkMetaData.getStartTime(), alignedChunkMetaData.getEndTime()))
+                  || alignedChunkMetaData.getStartTime() > alignedChunkMetaData.getEndTime());
+      QUERY_METRICS.recordSeriesScanCost(
+          CHUNK_METADATA_FILTER_ALIGNED_DISK, System.nanoTime() - t3);
+
+      // it is ok, even if it is not thread safe, because the cost of creating a
+      // DiskAlignedChunkLoader is very cheap.
+      alignedChunkMetadataList.forEach(
+          chunkMetadata -> {
+            if (chunkMetadata.needSetChunkLoader()) {
+              chunkMetadata.setFilePath(resource.getTsFilePath());
+              chunkMetadata.setClosed(resource.isClosed());
+              chunkMetadata.setChunkLoader(new DiskAlignedChunkLoader(context.isDebug()));
+            }
+          });
+
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info("After removed by filter Chunk meta data list is: ");
+        alignedChunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      }
+
+      return new ArrayList<>(alignedChunkMetadataList);
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          LOAD_CHUNK_METADATA_LIST_ALIGNED_DISK, System.nanoTime() - t1);
     }
-
-    // remove not satisfied ChunkMetaData
-    alignedChunkMetadataList.removeIf(
-        alignedChunkMetaData ->
-            (filter != null
-                    && !filter.satisfyStartEndTime(
-                        alignedChunkMetaData.getStartTime(), alignedChunkMetaData.getEndTime()))
-                || alignedChunkMetaData.getStartTime() > alignedChunkMetaData.getEndTime());
-
-    // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
-    // very cheap.
-    alignedChunkMetadataList.forEach(
-        chunkMetadata -> {
-          if (chunkMetadata.needSetChunkLoader()) {
-            chunkMetadata.setFilePath(resource.getTsFilePath());
-            chunkMetadata.setClosed(resource.isClosed());
-            chunkMetadata.setChunkLoader(new DiskAlignedChunkLoader(context.isDebug()));
-          }
-        });
-
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info("After removed by filter Chunk meta data list is: ");
-      alignedChunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
-    }
-
-    return new ArrayList<>(alignedChunkMetadataList);
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
index 7b8856ec69..e45d976caa 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.query.reader.chunk.metadata;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.chunk.DiskChunkLoader;
 import org.apache.iotdb.db.utils.QueryUtils;
@@ -35,6 +36,10 @@ import org.slf4j.LoggerFactory;
 
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_FILTER_NONALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_MODIFICATION_NONALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_CHUNK_METADATA_LIST_NONALIGNED_DISK;
+
 public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
 
   private final TsFileResource resource;
@@ -44,6 +49,7 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
   private final Filter filter;
 
   private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
 
   public DiskChunkMetadataLoader(
       TsFileResource resource, PartialPath seriesPath, QueryContext context, Filter filter) {
@@ -55,63 +61,73 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
 
   @Override
   public List<IChunkMetadata> loadChunkMetadataList(ITimeSeriesMetadata timeSeriesMetadata) {
+    long t1 = System.nanoTime();
+    try {
+      List<IChunkMetadata> chunkMetadataList =
+          ((TimeseriesMetadata) timeSeriesMetadata).getCopiedChunkMetadataList();
+
+      long t2 = System.nanoTime();
+      List<Modification> pathModifications =
+          context.getPathModifications(resource.getModFile(), seriesPath);
+
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info(
+            "Modifications size is {} for file Path: {} ",
+            pathModifications.size(),
+            resource.getTsFilePath());
+        pathModifications.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      }
 
-    List<IChunkMetadata> chunkMetadataList =
-        ((TimeseriesMetadata) timeSeriesMetadata).getCopiedChunkMetadataList();
-
-    List<Modification> pathModifications =
-        context.getPathModifications(resource.getModFile(), seriesPath);
-
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info(
-          "Modifications size is {} for file Path: {} ",
-          pathModifications.size(),
-          resource.getTsFilePath());
-      pathModifications.forEach(c -> DEBUG_LOGGER.info(c.toString()));
-    }
+      if (!pathModifications.isEmpty()) {
+        QueryUtils.modifyChunkMetaData(chunkMetadataList, pathModifications);
+      }
 
-    if (!pathModifications.isEmpty()) {
-      QueryUtils.modifyChunkMetaData(chunkMetadataList, pathModifications);
-    }
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info("After modification Chunk meta data list is: ");
+        chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      }
 
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info("After modification Chunk meta data list is: ");
-      chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
-    }
+      QUERY_METRICS.recordSeriesScanCost(
+          CHUNK_METADATA_MODIFICATION_NONALIGNED_DISK, System.nanoTime() - t2);
+
+      // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
+      // very cheap.
+      chunkMetadataList.forEach(
+          chunkMetadata -> {
+            if (chunkMetadata.needSetChunkLoader()) {
+              chunkMetadata.setFilePath(resource.getTsFilePath());
+              chunkMetadata.setClosed(resource.isClosed());
+              chunkMetadata.setChunkLoader(new DiskChunkLoader(context.isDebug()));
+            }
+          });
+
+      // remove not satisfied ChunkMetaData
+      long t3 = System.nanoTime();
+      chunkMetadataList.removeIf(
+          chunkMetaData ->
+              (filter != null
+                      && !filter.satisfyStartEndTime(
+                          chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
+                  || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
+      QUERY_METRICS.recordSeriesScanCost(
+          CHUNK_METADATA_FILTER_NONALIGNED_DISK, System.nanoTime() - t3);
+
+      // For chunkMetadata from old TsFile, do not set version
+      for (IChunkMetadata metadata : chunkMetadataList) {
+        if (!metadata.isFromOldTsFile()) {
+          metadata.setVersion(resource.getVersion());
+        }
+      }
 
-    // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
-    // very cheap.
-    chunkMetadataList.forEach(
-        chunkMetadata -> {
-          if (chunkMetadata.needSetChunkLoader()) {
-            chunkMetadata.setFilePath(resource.getTsFilePath());
-            chunkMetadata.setClosed(resource.isClosed());
-            chunkMetadata.setChunkLoader(new DiskChunkLoader(context.isDebug()));
-          }
-        });
-
-    /*
-     * remove not satisfied ChunkMetaData
-     */
-    chunkMetadataList.removeIf(
-        chunkMetaData ->
-            (filter != null
-                    && !filter.satisfyStartEndTime(
-                        chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
-                || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
-
-    // For chunkMetadata from old TsFile, do not set version
-    for (IChunkMetadata metadata : chunkMetadataList) {
-      if (!metadata.isFromOldTsFile()) {
-        metadata.setVersion(resource.getVersion());
+      if (context.isDebug()) {
+        DEBUG_LOGGER.info("After removed by filter Chunk meta data list is: ");
+        chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
       }
-    }
 
-    if (context.isDebug()) {
-      DEBUG_LOGGER.info("After removed by filter Chunk meta data list is: ");
-      chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
+      return chunkMetadataList;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          LOAD_CHUNK_METADATA_LIST_NONALIGNED_DISK, System.nanoTime() - t1);
     }
-
-    return chunkMetadataList;
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemAlignedChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemAlignedChunkMetadataLoader.java
index 6595b2f0c1..8b6dfda2d3 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemAlignedChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemAlignedChunkMetadataLoader.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.query.reader.chunk.metadata;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.chunk.DiskAlignedChunkLoader;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
@@ -30,6 +31,9 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_FILTER_ALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_CHUNK_METADATA_LIST_ALIGNED_MEM;
+
 public class MemAlignedChunkMetadataLoader implements IChunkMetadataLoader {
 
   private final TsFileResource resource;
@@ -37,6 +41,8 @@ public class MemAlignedChunkMetadataLoader implements IChunkMetadataLoader {
   private final QueryContext context;
   private final Filter timeFilter;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public MemAlignedChunkMetadataLoader(
       TsFileResource resource, PartialPath seriesPath, QueryContext context, Filter timeFilter) {
     this.resource = resource;
@@ -47,43 +53,50 @@ public class MemAlignedChunkMetadataLoader implements IChunkMetadataLoader {
 
   @Override
   public List<IChunkMetadata> loadChunkMetadataList(ITimeSeriesMetadata timeSeriesMetadata) {
+    long t1 = System.nanoTime();
+    try {
+      // There is no need to apply modifications to these, because we already do that while
+      // generating it in TSP
+      List<IChunkMetadata> chunkMetadataList = resource.getChunkMetadataList(seriesPath);
 
-    // There is no need to apply modifications to these, because we already do that while generating
-    // it in TSP
-    List<IChunkMetadata> chunkMetadataList = resource.getChunkMetadataList(seriesPath);
+      chunkMetadataList.forEach(
+          chunkMetadata -> {
+            if (chunkMetadata.needSetChunkLoader()) {
+              chunkMetadata.setFilePath(resource.getTsFilePath());
+              chunkMetadata.setClosed(resource.isClosed());
+              chunkMetadata.setChunkLoader(new DiskAlignedChunkLoader(context.isDebug()));
+            }
+          });
 
-    chunkMetadataList.forEach(
-        chunkMetadata -> {
-          if (chunkMetadata.needSetChunkLoader()) {
-            chunkMetadata.setFilePath(resource.getTsFilePath());
-            chunkMetadata.setClosed(resource.isClosed());
-            chunkMetadata.setChunkLoader(new DiskAlignedChunkLoader(context.isDebug()));
+      // There is no need to set IChunkLoader for it, because the MemChunkLoader has already been
+      // set
+      // while creating ReadOnlyMemChunk
+      List<ReadOnlyMemChunk> memChunks = resource.getReadOnlyMemChunk(seriesPath);
+      if (memChunks != null) {
+        for (ReadOnlyMemChunk readOnlyMemChunk : memChunks) {
+          if (!memChunks.isEmpty()) {
+            chunkMetadataList.add(readOnlyMemChunk.getChunkMetaData());
           }
-        });
-
-    // There is no need to set IChunkLoader for it, because the MemChunkLoader has already been set
-    // while creating ReadOnlyMemChunk
-    List<ReadOnlyMemChunk> memChunks = resource.getReadOnlyMemChunk(seriesPath);
-    if (memChunks != null) {
-      for (ReadOnlyMemChunk readOnlyMemChunk : memChunks) {
-        if (!memChunks.isEmpty()) {
-          chunkMetadataList.add(readOnlyMemChunk.getChunkMetaData());
         }
       }
-    }
-    /*
-     * remove not satisfied ChunkMetaData
-     */
-    chunkMetadataList.removeIf(
-        chunkMetaData ->
-            (timeFilter != null
-                    && !timeFilter.satisfyStartEndTime(
-                        chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
-                || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
 
-    for (IChunkMetadata metadata : chunkMetadataList) {
-      metadata.setVersion(resource.getVersion());
+      // remove not satisfied ChunkMetaData
+      long t2 = System.nanoTime();
+      chunkMetadataList.removeIf(
+          chunkMetaData ->
+              (timeFilter != null
+                      && !timeFilter.satisfyStartEndTime(
+                          chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
+                  || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
+      QUERY_METRICS.recordSeriesScanCost(CHUNK_METADATA_FILTER_ALIGNED_MEM, System.nanoTime() - t2);
+
+      for (IChunkMetadata metadata : chunkMetadataList) {
+        metadata.setVersion(resource.getVersion());
+      }
+      return chunkMetadataList;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          LOAD_CHUNK_METADATA_LIST_ALIGNED_MEM, System.nanoTime() - t1);
     }
-    return chunkMetadataList;
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
index 4e88609901..7e60df653d 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.query.reader.chunk.metadata;
 import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.chunk.DiskChunkLoader;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
@@ -30,6 +31,9 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.CHUNK_METADATA_FILTER_NONALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_CHUNK_METADATA_LIST_NONALIGNED_MEM;
+
 public class MemChunkMetadataLoader implements IChunkMetadataLoader {
 
   private final TsFileResource resource;
@@ -37,6 +41,8 @@ public class MemChunkMetadataLoader implements IChunkMetadataLoader {
   private final QueryContext context;
   private final Filter timeFilter;
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   public MemChunkMetadataLoader(
       TsFileResource resource, PartialPath seriesPath, QueryContext context, Filter timeFilter) {
     this.resource = resource;
@@ -47,42 +53,50 @@ public class MemChunkMetadataLoader implements IChunkMetadataLoader {
 
   @Override
   public List<IChunkMetadata> loadChunkMetadataList(ITimeSeriesMetadata timeSeriesMetadata) {
-    // There is no need to apply modifications to these, because we already do that while generating
-    // it in TSP
-    List<IChunkMetadata> chunkMetadataList = resource.getChunkMetadataList(seriesPath);
+    long t1 = System.nanoTime();
+    try {
+      // There is no need to apply modifications to these, because we already do that while
+      // generating it in TSP
+      List<IChunkMetadata> chunkMetadataList = resource.getChunkMetadataList(seriesPath);
 
-    // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
-    // very cheap.
-    chunkMetadataList.forEach(
-        chunkMetadata -> {
-          if (chunkMetadata.needSetChunkLoader()) {
-            chunkMetadata.setFilePath(resource.getTsFilePath());
-            chunkMetadata.setClosed(resource.isClosed());
-            chunkMetadata.setChunkLoader(new DiskChunkLoader(context.isDebug()));
-          }
-        });
+      // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
+      // very cheap.
+      chunkMetadataList.forEach(
+          chunkMetadata -> {
+            if (chunkMetadata.needSetChunkLoader()) {
+              chunkMetadata.setFilePath(resource.getTsFilePath());
+              chunkMetadata.setClosed(resource.isClosed());
+              chunkMetadata.setChunkLoader(new DiskChunkLoader(context.isDebug()));
+            }
+          });
 
-    List<ReadOnlyMemChunk> memChunks = resource.getReadOnlyMemChunk(seriesPath);
-    if (memChunks != null) {
-      for (ReadOnlyMemChunk readOnlyMemChunk : memChunks) {
-        if (!memChunks.isEmpty()) {
-          chunkMetadataList.add(readOnlyMemChunk.getChunkMetaData());
+      List<ReadOnlyMemChunk> memChunks = resource.getReadOnlyMemChunk(seriesPath);
+      if (memChunks != null) {
+        for (ReadOnlyMemChunk readOnlyMemChunk : memChunks) {
+          if (!memChunks.isEmpty()) {
+            chunkMetadataList.add(readOnlyMemChunk.getChunkMetaData());
+          }
         }
       }
-    }
-    /*
-     * remove not satisfied ChunkMetaData
-     */
-    chunkMetadataList.removeIf(
-        chunkMetaData ->
-            (timeFilter != null
-                    && !timeFilter.satisfyStartEndTime(
-                        chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
-                || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
 
-    for (IChunkMetadata metadata : chunkMetadataList) {
-      metadata.setVersion(resource.getVersion());
+      // remove not satisfied ChunkMetaData
+      long t2 = System.nanoTime();
+      chunkMetadataList.removeIf(
+          chunkMetaData ->
+              (timeFilter != null
+                      && !timeFilter.satisfyStartEndTime(
+                          chunkMetaData.getStartTime(), chunkMetaData.getEndTime()))
+                  || chunkMetaData.getStartTime() > chunkMetaData.getEndTime());
+      QUERY_METRICS.recordSeriesScanCost(
+          CHUNK_METADATA_FILTER_NONALIGNED_MEM, System.nanoTime() - t2);
+
+      for (IChunkMetadata metadata : chunkMetadataList) {
+        metadata.setVersion(resource.getVersion());
+      }
+      return chunkMetadataList;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          LOAD_CHUNK_METADATA_LIST_NONALIGNED_MEM, System.nanoTime() - t1);
     }
-    return chunkMetadataList;
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
index 920080ec39..22096f8986 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.service.metrics;
 
 import org.apache.iotdb.commons.service.metric.MetricService;
 import org.apache.iotdb.db.mpp.metric.QueryPlanCostMetricSet;
+import org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet;
 import org.apache.iotdb.metrics.metricsets.jvm.JvmMetrics;
 import org.apache.iotdb.metrics.metricsets.logback.LogbackMetrics;
 
@@ -35,5 +36,6 @@ public class DataNodeMetricsHelper {
 
     // bind query related metrics
     MetricService.getInstance().addMetricSet(new QueryPlanCostMetricSet());
+    MetricService.getInstance().addMetricSet(new SeriesScanCostMetricSet());
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java b/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
index bfd6a0bf80..093fe15c5d 100644
--- a/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/utils/FileLoaderUtils.java
@@ -25,6 +25,7 @@ import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache.TimeSeriesMetada
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.reader.chunk.metadata.DiskAlignedChunkMetadataLoader;
 import org.apache.iotdb.db.query.reader.chunk.metadata.DiskChunkMetadataLoader;
@@ -51,8 +52,17 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_TIMESERIES_METADATA_ALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_TIMESERIES_METADATA_ALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_TIMESERIES_METADATA_NONALIGNED_DISK;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.LOAD_TIMESERIES_METADATA_NONALIGNED_MEM;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.TIMESERIES_METADATA_MODIFICATION_ALIGNED;
+import static org.apache.iotdb.db.mpp.metric.SeriesScanCostMetricSet.TIMESERIES_METADATA_MODIFICATION_NONALIGNED;
+
 public class FileLoaderUtils {
 
+  private static final QueryMetricsManager QUERY_METRICS = QueryMetricsManager.getInstance();
+
   private FileLoaderUtils() {}
 
   public static void loadOrGenerateResource(TsFileResource tsFileResource) throws IOException {
@@ -124,51 +134,68 @@ public class FileLoaderUtils {
       Filter filter,
       Set<String> allSensors)
       throws IOException {
+    long t1 = System.nanoTime();
+    boolean loadFromMem = false;
+    try {
+      // common path
+      TimeseriesMetadata timeSeriesMetadata;
+      // If the tsfile is closed, we need to load from tsfile
+      if (resource.isClosed()) {
+        // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex
+        // we should not ignore the non-exist of device in TsFileMetadata
+        timeSeriesMetadata =
+            TimeSeriesMetadataCache.getInstance()
+                .get(
+                    new TimeSeriesMetadataCache.TimeSeriesMetadataCacheKey(
+                        resource.getTsFilePath(),
+                        seriesPath.getDevice(),
+                        seriesPath.getMeasurement()),
+                    allSensors,
+                    resource.getTimeIndexType() != 1,
+                    context.isDebug());
+        if (timeSeriesMetadata != null) {
+          timeSeriesMetadata.setChunkMetadataLoader(
+              new DiskChunkMetadataLoader(resource, seriesPath, context, filter));
+        }
+      } else { // if the tsfile is unclosed, we just get it directly from TsFileResource
+        loadFromMem = true;
 
-    // common path
-    TimeseriesMetadata timeSeriesMetadata;
-    // If the tsfile is closed, we need to load from tsfile
-    if (resource.isClosed()) {
-      // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex
-      // we should not ignore the non-exist of device in TsFileMetadata
-      timeSeriesMetadata =
-          TimeSeriesMetadataCache.getInstance()
-              .get(
-                  new TimeSeriesMetadataCache.TimeSeriesMetadataCacheKey(
-                      resource.getTsFilePath(),
-                      seriesPath.getDevice(),
-                      seriesPath.getMeasurement()),
-                  allSensors,
-                  resource.getTimeIndexType() != 1,
-                  context.isDebug());
-      if (timeSeriesMetadata != null) {
-        timeSeriesMetadata.setChunkMetadataLoader(
-            new DiskChunkMetadataLoader(resource, seriesPath, context, filter));
-      }
-    } else { // if the tsfile is unclosed, we just get it directly from TsFileResource
-      timeSeriesMetadata = (TimeseriesMetadata) resource.getTimeSeriesMetadata(seriesPath);
-      if (timeSeriesMetadata != null) {
-        timeSeriesMetadata.setChunkMetadataLoader(
-            new MemChunkMetadataLoader(resource, seriesPath, context, filter));
+        timeSeriesMetadata = (TimeseriesMetadata) resource.getTimeSeriesMetadata(seriesPath);
+        if (timeSeriesMetadata != null) {
+          timeSeriesMetadata.setChunkMetadataLoader(
+              new MemChunkMetadataLoader(resource, seriesPath, context, filter));
+        }
       }
-    }
 
-    if (timeSeriesMetadata != null) {
-      List<Modification> pathModifications =
-          context.getPathModifications(resource.getModFile(), seriesPath);
-      timeSeriesMetadata.setModified(!pathModifications.isEmpty());
-      if (timeSeriesMetadata.getStatistics().getStartTime()
-          > timeSeriesMetadata.getStatistics().getEndTime()) {
-        return null;
-      }
-      if (filter != null
-          && !filter.satisfyStartEndTime(
-              timeSeriesMetadata.getStatistics().getStartTime(),
-              timeSeriesMetadata.getStatistics().getEndTime())) {
-        return null;
+      if (timeSeriesMetadata != null) {
+        long t2 = System.nanoTime();
+        try {
+          List<Modification> pathModifications =
+              context.getPathModifications(resource.getModFile(), seriesPath);
+          timeSeriesMetadata.setModified(!pathModifications.isEmpty());
+          if (timeSeriesMetadata.getStatistics().getStartTime()
+              > timeSeriesMetadata.getStatistics().getEndTime()) {
+            return null;
+          }
+          if (filter != null
+              && !filter.satisfyStartEndTime(
+                  timeSeriesMetadata.getStatistics().getStartTime(),
+                  timeSeriesMetadata.getStatistics().getEndTime())) {
+            return null;
+          }
+        } finally {
+          QUERY_METRICS.recordSeriesScanCost(
+              TIMESERIES_METADATA_MODIFICATION_NONALIGNED, System.nanoTime() - t2);
+        }
       }
+      return timeSeriesMetadata;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          loadFromMem
+              ? LOAD_TIMESERIES_METADATA_NONALIGNED_MEM
+              : LOAD_TIMESERIES_METADATA_NONALIGNED_DISK,
+          System.nanoTime() - t1);
     }
-    return timeSeriesMetadata;
   }
 
   /**
@@ -181,87 +208,105 @@ public class FileLoaderUtils {
   public static AlignedTimeSeriesMetadata loadTimeSeriesMetadata(
       TsFileResource resource, AlignedPath vectorPath, QueryContext context, Filter filter)
       throws IOException {
-    AlignedTimeSeriesMetadata alignedTimeSeriesMetadata = null;
-    // If the tsfile is closed, we need to load from tsfile
-    if (resource.isClosed()) {
-      // load all the TimeseriesMetadata of vector, the first one is for time column and the
-      // remaining is for sub sensors
-      // the order of timeSeriesMetadata list is same as subSensorList's order
-      TimeSeriesMetadataCache cache = TimeSeriesMetadataCache.getInstance();
-      List<String> valueMeasurementList = vectorPath.getMeasurementList();
-      Set<String> allSensors = new HashSet<>(valueMeasurementList);
-      allSensors.add("");
-      boolean isDebug = context.isDebug();
-      String filePath = resource.getTsFilePath();
-      String deviceId = vectorPath.getDevice();
+    long t1 = System.nanoTime();
+    boolean loadFromMem = false;
+    try {
+      AlignedTimeSeriesMetadata alignedTimeSeriesMetadata = null;
+      // If the tsfile is closed, we need to load from tsfile
+      if (resource.isClosed()) {
+        // load all the TimeseriesMetadata of vector, the first one is for time column and the
+        // remaining is for sub sensors
+        // the order of timeSeriesMetadata list is same as subSensorList's order
+        TimeSeriesMetadataCache cache = TimeSeriesMetadataCache.getInstance();
+        List<String> valueMeasurementList = vectorPath.getMeasurementList();
+        Set<String> allSensors = new HashSet<>(valueMeasurementList);
+        allSensors.add("");
+        boolean isDebug = context.isDebug();
+        String filePath = resource.getTsFilePath();
+        String deviceId = vectorPath.getDevice();
 
-      // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex
-      // we should not ignore the non-exist of device in TsFileMetadata
-      TimeseriesMetadata timeColumn =
-          cache.get(
-              new TimeSeriesMetadataCacheKey(filePath, deviceId, ""),
-              allSensors,
-              resource.getTimeIndexType() != 1,
-              isDebug);
-      if (timeColumn != null) {
-        List<TimeseriesMetadata> valueTimeSeriesMetadataList =
-            new ArrayList<>(valueMeasurementList.size());
-        // if all the queried aligned sensors does not exist, we will return null
-        boolean exist = false;
-        for (String valueMeasurement : valueMeasurementList) {
-          TimeseriesMetadata valueColumn =
-              cache.get(
-                  new TimeSeriesMetadataCacheKey(filePath, deviceId, valueMeasurement),
-                  allSensors,
-                  resource.getTimeIndexType() != 1,
-                  isDebug);
-          exist = (exist || (valueColumn != null));
-          valueTimeSeriesMetadataList.add(valueColumn);
+        // when resource.getTimeIndexType() == 1, TsFileResource.timeIndexType is deviceTimeIndex
+        // we should not ignore the non-exist of device in TsFileMetadata
+        TimeseriesMetadata timeColumn =
+            cache.get(
+                new TimeSeriesMetadataCacheKey(filePath, deviceId, ""),
+                allSensors,
+                resource.getTimeIndexType() != 1,
+                isDebug);
+        if (timeColumn != null) {
+          List<TimeseriesMetadata> valueTimeSeriesMetadataList =
+              new ArrayList<>(valueMeasurementList.size());
+          // if all the queried aligned sensors does not exist, we will return null
+          boolean exist = false;
+          for (String valueMeasurement : valueMeasurementList) {
+            TimeseriesMetadata valueColumn =
+                cache.get(
+                    new TimeSeriesMetadataCacheKey(filePath, deviceId, valueMeasurement),
+                    allSensors,
+                    resource.getTimeIndexType() != 1,
+                    isDebug);
+            exist = (exist || (valueColumn != null));
+            valueTimeSeriesMetadataList.add(valueColumn);
+          }
+          if (exist) {
+            alignedTimeSeriesMetadata =
+                new AlignedTimeSeriesMetadata(timeColumn, valueTimeSeriesMetadataList);
+            alignedTimeSeriesMetadata.setChunkMetadataLoader(
+                new DiskAlignedChunkMetadataLoader(resource, vectorPath, context, filter));
+          }
         }
-        if (exist) {
-          alignedTimeSeriesMetadata =
-              new AlignedTimeSeriesMetadata(timeColumn, valueTimeSeriesMetadataList);
+      } else { // if the tsfile is unclosed, we just get it directly from TsFileResource
+        loadFromMem = true;
+
+        alignedTimeSeriesMetadata =
+            (AlignedTimeSeriesMetadata) resource.getTimeSeriesMetadata(vectorPath);
+        if (alignedTimeSeriesMetadata != null) {
           alignedTimeSeriesMetadata.setChunkMetadataLoader(
-              new DiskAlignedChunkMetadataLoader(resource, vectorPath, context, filter));
+              new MemAlignedChunkMetadataLoader(resource, vectorPath, context, filter));
         }
       }
-    } else { // if the tsfile is unclosed, we just get it directly from TsFileResource
-      alignedTimeSeriesMetadata =
-          (AlignedTimeSeriesMetadata) resource.getTimeSeriesMetadata(vectorPath);
-      if (alignedTimeSeriesMetadata != null) {
-        alignedTimeSeriesMetadata.setChunkMetadataLoader(
-            new MemAlignedChunkMetadataLoader(resource, vectorPath, context, filter));
-      }
-    }
 
-    if (alignedTimeSeriesMetadata != null) {
-      if (alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getStartTime()
-          > alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getEndTime()) {
-        return null;
-      }
-      if (filter != null
-          && !filter.satisfyStartEndTime(
-              alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getStartTime(),
-              alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getEndTime())) {
-        return null;
-      }
+      if (alignedTimeSeriesMetadata != null) {
+        long t2 = System.nanoTime();
+        try {
+          if (alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getStartTime()
+              > alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getEndTime()) {
+            return null;
+          }
+          if (filter != null
+              && !filter.satisfyStartEndTime(
+                  alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getStartTime(),
+                  alignedTimeSeriesMetadata.getTimeseriesMetadata().getStatistics().getEndTime())) {
+            return null;
+          }
 
-      // set modifications to each aligned path
-      List<TimeseriesMetadata> valueTimeSeriesMetadataList =
-          alignedTimeSeriesMetadata.getValueTimeseriesMetadataList();
-      boolean modified = false;
-      for (int i = 0; i < valueTimeSeriesMetadataList.size(); i++) {
-        if (valueTimeSeriesMetadataList.get(i) != null) {
-          List<Modification> pathModifications =
-              context.getPathModifications(
-                  resource.getModFile(), vectorPath.getPathWithMeasurement(i));
-          valueTimeSeriesMetadataList.get(i).setModified(!pathModifications.isEmpty());
-          modified = (modified || !pathModifications.isEmpty());
+          // set modifications to each aligned path
+          List<TimeseriesMetadata> valueTimeSeriesMetadataList =
+              alignedTimeSeriesMetadata.getValueTimeseriesMetadataList();
+          boolean modified = false;
+          for (int i = 0; i < valueTimeSeriesMetadataList.size(); i++) {
+            if (valueTimeSeriesMetadataList.get(i) != null) {
+              List<Modification> pathModifications =
+                  context.getPathModifications(
+                      resource.getModFile(), vectorPath.getPathWithMeasurement(i));
+              valueTimeSeriesMetadataList.get(i).setModified(!pathModifications.isEmpty());
+              modified = (modified || !pathModifications.isEmpty());
+            }
+          }
+          alignedTimeSeriesMetadata.getTimeseriesMetadata().setModified(modified);
+        } finally {
+          QUERY_METRICS.recordSeriesScanCost(
+              TIMESERIES_METADATA_MODIFICATION_ALIGNED, System.nanoTime() - t2);
         }
       }
-      alignedTimeSeriesMetadata.getTimeseriesMetadata().setModified(modified);
+      return alignedTimeSeriesMetadata;
+    } finally {
+      QUERY_METRICS.recordSeriesScanCost(
+          loadFromMem
+              ? LOAD_TIMESERIES_METADATA_ALIGNED_MEM
+              : LOAD_TIMESERIES_METADATA_ALIGNED_DISK,
+          System.nanoTime() - t1);
     }
-    return alignedTimeSeriesMetadata;
   }
 
   /**
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/AlignedChunkReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/AlignedChunkReader.java
index d1173ab6b3..b65b8a12a0 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/AlignedChunkReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/chunk/AlignedChunkReader.java
@@ -73,21 +73,25 @@ public class AlignedChunkReader implements IChunkReader {
    */
   public AlignedChunkReader(Chunk timeChunk, List<Chunk> valueChunkList, Filter filter)
       throws IOException {
-    this.filter = filter;
-    this.timeChunkDataBuffer = timeChunk.getData();
-    this.valueDeleteIntervalList = new ArrayList<>();
-    this.timeChunkHeader = timeChunk.getHeader();
-    this.unCompressor = IUnCompressor.getUnCompressor(timeChunkHeader.getCompressionType());
-    this.currentTimestamp = Long.MIN_VALUE;
-    List<Statistics> valueChunkStatisticsList = new ArrayList<>();
-    valueChunkList.forEach(
-        chunk -> {
-          valueChunkHeaderList.add(chunk == null ? null : chunk.getHeader());
-          valueChunkDataBufferList.add(chunk == null ? null : chunk.getData());
-          valueChunkStatisticsList.add(chunk == null ? null : chunk.getChunkStatistic());
-          valueDeleteIntervalList.add(chunk == null ? null : chunk.getDeleteIntervalList());
-        });
-    initAllPageReaders(timeChunk.getChunkStatistic(), valueChunkStatisticsList);
+    try {
+      this.filter = filter;
+      this.timeChunkDataBuffer = timeChunk.getData();
+      this.valueDeleteIntervalList = new ArrayList<>();
+      this.timeChunkHeader = timeChunk.getHeader();
+      this.unCompressor = IUnCompressor.getUnCompressor(timeChunkHeader.getCompressionType());
+      this.currentTimestamp = Long.MIN_VALUE;
+      List<Statistics> valueChunkStatisticsList = new ArrayList<>();
+      valueChunkList.forEach(
+          chunk -> {
+            valueChunkHeaderList.add(chunk == null ? null : chunk.getHeader());
+            valueChunkDataBufferList.add(chunk == null ? null : chunk.getData());
+            valueChunkStatisticsList.add(chunk == null ? null : chunk.getChunkStatistic());
+            valueDeleteIntervalList.add(chunk == null ? null : chunk.getDeleteIntervalList());
+          });
+      initAllPageReaders(timeChunk.getChunkStatistic(), valueChunkStatisticsList);
+    } finally {
+
+    }
   }
 
   /**


[iotdb] 01/05: add metrics: query_plan_cost

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch lmh/addQueryMetrics
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 08d478707b464b43627a889be9f7bf6bc19e9f72
Author: Minghui Liu <li...@foxmail.com>
AuthorDate: Thu Dec 15 10:43:47 2022 +0800

    add metrics: query_plan_cost
---
 .../apache/iotdb/metrics/config/MetricConfig.java  |   4 +-
 .../iotdb/commons/service/metric/enums/Metric.java |   3 +-
 .../iotdb/commons/service/metric/enums/Tag.java    |   3 +-
 .../iotdb/db/engine/cache/ChunkCacheMetrics.java   |   3 +-
 .../cache/TimeSeriesMetadataCacheMetrics.java      |   2 +-
 .../iotdb/db/mpp/metric/QueryMetricsManager.java   |  49 +++++++++
 .../metric/QueryPlanCostMetrics.java}              |  58 +++++------
 .../iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java  | 114 +++++++++------------
 .../apache/iotdb/db/mpp/plan/analyze/Analyzer.java |  11 +-
 .../db/mpp/plan/execution/QueryExecution.java      |   8 ++
 .../db/mpp/plan/parser/StatementGenerator.java     |  88 ++++++++--------
 .../iotdb/db/mpp/plan/planner/LogicalPlanner.java  |   9 +-
 .../db/service/metrics/DataNodeMetricsHelper.java  |   4 +
 13 files changed, 214 insertions(+), 142 deletions(-)

diff --git a/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java b/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
index a2558f8b9b..b17410e12d 100644
--- a/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
+++ b/metrics/interface/src/main/java/org/apache/iotdb/metrics/config/MetricConfig.java
@@ -34,10 +34,10 @@ public class MetricConfig {
   private MetricFrameType metricFrameType = MetricFrameType.MICROMETER;
 
   /** The list of reporters provide metrics for external tool */
-  private List<ReporterType> metricReporterList = Collections.emptyList();
+  private List<ReporterType> metricReporterList = Collections.singletonList(ReporterType.JMX);
 
   /** The level of metric service */
-  private MetricLevel metricLevel = MetricLevel.CORE;
+  private MetricLevel metricLevel = MetricLevel.IMPORTANT;
 
   /** The period of async collection of some metrics in second */
   private Integer asyncCollectPeriodInSecond = 5;
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index fc534ef595..cc9b017328 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -61,7 +61,8 @@ public enum Metric {
   THRIFT_CONNECTIONS,
   THRIFT_ACTIVE_THREADS,
   IOT_CONSENSUS,
-  STAGE;
+  STAGE,
+  QUERY_PLAN_COST;
 
   @Override
   public String toString() {
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
index 1b02b0161c..65280a22a2 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Tag.java
@@ -23,7 +23,8 @@ public enum Tag {
   TYPE,
   NAME,
   REGION,
-  STATUS;
+  STATUS,
+  STAGE;
 
   @Override
   public String toString() {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java
index 58903a6191..5b092a3930 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java
@@ -29,7 +29,8 @@ import org.apache.iotdb.metrics.utils.MetricType;
 import java.util.Objects;
 
 public class ChunkCacheMetrics implements IMetricSet {
-  private ChunkCache chunkCache;
+
+  private final ChunkCache chunkCache;
 
   public ChunkCacheMetrics(ChunkCache chunkCache) {
     this.chunkCache = chunkCache;
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCacheMetrics.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCacheMetrics.java
index 84684f624c..d0a5e66008 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCacheMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCacheMetrics.java
@@ -30,7 +30,7 @@ import java.util.Objects;
 
 public class TimeSeriesMetadataCacheMetrics implements IMetricSet {
 
-  private TimeSeriesMetadataCache timeSeriesMetadataCache;
+  private final TimeSeriesMetadataCache timeSeriesMetadataCache;
 
   public TimeSeriesMetadataCacheMetrics(TimeSeriesMetadataCache timeSeriesMetadataCache) {
     this.timeSeriesMetadataCache = timeSeriesMetadataCache;
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
new file mode 100644
index 0000000000..8a4e878733
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryMetricsManager.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.mpp.metric;
+
+import org.apache.iotdb.commons.service.metric.MetricService;
+import org.apache.iotdb.commons.service.metric.enums.Metric;
+import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.metrics.type.Timer;
+import org.apache.iotdb.metrics.utils.MetricLevel;
+
+public class QueryMetricsManager {
+
+  private final MetricService metricService = MetricService.getInstance();
+
+  public void addPlanCost(String stage, long costTimeInNanos) {
+    Timer timer =
+        metricService.getOrCreateTimer(
+            Metric.QUERY_PLAN_COST.toString(), MetricLevel.IMPORTANT, Tag.STAGE.toString(), stage);
+    timer.updateNanos(costTimeInNanos);
+  }
+
+  public static QueryMetricsManager getInstance() {
+    return QueryMetricsManager.QueryMetricsManagerHolder.INSTANCE;
+  }
+
+  private static class QueryMetricsManagerHolder {
+
+    private static final QueryMetricsManager INSTANCE = new QueryMetricsManager();
+
+    private QueryMetricsManagerHolder() {}
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java
similarity index 54%
copy from server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java
copy to server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java
index 58903a6191..122ba3ea23 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCacheMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/metric/QueryPlanCostMetrics.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.apache.iotdb.db.engine.cache;
+package org.apache.iotdb.db.mpp.metric;
 
 import org.apache.iotdb.commons.service.metric.enums.Metric;
 import org.apache.iotdb.commons.service.metric.enums.Tag;
@@ -26,42 +26,42 @@ import org.apache.iotdb.metrics.metricsets.IMetricSet;
 import org.apache.iotdb.metrics.utils.MetricLevel;
 import org.apache.iotdb.metrics.utils.MetricType;
 
-import java.util.Objects;
+import java.util.Arrays;
+import java.util.List;
 
-public class ChunkCacheMetrics implements IMetricSet {
-  private ChunkCache chunkCache;
+public class QueryPlanCostMetrics implements IMetricSet {
 
-  public ChunkCacheMetrics(ChunkCache chunkCache) {
-    this.chunkCache = chunkCache;
-  }
+  public static final String SQL_PARSER = "sql_parser";
+  public static final String ANALYZER = "analyzer";
+  public static final String LOGICAL_PLANNER = "logical_planner";
+  public static final String DISTRIBUTION_PLANNER = "distribution_planner";
 
-  @Override
-  public void bindTo(AbstractMetricService metricService) {
-    metricService.createAutoGauge(
-        Metric.CACHE_HIT.toString(),
-        MetricLevel.IMPORTANT,
-        chunkCache,
-        o -> (long) o.getHitRate(),
-        Tag.NAME.toString(),
-        "chunk");
-  }
+  public static final String PARTITION_FETCHER = "partition_fetcher";
+  public static final String SCHEMA_FETCHER = "schema_fetcher";
 
-  @Override
-  public void unbindFrom(AbstractMetricService metricService) {
-    metricService.remove(
-        MetricType.AUTO_GAUGE, Metric.CACHE_HIT.toString(), Tag.NAME.toString(), "chunk");
-  }
+  private final String metric = Metric.QUERY_PLAN_COST.toString();
+  private final String tagKey = Tag.STAGE.toString();
+
+  private static final List<String> stages =
+      Arrays.asList(
+          SQL_PARSER,
+          ANALYZER,
+          LOGICAL_PLANNER,
+          DISTRIBUTION_PLANNER,
+          PARTITION_FETCHER,
+          SCHEMA_FETCHER);
 
   @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-    ChunkCacheMetrics that = (ChunkCacheMetrics) o;
-    return Objects.equals(chunkCache, that.chunkCache);
+  public void bindTo(AbstractMetricService metricService) {
+    for (String stage : stages) {
+      metricService.getOrCreateTimer(metric, MetricLevel.IMPORTANT, tagKey, stage);
+    }
   }
 
   @Override
-  public int hashCode() {
-    return Objects.hash(chunkCache);
+  public void unbindFrom(AbstractMetricService metricService) {
+    for (String stage : stages) {
+      metricService.remove(MetricType.TIMER, metric, tagKey, stage);
+    }
   }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
index 81910ef650..8b582fb05a 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/AnalyzeVisitor.java
@@ -46,6 +46,7 @@ import org.apache.iotdb.db.mpp.common.header.DatasetHeader;
 import org.apache.iotdb.db.mpp.common.header.DatasetHeaderFactory;
 import org.apache.iotdb.db.mpp.common.schematree.DeviceSchemaInfo;
 import org.apache.iotdb.db.mpp.common.schematree.ISchemaTree;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.Coordinator;
 import org.apache.iotdb.db.mpp.plan.execution.ExecutionResult;
 import org.apache.iotdb.db.mpp.plan.expression.Expression;
@@ -153,6 +154,8 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.LOSS;
 import static org.apache.iotdb.commons.conf.IoTDBConstant.ONE_LEVEL_PATH_WILDCARD;
 import static org.apache.iotdb.db.metadata.MetadataConstant.ALL_RESULT_NODES;
 import static org.apache.iotdb.db.mpp.common.header.ColumnHeaderConstant.DEVICE;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.PARTITION_FETCHER;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.SCHEMA_FETCHER;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetDevice;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetMeasurement;
 import static org.apache.iotdb.db.mpp.plan.analyze.SelectIntoUtils.constructTargetPath;
@@ -201,13 +204,16 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
 
       // request schema fetch API
       logger.debug("[StartFetchSchema]");
+      long startTime = System.nanoTime();
       ISchemaTree schemaTree;
       if (queryStatement.isGroupByTag()) {
         schemaTree = schemaFetcher.fetchSchemaWithTags(patternTree);
       } else {
         schemaTree = schemaFetcher.fetchSchema(patternTree);
       }
+      QueryMetricsManager.getInstance().addPlanCost(SCHEMA_FETCHER, System.nanoTime() - startTime);
       logger.debug("[EndFetchSchema]");
+
       // If there is no leaf node in the schema tree, the query should be completed immediately
       if (schemaTree.isEmpty()) {
         if (queryStatement.isSelectInto()) {
@@ -228,8 +234,23 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
         if (analysis.hasValueFilter()) {
           throw new SemanticException("Only time filters are supported in LAST query");
         }
+
         analyzeOrderBy(analysis, queryStatement);
-        return analyzeLast(analysis, schemaTree.getAllMeasurement(), schemaTree);
+
+        List<MeasurementPath> allSelectedPath = schemaTree.getAllMeasurement();
+        analyzeLastSource(analysis, allSelectedPath);
+
+        // set header
+        analysis.setRespDatasetHeader(DatasetHeaderFactory.getLastQueryHeader());
+
+        // fetch partition information
+        Set<String> deviceSet =
+            allSelectedPath.stream().map(MeasurementPath::getDevice).collect(Collectors.toSet());
+        DataPartition dataPartition =
+            fetchDataPartitionByDevices(deviceSet, schemaTree, analysis.getGlobalTimeFilter());
+        analysis.setDataPartitionInfo(dataPartition);
+
+        return analysis;
       }
 
       List<Pair<Expression, String>> outputExpressions;
@@ -328,8 +349,7 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
     analysis.setHasValueFilter(hasValueFilter);
   }
 
-  private Analysis analyzeLast(
-      Analysis analysis, List<MeasurementPath> allSelectedPath, ISchemaTree schemaTree) {
+  private void analyzeLastSource(Analysis analysis, List<MeasurementPath> allSelectedPath) {
     Set<Expression> sourceExpressions;
     List<SortItem> sortItemList = analysis.getMergeOrderParameter().getSortItemList();
     if (sortItemList.size() > 0) {
@@ -352,47 +372,7 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
               .map(TimeSeriesOperand::new)
               .collect(Collectors.toCollection(LinkedHashSet::new));
     }
-
     analysis.setSourceExpressions(sourceExpressions);
-
-    analysis.setRespDatasetHeader(DatasetHeaderFactory.getLastQueryHeader());
-
-    Set<String> deviceSet =
-        allSelectedPath.stream().map(MeasurementPath::getDevice).collect(Collectors.toSet());
-
-    Pair<List<TTimePartitionSlot>, Pair<Boolean, Boolean>> res =
-        getTimePartitionSlotList(analysis.getGlobalTimeFilter());
-
-    DataPartition dataPartition;
-
-    // there is no satisfied time range
-    if (res.left.isEmpty() && !res.right.left) {
-      dataPartition =
-          new DataPartition(
-              Collections.emptyMap(),
-              CONFIG.getSeriesPartitionExecutorClass(),
-              CONFIG.getSeriesPartitionSlotNum());
-    } else {
-      Map<String, List<DataPartitionQueryParam>> sgNameToQueryParamsMap = new HashMap<>();
-      for (String devicePath : deviceSet) {
-        DataPartitionQueryParam queryParam =
-            new DataPartitionQueryParam(devicePath, res.left, res.right.left, res.right.right);
-        sgNameToQueryParamsMap
-            .computeIfAbsent(schemaTree.getBelongedDatabase(devicePath), key -> new ArrayList<>())
-            .add(queryParam);
-      }
-
-      if (res.right.left || res.right.right) {
-        dataPartition =
-            partitionFetcher.getDataPartitionWithUnclosedTimeRange(sgNameToQueryParamsMap);
-      } else {
-        dataPartition = partitionFetcher.getDataPartition(sgNameToQueryParamsMap);
-      }
-    }
-
-    analysis.setDataPartitionInfo(dataPartition);
-
-    return analysis;
   }
 
   private Map<Integer, List<Pair<Expression, String>>> analyzeSelect(
@@ -1168,28 +1148,34 @@ public class AnalyzeVisitor extends StatementVisitor<Analysis, MPPQueryContext>
 
   private DataPartition fetchDataPartitionByDevices(
       Set<String> deviceSet, ISchemaTree schemaTree, Filter globalTimeFilter) {
-    Pair<List<TTimePartitionSlot>, Pair<Boolean, Boolean>> res =
-        getTimePartitionSlotList(globalTimeFilter);
-    // there is no satisfied time range
-    if (res.left.isEmpty() && !res.right.left) {
-      return new DataPartition(
-          Collections.emptyMap(),
-          CONFIG.getSeriesPartitionExecutorClass(),
-          CONFIG.getSeriesPartitionSlotNum());
-    }
-    Map<String, List<DataPartitionQueryParam>> sgNameToQueryParamsMap = new HashMap<>();
-    for (String devicePath : deviceSet) {
-      DataPartitionQueryParam queryParam =
-          new DataPartitionQueryParam(devicePath, res.left, res.right.left, res.right.right);
-      sgNameToQueryParamsMap
-          .computeIfAbsent(schemaTree.getBelongedDatabase(devicePath), key -> new ArrayList<>())
-          .add(queryParam);
-    }
+    long startTime = System.nanoTime();
+    try {
+      Pair<List<TTimePartitionSlot>, Pair<Boolean, Boolean>> res =
+          getTimePartitionSlotList(globalTimeFilter);
+      // there is no satisfied time range
+      if (res.left.isEmpty() && !res.right.left) {
+        return new DataPartition(
+            Collections.emptyMap(),
+            CONFIG.getSeriesPartitionExecutorClass(),
+            CONFIG.getSeriesPartitionSlotNum());
+      }
+      Map<String, List<DataPartitionQueryParam>> sgNameToQueryParamsMap = new HashMap<>();
+      for (String devicePath : deviceSet) {
+        DataPartitionQueryParam queryParam =
+            new DataPartitionQueryParam(devicePath, res.left, res.right.left, res.right.right);
+        sgNameToQueryParamsMap
+            .computeIfAbsent(schemaTree.getBelongedDatabase(devicePath), key -> new ArrayList<>())
+            .add(queryParam);
+      }
 
-    if (res.right.left || res.right.right) {
-      return partitionFetcher.getDataPartitionWithUnclosedTimeRange(sgNameToQueryParamsMap);
-    } else {
-      return partitionFetcher.getDataPartition(sgNameToQueryParamsMap);
+      if (res.right.left || res.right.right) {
+        return partitionFetcher.getDataPartitionWithUnclosedTimeRange(sgNameToQueryParamsMap);
+      } else {
+        return partitionFetcher.getDataPartition(sgNameToQueryParamsMap);
+      }
+    } finally {
+      QueryMetricsManager.getInstance()
+          .addPlanCost(PARTITION_FETCHER, System.nanoTime() - startTime);
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
index 010b491e55..679a71627f 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/Analyzer.java
@@ -21,9 +21,11 @@ package org.apache.iotdb.db.mpp.plan.analyze;
 
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.mpp.common.MPPQueryContext;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.statement.Statement;
 
 import static org.apache.iotdb.db.mpp.common.QueryId.mockQueryId;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.ANALYZER;
 
 /** Analyze the statement and generate Analysis. */
 public class Analyzer {
@@ -40,7 +42,14 @@ public class Analyzer {
   }
 
   public Analysis analyze(Statement statement) {
-    return new AnalyzeVisitor(partitionFetcher, schemaFetcher).process(statement, context);
+    long startTime = System.nanoTime();
+    Analysis analysis =
+        new AnalyzeVisitor(partitionFetcher, schemaFetcher).process(statement, context);
+
+    if (statement.isQuery()) {
+      QueryMetricsManager.getInstance().addPlanCost(ANALYZER, System.nanoTime() - startTime);
+    }
+    return analysis;
   }
 
   public static void validate(Statement statement) {
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
index 7cfa0a3fa1..c150183527 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/execution/QueryExecution.java
@@ -33,6 +33,7 @@ import org.apache.iotdb.db.mpp.execution.QueryState;
 import org.apache.iotdb.db.mpp.execution.QueryStateMachine;
 import org.apache.iotdb.db.mpp.execution.exchange.ISourceHandle;
 import org.apache.iotdb.db.mpp.execution.exchange.MPPDataExchangeService;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.analyze.Analysis;
 import org.apache.iotdb.db.mpp.plan.analyze.Analyzer;
 import org.apache.iotdb.db.mpp.plan.analyze.IPartitionFetcher;
@@ -81,6 +82,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Throwables.throwIfUnchecked;
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.DISTRIBUTION_PLANNER;
 import static org.apache.iotdb.db.mpp.plan.constant.DataNodeEndPoints.isSameNode;
 
 /**
@@ -300,8 +302,14 @@ public class QueryExecution implements IQueryExecution {
 
   // Generate the distributed plan and split it into fragments
   public void doDistributedPlan() {
+    long startTime = System.nanoTime();
     DistributionPlanner planner = new DistributionPlanner(this.analysis, this.logicalPlan);
     this.distributedPlan = planner.planFragments();
+
+    if (rawStatement.isQuery()) {
+      QueryMetricsManager.getInstance()
+          .addPlanCost(DISTRIBUTION_PLANNER, System.nanoTime() - startTime);
+    }
     if (isQuery() && logger.isDebugEnabled()) {
       logger.debug(
           "distribution plan done. Fragment instance count is {}, details is: \n {}",
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
index 151a475d67..604442462e 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/parser/StatementGenerator.java
@@ -25,6 +25,7 @@ import org.apache.iotdb.commons.path.PartialPath;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.metadata.template.TemplateQueryType;
 import org.apache.iotdb.db.metadata.utils.MetaFormatUtils;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.expression.binary.GreaterEqualExpression;
 import org.apache.iotdb.db.mpp.plan.expression.binary.LessThanExpression;
 import org.apache.iotdb.db.mpp.plan.expression.binary.LogicAndExpression;
@@ -101,6 +102,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.SQL_PARSER;
+
 /** Convert SQL and RPC requests to {@link Statement}. */
 public class StatementGenerator {
 
@@ -472,48 +475,53 @@ public class StatementGenerator {
   }
 
   private static Statement invokeParser(String sql, ZoneId zoneId) {
-    ASTVisitor astVisitor = new ASTVisitor();
-    astVisitor.setZoneId(zoneId);
-
-    CharStream charStream1 = CharStreams.fromString(sql);
-
-    SqlLexer lexer1 = new SqlLexer(charStream1);
-    lexer1.removeErrorListeners();
-    lexer1.addErrorListener(SQLParseError.INSTANCE);
-
-    CommonTokenStream tokens1 = new CommonTokenStream(lexer1);
-
-    IoTDBSqlParser parser1 = new IoTDBSqlParser(tokens1);
-    parser1.getInterpreter().setPredictionMode(PredictionMode.SLL);
-    parser1.removeErrorListeners();
-    parser1.addErrorListener(SQLParseError.INSTANCE);
-
-    ParseTree tree;
+    long startTime = System.nanoTime();
     try {
-      // STAGE 1: try with simpler/faster SLL(*)
-      tree = parser1.singleStatement();
-      // if we get here, there was no syntax error and SLL(*) was enough;
-      // there is no need to try full LL(*)
-    } catch (Exception ex) {
-      CharStream charStream2 = CharStreams.fromString(sql);
-
-      SqlLexer lexer2 = new SqlLexer(charStream2);
-      lexer2.removeErrorListeners();
-      lexer2.addErrorListener(SQLParseError.INSTANCE);
-
-      CommonTokenStream tokens2 = new CommonTokenStream(lexer2);
-
-      org.apache.iotdb.db.qp.sql.IoTDBSqlParser parser2 =
-          new org.apache.iotdb.db.qp.sql.IoTDBSqlParser(tokens2);
-      parser2.getInterpreter().setPredictionMode(PredictionMode.LL);
-      parser2.removeErrorListeners();
-      parser2.addErrorListener(SQLParseError.INSTANCE);
-
-      // STAGE 2: parser with full LL(*)
-      tree = parser2.singleStatement();
-      // if we get here, it's LL not SLL
+      ASTVisitor astVisitor = new ASTVisitor();
+      astVisitor.setZoneId(zoneId);
+
+      CharStream charStream1 = CharStreams.fromString(sql);
+
+      SqlLexer lexer1 = new SqlLexer(charStream1);
+      lexer1.removeErrorListeners();
+      lexer1.addErrorListener(SQLParseError.INSTANCE);
+
+      CommonTokenStream tokens1 = new CommonTokenStream(lexer1);
+
+      IoTDBSqlParser parser1 = new IoTDBSqlParser(tokens1);
+      parser1.getInterpreter().setPredictionMode(PredictionMode.SLL);
+      parser1.removeErrorListeners();
+      parser1.addErrorListener(SQLParseError.INSTANCE);
+
+      ParseTree tree;
+      try {
+        // STAGE 1: try with simpler/faster SLL(*)
+        tree = parser1.singleStatement();
+        // if we get here, there was no syntax error and SLL(*) was enough;
+        // there is no need to try full LL(*)
+      } catch (Exception ex) {
+        CharStream charStream2 = CharStreams.fromString(sql);
+
+        SqlLexer lexer2 = new SqlLexer(charStream2);
+        lexer2.removeErrorListeners();
+        lexer2.addErrorListener(SQLParseError.INSTANCE);
+
+        CommonTokenStream tokens2 = new CommonTokenStream(lexer2);
+
+        org.apache.iotdb.db.qp.sql.IoTDBSqlParser parser2 =
+            new org.apache.iotdb.db.qp.sql.IoTDBSqlParser(tokens2);
+        parser2.getInterpreter().setPredictionMode(PredictionMode.LL);
+        parser2.removeErrorListeners();
+        parser2.addErrorListener(SQLParseError.INSTANCE);
+
+        // STAGE 2: parser with full LL(*)
+        tree = parser2.singleStatement();
+        // if we get here, it's LL not SLL
+      }
+      return astVisitor.visit(tree);
+    } finally {
+      QueryMetricsManager.getInstance().addPlanCost(SQL_PARSER, System.nanoTime() - startTime);
     }
-    return astVisitor.visit(tree);
   }
 
   private static void addMeasurementAndValue(
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
index 308887f53a..cc25e475ac 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LogicalPlanner.java
@@ -19,14 +19,16 @@
 package org.apache.iotdb.db.mpp.plan.planner;
 
 import org.apache.iotdb.db.mpp.common.MPPQueryContext;
+import org.apache.iotdb.db.mpp.metric.QueryMetricsManager;
 import org.apache.iotdb.db.mpp.plan.analyze.Analysis;
 import org.apache.iotdb.db.mpp.plan.optimization.PlanOptimizer;
 import org.apache.iotdb.db.mpp.plan.planner.plan.LogicalQueryPlan;
 import org.apache.iotdb.db.mpp.plan.planner.plan.node.PlanNode;
-import org.apache.iotdb.db.mpp.plan.statement.crud.QueryStatement;
 
 import java.util.List;
 
+import static org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics.LOGICAL_PLANNER;
+
 /** Generate a logical plan for the statement. */
 public class LogicalPlanner {
 
@@ -39,10 +41,13 @@ public class LogicalPlanner {
   }
 
   public LogicalQueryPlan plan(Analysis analysis) {
+    long startTime = System.nanoTime();
     PlanNode rootNode = new LogicalPlanVisitor(analysis).process(analysis.getStatement(), context);
 
     // optimize the query logical plan
-    if (analysis.getStatement() instanceof QueryStatement) {
+    if (analysis.getStatement().isQuery()) {
+      QueryMetricsManager.getInstance().addPlanCost(LOGICAL_PLANNER, System.nanoTime() - startTime);
+
       for (PlanOptimizer optimizer : optimizers) {
         rootNode = optimizer.optimize(rootNode, context);
       }
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
index e36b3b653e..be59c2b288 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
@@ -20,6 +20,7 @@
 package org.apache.iotdb.db.service.metrics;
 
 import org.apache.iotdb.commons.service.metric.MetricService;
+import org.apache.iotdb.db.mpp.metric.QueryPlanCostMetrics;
 import org.apache.iotdb.metrics.metricsets.jvm.JvmMetrics;
 import org.apache.iotdb.metrics.metricsets.logback.LogbackMetrics;
 
@@ -31,5 +32,8 @@ public class DataNodeMetricsHelper {
     MetricService.getInstance().addMetricSet(new FileMetrics());
     MetricService.getInstance().addMetricSet(new ProcessMetrics());
     MetricService.getInstance().addMetricSet(new SystemMetrics(true));
+
+    // bind query related metrics
+    MetricService.getInstance().addMetricSet(new QueryPlanCostMetrics());
   }
 }