Posted to commits@iotdb.apache.org by le...@apache.org on 2022/10/11 14:52:13 UTC

[iotdb] branch research/M4-visualization updated: 51%

This is an automated email from the ASF dual-hosted git repository.

leirui pushed a commit to branch research/M4-visualization
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/research/M4-visualization by this push:
     new ed45560744 51%
ed45560744 is described below

commit ed455607448ed31115627e4ed5cd9c9b181ae81c
Author: Lei Rui <10...@qq.com>
AuthorDate: Tue Oct 11 22:52:17 2022 +0800

    51%
---
 .../dataset/groupby/LocalGroupByExecutor4CPV.java  |  59 ++--
 .../org/apache/iotdb/db/service/TSServiceImpl.java | 118 +++----
 .../apache/iotdb/db/integration/m4/MyTest2.java    |  70 ++--
 .../session/MyRealDataTest1_WriteAndQuery.java     | 142 +-------
 .../iotdb/session/MyRealDataTest2_OnlyQuery.java   | 142 +-------
 .../session/MyRealDataTest3_WriteAndRawQuery.java  | 192 +++-------
 .../session/MySmallRealDataWriteQueryTest.java     |  34 +-
 .../tsfile/common/constant/TsFileConstant.java     |   2 +-
 .../encoding/decoder/DeltaBinaryDecoder.java       |  72 ++--
 .../encoding/encoder/DeltaBinaryEncoder.java       |  22 +-
 .../iotdb/tsfile/read/reader/page/PageReader.java  | 391 ++++++++++-----------
 11 files changed, 436 insertions(+), 808 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
index a04ae0c6d1..f27bc484b4 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
@@ -19,14 +19,6 @@
 
 package org.apache.iotdb.db.query.dataset.groupby;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.Set;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
@@ -59,6 +51,15 @@ import org.apache.iotdb.tsfile.read.reader.page.PageReader;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.utils.TsPrimitiveType;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
+
 /**
  * SQL format: SELECT min_time(s0), max_time(s0), first_value(s0), last_value(s0), min_value(s0),
  * max_value(s0) FROM root.xx group by ([tqs,tqe),IntervalLength). Requirements: (1) Don't change the
@@ -198,9 +199,9 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
 
   /**
    * @param curStartTime closed
-   * @param curEndTime   open
-   * @param startTime    closed
-   * @param endTime      open
+   * @param curEndTime open
+   * @param startTime closed
+   * @param endTime open
    */
   @Override
   public List<AggregateResult> calcResult(
@@ -225,9 +226,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
     return results;
   }
 
-  /**
-   * Apply the deletes to the BatchData; assign the updated BatchData and statistics to chunkSuit4CPV
-   */
+  /** Apply the deletes to the BatchData; assign the updated BatchData and statistics to chunkSuit4CPV */
   private void updateBatchData(ChunkSuit4CPV chunkSuit4CPV, TSDataType dataType) {
     if (chunkSuit4CPV.getBatchData() != null) {
       BatchData batchData1 = BatchDataFactory.createBatchData(dataType, true, false);
@@ -324,8 +323,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
           new Comparator<ChunkSuit4CPV>() { // TODO double check the sort order logic for version
             public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
               return new MergeReaderPriority(
-                  o2.getChunkMetadata().getVersion(),
-                  o2.getChunkMetadata().getOffsetOfChunkHeader())
+                      o2.getChunkMetadata().getVersion(),
+                      o2.getChunkMetadata().getOffsetOfChunkHeader())
                   .compareTo(
                       new MergeReaderPriority(
                           o1.getChunkMetadata().getVersion(),
@@ -412,7 +411,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                 .get(4) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
                 // minValue[bottomTimestamp], maxValue[topTimestamp]
                 .updateResultUsingValues(
-                    new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                    new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
             // TODO check updateResult
            return; // computation finished
          } else { // it is overlapped, so partial-scan all of these overlapping chunks
@@ -456,7 +455,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                   .get(4) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
                   // minValue[bottomTimestamp], maxValue[topTimestamp]
                   .updateResultUsingValues(
-                      new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                      new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
               // TODO check updateResult
              return; // computation finished
            } else { // such a point is found, so mark the chunk containing the candidate point as lazy
@@ -522,8 +521,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
           new Comparator<ChunkSuit4CPV>() { // TODO double check the sort order logic for version
             public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
               return new MergeReaderPriority(
-                  o2.getChunkMetadata().getVersion(),
-                  o2.getChunkMetadata().getOffsetOfChunkHeader())
+                      o2.getChunkMetadata().getVersion(),
+                      o2.getChunkMetadata().getOffsetOfChunkHeader())
                   .compareTo(
                       new MergeReaderPriority(
                           o1.getChunkMetadata().getVersion(),
@@ -610,7 +609,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                 .get(5) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
                 // minValue[bottomTimestamp], maxValue[topTimestamp]
                 .updateResultUsingValues(
-                    new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                    new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
             // TODO check updateResult
            return; // computation finished
          } else { // it is overlapped, so partial-scan all of these overlapping chunks
@@ -653,7 +652,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                   .get(5) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
                   // minValue[bottomTimestamp], maxValue[topTimestamp]
                   .updateResultUsingValues(
-                      new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                      new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
               // TODO check updateResult
              return; // computation finished
            } else { // such a point is found, so mark the chunk containing the candidate point as lazy
@@ -701,8 +700,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                 return res;
               } else {
                 return new MergeReaderPriority(
-                    o2.getChunkMetadata().getVersion(),
-                    o2.getChunkMetadata().getOffsetOfChunkHeader())
+                        o2.getChunkMetadata().getVersion(),
+                        o2.getChunkMetadata().getOffsetOfChunkHeader())
                     .compareTo(
                         new MergeReaderPriority(
                             o1.getChunkMetadata().getVersion(),
@@ -767,11 +766,11 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
           results
               .get(0)
               .updateResultUsingValues(
-                  new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                  new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
           results
               .get(2)
               .updateResultUsingValues(
-                  new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                  new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
           return;
         }
       }
@@ -798,8 +797,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
                 return res;
               } else {
                 return new MergeReaderPriority(
-                    o2.getChunkMetadata().getVersion(),
-                    o2.getChunkMetadata().getOffsetOfChunkHeader())
+                        o2.getChunkMetadata().getVersion(),
+                        o2.getChunkMetadata().getOffsetOfChunkHeader())
                     .compareTo(
                         new MergeReaderPriority(
                             o1.getChunkMetadata().getVersion(),
@@ -864,11 +863,11 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
           results
               .get(1)
               .updateResultUsingValues(
-                  new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                  new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
           results
               .get(3)
               .updateResultUsingValues(
-                  new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
+                  new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
           return;
         }
       }
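
The re-indented comparators above all implement one ordering: chunk metadata is
sorted so that higher-version (more recently written) chunks come first, which
is achieved by comparing o2's priority against o1's. A minimal sketch of that
pattern, assuming MergeReaderPriority compares by (version, offsetOfChunkHeader);
the list name chunkSuits is hypothetical:

    // Newest chunk version first: o2 is compared to o1, reversing natural order.
    chunkSuits.sort(
        (o1, o2) ->
            new MergeReaderPriority(
                    o2.getChunkMetadata().getVersion(),
                    o2.getChunkMetadata().getOffsetOfChunkHeader())
                .compareTo(
                    new MergeReaderPriority(
                        o1.getChunkMetadata().getVersion(),
                        o1.getChunkMetadata().getOffsetOfChunkHeader())));
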
diff --git a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
index 577af67c61..5fe4cb058d 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
@@ -18,22 +18,6 @@
  */
 package org.apache.iotdb.db.service;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.sql.SQLException;
-import java.time.ZoneId;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-import org.antlr.v4.runtime.misc.ParseCancellationException;
 import org.apache.iotdb.db.IOMonitor;
 import org.apache.iotdb.db.auth.AuthException;
 import org.apache.iotdb.db.auth.AuthorityChecker;
@@ -147,13 +131,29 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
+
+import org.antlr.v4.runtime.misc.ParseCancellationException;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * Thrift RPC implementation at server side.
- */
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.sql.SQLException;
+import java.time.ZoneId;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+
+/** Thrift RPC implementation at server side. */
 public class TSServiceImpl implements TSIService.Iface {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TSServiceImpl.class);
@@ -327,9 +327,7 @@ public class TSServiceImpl implements TSIService.Iface {
     }
   }
 
-  /**
-   * release single operation resource
-   */
+  /** release single operation resource */
   public void releaseQueryResource(long queryId) throws StorageEngineException {
     sessionManager.releaseQueryResource(queryId);
   }
@@ -610,13 +608,13 @@ public class TSServiceImpl implements TSIService.Iface {
 
       return physicalPlan.isQuery()
           ? internalExecuteQueryStatement(
-          statement,
-          req.statementId,
-          physicalPlan,
-          req.fetchSize,
-          req.timeout,
-          sessionManager.getUsername(req.getSessionId()),
-          req.isEnableRedirectQuery())
+              statement,
+              req.statementId,
+              physicalPlan,
+              req.fetchSize,
+              req.timeout,
+              sessionManager.getUsername(req.getSessionId()),
+              req.isEnableRedirectQuery())
           : executeUpdateStatement(physicalPlan, req.getSessionId());
     } catch (InterruptedException e) {
       LOGGER.error(INFO_INTERRUPT_ERROR, req, e);
@@ -643,13 +641,13 @@ public class TSServiceImpl implements TSIService.Iface {
       //      System.out.println("====DEBUG====: fetchSize=" + req.fetchSize);
       return physicalPlan.isQuery()
           ? internalExecuteQueryStatement(
-          statement,
-          req.statementId,
-          physicalPlan,
-          req.fetchSize,
-          req.timeout,
-          sessionManager.getUsername(req.getSessionId()),
-          req.isEnableRedirectQuery())
+              statement,
+              req.statementId,
+              physicalPlan,
+              req.fetchSize,
+              req.timeout,
+              sessionManager.getUsername(req.getSessionId()),
+              req.isEnableRedirectQuery())
           : RpcUtils.getTSExecuteStatementResp(
               TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is not a query statement.");
     } catch (InterruptedException e) {
@@ -669,18 +667,18 @@ public class TSServiceImpl implements TSIService.Iface {
     ret.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
     ret.setExecutionInfo(
         IOMonitor.print()
-            + ". timeColumnTS2DIFFLoadBatchCost= "
-            + TsFileConstant.timeColumnTS2DIFFLoadBatchCost.getSum()
-            + " us"
-            + ". countLoadIntBatch= "
-            + TsFileConstant.countLoadIntBatch
+            + ". \ntimeColumnTS2DIFFLoadBatchCost= "
+            + TsFileConstant.timeColumnTS2DIFFLoadBatchCost
+            + " ns"
+        //            + ". \ncountLoadIntBatch= "
+        //            + TsFileConstant.countLoadIntBatch
         //            + ". countForRegularEqual="
         //            + TsFileConstant.countForRegularEqual
         //            + ". countForRegularNOTEqual="
         //            + TsFileConstant.countForRegularNOTEqual
         //            + ". countForRegularZero="
         //            + TsFileConstant.countForRegularZero
-    );
+        );
     IOMonitor.finish();
     return ret;
   }
@@ -696,13 +694,13 @@ public class TSServiceImpl implements TSIService.Iface {
           processor.rawDataQueryReqToPhysicalPlan(req, sessionManager.getZoneId(req.sessionId));
       return physicalPlan.isQuery()
           ? internalExecuteQueryStatement(
-          "",
-          req.statementId,
-          physicalPlan,
-          req.fetchSize,
-          config.getQueryTimeoutThreshold(),
-          sessionManager.getUsername(req.sessionId),
-          req.isEnableRedirectQuery())
+              "",
+              req.statementId,
+              physicalPlan,
+              req.fetchSize,
+              config.getQueryTimeoutThreshold(),
+              sessionManager.getUsername(req.sessionId),
+              req.isEnableRedirectQuery())
           : RpcUtils.getTSExecuteStatementResp(
               TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is not a query statement.");
     } catch (InterruptedException e) {
@@ -718,7 +716,7 @@ public class TSServiceImpl implements TSIService.Iface {
 
   /**
    * @param plan must be a plan for Query: FillQueryPlan, AggregationPlan, GroupByTimePlan, UDFPlan,
-   *             some AuthorPlan
+   *     some AuthorPlan
    */
   @SuppressWarnings({"squid:S3776", "squid:S1141"}) // Suppress high Cognitive Complexity warning
   private TSExecuteStatementResp internalExecuteQueryStatement(
@@ -730,8 +728,8 @@ public class TSServiceImpl implements TSIService.Iface {
       String username,
       boolean enableRedirect)
       throws QueryProcessException, SQLException, StorageEngineException,
-      QueryFilterOptimizationException, MetadataException, IOException, InterruptedException,
-      TException, AuthException {
+          QueryFilterOptimizationException, MetadataException, IOException, InterruptedException,
+          TException, AuthException {
 
     // start record execution time
     IOMonitor.setSQL(statement);
@@ -869,9 +867,7 @@ public class TSServiceImpl implements TSIService.Iface {
         dataSet.getDataTypes().stream().map(Enum::toString).collect(Collectors.toList()));
   }
 
-  /**
-   * get ResultSet schema
-   */
+  /** get ResultSet schema */
   private TSExecuteStatementResp getQueryColumnHeaders(PhysicalPlan physicalPlan, String username)
       throws AuthException, TException, QueryProcessException, MetadataException {
 
@@ -1140,12 +1136,10 @@ public class TSServiceImpl implements TSIService.Iface {
     return encoder;
   }
 
-  /**
-   * create QueryDataSet and buffer it for fetchResults
-   */
+  /** create QueryDataSet and buffer it for fetchResults */
   private QueryDataSet createQueryDataSet(long queryId, PhysicalPlan physicalPlan, int fetchSize)
       throws QueryProcessException, QueryFilterOptimizationException, StorageEngineException,
-      IOException, MetadataException, SQLException, TException, InterruptedException {
+          IOException, MetadataException, SQLException, TException, InterruptedException {
 
     QueryContext context = genQueryContext(queryId, physicalPlan.isDebug());
     if (physicalPlan instanceof QueryPlan) {
@@ -1204,7 +1198,7 @@ public class TSServiceImpl implements TSIService.Iface {
             statement, sessionManager.getZoneId(sessionId), DEFAULT_FETCH_SIZE);
     return physicalPlan.isQuery()
         ? RpcUtils.getTSExecuteStatementResp(
-        TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is a query statement.")
+            TSStatusCode.EXECUTE_STATEMENT_ERROR, "Statement is a query statement.")
         : executeUpdateStatement(physicalPlan, sessionId);
   }
 
@@ -1634,9 +1628,7 @@ public class TSServiceImpl implements TSIService.Iface {
     return insertTabletPlan;
   }
 
-  /**
-   * construct one InsertMultiTabletPlan and process it
-   */
+  /** construct one InsertMultiTabletPlan and process it */
   public TSStatus insertTabletsInternal(TSInsertTabletsReq req) throws IllegalPathException {
     List<InsertTabletPlan> insertTabletPlanList = new ArrayList<>();
     InsertMultiTabletPlan insertMultiTabletPlan = new InsertMultiTabletPlan();
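
Besides the reformatting, one substantive change above is in the execution-info
string: timeColumnTS2DIFFLoadBatchCost is now reported in nanoseconds as a
plain long (see the TsFileConstant diff below) rather than as the sum of a
DescriptiveStatistics in microseconds. A sketch of the resulting reporting path:

    // The metric is now a plain long accumulator, embedded directly.
    ret.setExecutionInfo(
        IOMonitor.print()
            + ". \ntimeColumnTS2DIFFLoadBatchCost= "
            + TsFileConstant.timeColumnTS2DIFFLoadBatchCost
            + " ns");
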
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
index e935288743..1d689ad5a1 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
@@ -42,9 +42,9 @@ public class MyTest2 {
   private static final String TIMESTAMP_STR = "Time";
 
   private static String[] creationSqls =
-      new String[]{
-          "SET STORAGE GROUP TO root.vehicle.d0",
-          "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
+      new String[] {
+        "SET STORAGE GROUP TO root.vehicle.d0",
+        "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
       };
 
   private final String d0s0 = "root.vehicle.d0.s0";
@@ -97,14 +97,14 @@ public class MyTest2 {
     prepareData1();
 
     String[] res =
-        new String[]{
-            "0,1,20,5,20,5[1],30[10]",
-            "25,27,45,20,30,9[33],40[30]",
-            "50,52,54,8,18,8[52],18[54]",
-            "75,null,null,null,null,null,null"
+        new String[] {
+          "0,1,20,5,20,5[1],30[10]",
+          "25,27,45,20,30,9[33],40[30]",
+          "50,52,54,8,18,8[52],18[54]",
+          "75,null,null,null,null,null,null"
         };
     try (Connection connection =
-        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
       boolean hasResultSet =
           statement.execute(
@@ -144,8 +144,8 @@ public class MyTest2 {
     // data:
     // https://user-images.githubusercontent.com/33376433/152085323-321ecd70-1253-494f-81ab-fe227d1f5351.png
     try (Connection connection =
-        DriverManager.getConnection(
-            Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
 
       for (String sql : creationSqls) {
@@ -184,14 +184,14 @@ public class MyTest2 {
     prepareData2();
 
     String[] res =
-        new String[]{
-            "0,1,20,5,5,5[1],5[1]",
-            "25,30,40,5,5,5[30],5[30]",
-            "50,55,72,5,5,5[65],5[65]",
-            "75,80,90,5,5,5[80],5[80]"
+        new String[] {
+          "0,1,20,5,5,5[1],5[1]",
+          "25,30,40,5,5,5[30],5[30]",
+          "50,55,72,5,5,5[65],5[65]",
+          "75,80,90,5,5,5[80],5[80]"
         };
     try (Connection connection =
-        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
       boolean hasResultSet =
           statement.execute(
@@ -231,8 +231,8 @@ public class MyTest2 {
     // data:
     // https://user-images.githubusercontent.com/33376433/152085361-571f64dc-0c32-4f70-9481-bc30e4f6f78a.png
     try (Connection connection =
-        DriverManager.getConnection(
-            Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
 
       for (String sql : creationSqls) {
@@ -271,14 +271,14 @@ public class MyTest2 {
     prepareData3();
 
     String[] res =
-        new String[]{
-            "0,1,20,5,15,5[1],15[2]",
-            "25,30,40,5,15,5[30],15[40]",
-            "50,55,72,5,15,5[65],15[66]",
-            "75,80,90,5,15,5[80],15[82]"
+        new String[] {
+          "0,1,20,5,15,5[1],15[2]",
+          "25,30,40,5,15,5[30],15[40]",
+          "50,55,72,5,15,5[65],15[66]",
+          "75,80,90,5,15,5[80],15[82]"
         };
     try (Connection connection =
-        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
       boolean hasResultSet =
           statement.execute(
@@ -318,8 +318,8 @@ public class MyTest2 {
     // data:
     // https://user-images.githubusercontent.com/33376433/152085386-ebe57e83-cb49-49e8-b8f8-b80719547c42.png
     try (Connection connection =
-        DriverManager.getConnection(
-            Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
 
       for (String sql : creationSqls) {
@@ -357,9 +357,9 @@ public class MyTest2 {
   public void test4() {
     prepareData4();
 
-    String[] res = new String[]{"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
+    String[] res = new String[] {"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
     try (Connection connection =
-        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
       boolean hasResultSet =
           statement.execute(
@@ -399,8 +399,8 @@ public class MyTest2 {
     // data:
     // https://user-images.githubusercontent.com/33376433/152088562-830e3272-749a-493a-83ca-1279e66ab145.png
     try (Connection connection =
-        DriverManager.getConnection(
-            Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
 
       for (String sql : creationSqls) {
@@ -433,9 +433,9 @@ public class MyTest2 {
   public void test5() {
     prepareData5();
 
-    String[] res = new String[]{"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
+    String[] res = new String[] {"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
     try (Connection connection =
-        DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
       boolean hasResultSet =
           statement.execute(
@@ -475,8 +475,8 @@ public class MyTest2 {
     // data:
     // https://user-images.githubusercontent.com/33376433/152088820-49351c49-9da2-43dd-8da1-2940ae81ae9d.png
     try (Connection connection =
-        DriverManager.getConnection(
-            Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+            DriverManager.getConnection(
+                Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
         Statement statement = connection.createStatement()) {
 
       for (String sql : creationSqls) {
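
The expected-result arrays in these tests (bucket starts 0, 25, 50, 75) imply
M4 queries over [0, 100) with an interval of 25 time units. A hedged
reconstruction of the kind of statement the tests execute, following the SQL
format documented in LocalGroupByExecutor4CPV above (the exact SQL lies outside
the hunks shown, so the interval unit is an assumption):

    boolean hasResultSet =
        statement.execute(
            "select min_time(s0), max_time(s0), first_value(s0), last_value(s0),"
                + " min_value(s0), max_value(s0)"
                + " from root.vehicle.d0 group by ([0, 100), 25ms)");
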
diff --git a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest1_WriteAndQuery.java b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest1_WriteAndQuery.java
index bff308ff23..a3182c90b8 100644
--- a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest1_WriteAndQuery.java
+++ b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest1_WriteAndQuery.java
@@ -16,11 +16,11 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.text.DecimalFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import static org.apache.iotdb.session.MyRealDataTest3_WriteAndRawQuery.printStat;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.allRegularBytesSize;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.byteArrayLengthStatistics;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.countForHitNewDeltas;
@@ -360,52 +360,7 @@ public class MyRealDataTest1_WriteAndQuery {
     dataSet.closeOperationHandle();
     session.close();
 
-    DecimalFormat df = new DecimalFormat("#,###.00");
-    double max = timeColumnTS2DIFFLoadBatchCost.getMax();
-    double min = timeColumnTS2DIFFLoadBatchCost.getMin();
-    double mean = timeColumnTS2DIFFLoadBatchCost.getMean();
-    double std = timeColumnTS2DIFFLoadBatchCost.getStandardDeviation();
-    double p25 = timeColumnTS2DIFFLoadBatchCost.getPercentile(25);
-    double p50 = timeColumnTS2DIFFLoadBatchCost.getPercentile(50);
-    double p75 = timeColumnTS2DIFFLoadBatchCost.getPercentile(75);
-    double p90 = timeColumnTS2DIFFLoadBatchCost.getPercentile(90);
-    double p95 = timeColumnTS2DIFFLoadBatchCost.getPercentile(95);
-    System.out.println(
-        "timeColumnTS2DIFFLoadBatchCost_stats"
-            + ": "
-            + "num="
-            + timeColumnTS2DIFFLoadBatchCost.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(timeColumnTS2DIFFLoadBatchCost.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    System.out.println("timeColumnTS2DIFFLoadBatchCost=" + timeColumnTS2DIFFLoadBatchCost);
 
     System.out.println("Equal Num: " + countForRegularEqual);
     System.out.println("NOT Equal Num: " + countForRegularNOTEqual);
@@ -414,97 +369,8 @@ public class MyRealDataTest1_WriteAndQuery {
     System.out.println("hit Num: " + countForHitNewDeltas.getN());
     System.out.println("NOT hit Num: " + countForNotHitNewDeltas.getN());
 
-    max = regularNewDeltasStatistics.getMax();
-    min = regularNewDeltasStatistics.getMin();
-    mean = regularNewDeltasStatistics.getMean();
-    std = regularNewDeltasStatistics.getStandardDeviation();
-    p25 = regularNewDeltasStatistics.getPercentile(25);
-    p50 = regularNewDeltasStatistics.getPercentile(50);
-    p75 = regularNewDeltasStatistics.getPercentile(75);
-    p90 = regularNewDeltasStatistics.getPercentile(90);
-    p95 = regularNewDeltasStatistics.getPercentile(95);
-    System.out.println(
-        "regularNewDeltas_stats"
-            + ": "
-            + "num="
-            + regularNewDeltasStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(regularNewDeltasStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
-
-    max = byteArrayLengthStatistics.getMax();
-    min = byteArrayLengthStatistics.getMin();
-    mean = byteArrayLengthStatistics.getMean();
-    std = byteArrayLengthStatistics.getStandardDeviation();
-    p25 = byteArrayLengthStatistics.getPercentile(25);
-    p50 = byteArrayLengthStatistics.getPercentile(50);
-    p75 = byteArrayLengthStatistics.getPercentile(75);
-    p90 = byteArrayLengthStatistics.getPercentile(90);
-    p95 = byteArrayLengthStatistics.getPercentile(95);
-    System.out.println(
-        "byteArrayLengthStatistics_stats"
-            + ": "
-            + "num="
-            + byteArrayLengthStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(byteArrayLengthStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    printStat(regularNewDeltasStatistics, "regularNewDeltasStatistics");
+    printStat(byteArrayLengthStatistics, "byteArrayLengthStatistics");
 
     System.out.println("allRegularBytes size: " + allRegularBytesSize.getMax());
     System.out.println("prepare AllRegularBytes cost: " + prepareAllRegulars.getSum() + "us");
diff --git a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest2_OnlyQuery.java b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest2_OnlyQuery.java
index e99a27e4fd..c7d25739e2 100644
--- a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest2_OnlyQuery.java
+++ b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest2_OnlyQuery.java
@@ -16,11 +16,11 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.text.DecimalFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import static org.apache.iotdb.session.MyRealDataTest3_WriteAndRawQuery.printStat;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.allRegularBytesSize;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.byteArrayLengthStatistics;
 import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.countForHitNewDeltas;
@@ -277,52 +277,7 @@ public class MyRealDataTest2_OnlyQuery {
     dataSet.closeOperationHandle();
     session.close();
 
-    DecimalFormat df = new DecimalFormat("#,###.00");
-    double max = timeColumnTS2DIFFLoadBatchCost.getMax();
-    double min = timeColumnTS2DIFFLoadBatchCost.getMin();
-    double mean = timeColumnTS2DIFFLoadBatchCost.getMean();
-    double std = timeColumnTS2DIFFLoadBatchCost.getStandardDeviation();
-    double p25 = timeColumnTS2DIFFLoadBatchCost.getPercentile(25);
-    double p50 = timeColumnTS2DIFFLoadBatchCost.getPercentile(50);
-    double p75 = timeColumnTS2DIFFLoadBatchCost.getPercentile(75);
-    double p90 = timeColumnTS2DIFFLoadBatchCost.getPercentile(90);
-    double p95 = timeColumnTS2DIFFLoadBatchCost.getPercentile(95);
-    System.out.println(
-        "timeColumnTS2DIFFLoadBatchCost_stats"
-            + ": "
-            + "num="
-            + timeColumnTS2DIFFLoadBatchCost.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(timeColumnTS2DIFFLoadBatchCost.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    System.out.println("timeColumnTS2DIFFLoadBatchCost=" + timeColumnTS2DIFFLoadBatchCost);
 
     System.out.println("Equal Num: " + countForRegularEqual);
     System.out.println("NOT Equal Num: " + countForRegularNOTEqual);
@@ -331,97 +286,8 @@ public class MyRealDataTest2_OnlyQuery {
     System.out.println("hit Num: " + countForHitNewDeltas.getN());
     System.out.println("NOT hit Num: " + countForNotHitNewDeltas.getN());
 
-    max = regularNewDeltasStatistics.getMax();
-    min = regularNewDeltasStatistics.getMin();
-    mean = regularNewDeltasStatistics.getMean();
-    std = regularNewDeltasStatistics.getStandardDeviation();
-    p25 = regularNewDeltasStatistics.getPercentile(25);
-    p50 = regularNewDeltasStatistics.getPercentile(50);
-    p75 = regularNewDeltasStatistics.getPercentile(75);
-    p90 = regularNewDeltasStatistics.getPercentile(90);
-    p95 = regularNewDeltasStatistics.getPercentile(95);
-    System.out.println(
-        "countForRegularNewDeltas_stats"
-            + ": "
-            + "num="
-            + regularNewDeltasStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(regularNewDeltasStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
-
-    max = byteArrayLengthStatistics.getMax();
-    min = byteArrayLengthStatistics.getMin();
-    mean = byteArrayLengthStatistics.getMean();
-    std = byteArrayLengthStatistics.getStandardDeviation();
-    p25 = byteArrayLengthStatistics.getPercentile(25);
-    p50 = byteArrayLengthStatistics.getPercentile(50);
-    p75 = byteArrayLengthStatistics.getPercentile(75);
-    p90 = byteArrayLengthStatistics.getPercentile(90);
-    p95 = byteArrayLengthStatistics.getPercentile(95);
-    System.out.println(
-        "byteArrayLengthStatistics_stats"
-            + ": "
-            + "num="
-            + byteArrayLengthStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(byteArrayLengthStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    printStat(regularNewDeltasStatistics, "regularNewDeltasStatistics");
+    printStat(byteArrayLengthStatistics, "byteArrayLengthStatistics");
 
     System.out.println("allRegularBytes size: " + allRegularBytesSize.getMax());
     System.out.println("prepare AllRegularBytes cost: " + prepareAllRegulars.getSum() + "us");
diff --git a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest3_WriteAndRawQuery.java b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest3_WriteAndRawQuery.java
index e647d0f8ad..f608252a6a 100644
--- a/session/src/test/java/org/apache/iotdb/session/MyRealDataTest3_WriteAndRawQuery.java
+++ b/session/src/test/java/org/apache/iotdb/session/MyRealDataTest3_WriteAndRawQuery.java
@@ -11,6 +11,7 @@ import org.apache.iotdb.session.SessionDataSet.DataIterator;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -286,52 +287,7 @@ public class MyRealDataTest3_WriteAndRawQuery {
     dataSet.closeOperationHandle();
     session.close();
 
-    DecimalFormat df = new DecimalFormat("#,###.00");
-    double max = timeColumnTS2DIFFLoadBatchCost.getMax();
-    double min = timeColumnTS2DIFFLoadBatchCost.getMin();
-    double mean = timeColumnTS2DIFFLoadBatchCost.getMean();
-    double std = timeColumnTS2DIFFLoadBatchCost.getStandardDeviation();
-    double p25 = timeColumnTS2DIFFLoadBatchCost.getPercentile(25);
-    double p50 = timeColumnTS2DIFFLoadBatchCost.getPercentile(50);
-    double p75 = timeColumnTS2DIFFLoadBatchCost.getPercentile(75);
-    double p90 = timeColumnTS2DIFFLoadBatchCost.getPercentile(90);
-    double p95 = timeColumnTS2DIFFLoadBatchCost.getPercentile(95);
-    System.out.println(
-        "timeColumnTS2DIFFLoadBatchCost_stats"
-            + ": "
-            + "num="
-            + timeColumnTS2DIFFLoadBatchCost.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(timeColumnTS2DIFFLoadBatchCost.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    System.out.println("timeColumnTS2DIFFLoadBatchCost=" + timeColumnTS2DIFFLoadBatchCost);
 
     System.out.println("Equal Num: " + countForRegularEqual);
     System.out.println("NOT Equal Num: " + countForRegularNOTEqual);
@@ -340,97 +296,8 @@ public class MyRealDataTest3_WriteAndRawQuery {
     System.out.println("hit Num: " + countForHitNewDeltas.getN());
     System.out.println("NOT hit Num: " + countForNotHitNewDeltas.getN());
 
-    max = regularNewDeltasStatistics.getMax();
-    min = regularNewDeltasStatistics.getMin();
-    mean = regularNewDeltasStatistics.getMean();
-    std = regularNewDeltasStatistics.getStandardDeviation();
-    p25 = regularNewDeltasStatistics.getPercentile(25);
-    p50 = regularNewDeltasStatistics.getPercentile(50);
-    p75 = regularNewDeltasStatistics.getPercentile(75);
-    p90 = regularNewDeltasStatistics.getPercentile(90);
-    p95 = regularNewDeltasStatistics.getPercentile(95);
-    System.out.println(
-        "regularNewDeltas_stats"
-            + ": "
-            + "num="
-            + regularNewDeltasStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(regularNewDeltasStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
-
-    max = byteArrayLengthStatistics.getMax();
-    min = byteArrayLengthStatistics.getMin();
-    mean = byteArrayLengthStatistics.getMean();
-    std = byteArrayLengthStatistics.getStandardDeviation();
-    p25 = byteArrayLengthStatistics.getPercentile(25);
-    p50 = byteArrayLengthStatistics.getPercentile(50);
-    p75 = byteArrayLengthStatistics.getPercentile(75);
-    p90 = byteArrayLengthStatistics.getPercentile(90);
-    p95 = byteArrayLengthStatistics.getPercentile(95);
-    System.out.println(
-        "byteArrayLengthStatistics_stats"
-            + ": "
-            + "num="
-            + byteArrayLengthStatistics.getN()
-            + ", "
-            // num is inaccurate because I let alone the last chunk
-            + "sum="
-            + df.format(byteArrayLengthStatistics.getSum())
-            + "us,"
-            + "mean="
-            + df.format(mean)
-            + ", "
-            + "min="
-            + df.format(min)
-            + ", "
-            + "max="
-            + df.format(max)
-            + ", "
-            + "std="
-            + df.format(std)
-            + ", "
-            + "p25="
-            + df.format(p25)
-            + ", "
-            + "p50="
-            + df.format(p50)
-            + ", "
-            + "p75="
-            + df.format(p75)
-            + ", "
-            + "p90="
-            + df.format(p90)
-            + ", "
-            + "p95="
-            + df.format(p95));
+    printStat(regularNewDeltasStatistics, "regularNewDeltasStatistics");
+    printStat(byteArrayLengthStatistics, "byteArrayLengthStatistics");
 
     System.out.println("allRegularBytes size: " + allRegularBytesSize.getMax());
     System.out.println("prepare AllRegularBytes cost: " + prepareAllRegulars.getSum() + "us");
@@ -544,4 +411,55 @@ public class MyRealDataTest3_WriteAndRawQuery {
       throw new IOException("data type wrong");
     }
   }
+
+  public static String printStat(DescriptiveStatistics statistics, String name) {
+    DecimalFormat df = new DecimalFormat("#,###.00");
+    double max = statistics.getMax();
+    double min = statistics.getMin();
+    double mean = statistics.getMean();
+    double std = statistics.getStandardDeviation();
+    double p25 = statistics.getPercentile(25);
+    double p50 = statistics.getPercentile(50);
+    double p75 = statistics.getPercentile(75);
+    double p90 = statistics.getPercentile(90);
+    double p95 = statistics.getPercentile(95);
+    String res =
+        name
+            + "_stats"
+            + ": "
+            + "num="
+            + statistics.getN()
+            + ", "
+            + "sum="
+            + df.format(statistics.getSum())
+            + "us,"
+            + "mean="
+            + df.format(mean)
+            + ", "
+            + "min="
+            + df.format(min)
+            + ", "
+            + "max="
+            + df.format(max)
+            + ", "
+            + "std="
+            + df.format(std)
+            + ", "
+            + "p25="
+            + df.format(p25)
+            + ", "
+            + "p50="
+            + df.format(p50)
+            + ", "
+            + "p75="
+            + df.format(p75)
+            + ", "
+            + "p90="
+            + df.format(p90)
+            + ", "
+            + "p95="
+            + df.format(p95);
+    System.out.println(res);
+    return res;
+  }
 }
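
The new printStat helper above replaces three copy-pasted statistics dumps, and
the other test classes now import it statically. A short usage sketch (the
sample values are hypothetical):

    DescriptiveStatistics stats = new DescriptiveStatistics();
    stats.addValue(1.5);
    stats.addValue(2.5);
    // Prints a line like:
    // demo_stats: num=2, sum=4.00us,mean=2.00, min=1.50, max=2.50, ...
    printStat(stats, "demo");
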
diff --git a/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java b/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
index 46239c8655..89e765c7f9 100644
--- a/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
+++ b/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
@@ -1,13 +1,5 @@
 package org.apache.iotdb.session;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.compaction.CompactionStrategy;
@@ -19,11 +11,21 @@ import org.apache.iotdb.session.SessionDataSet.DataIterator;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+
 public class MySmallRealDataWriteQueryTest {
 
   private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
@@ -49,7 +51,8 @@ public class MySmallRealDataWriteQueryTest {
   private static long dataMaxTime = 25599285703L;
   private static long total_time_length = dataMaxTime - dataMinTime;
   private static int total_point_number = 50000;
-  private static int iotdb_chunk_point_size = 100;// must be smaller than BLOCK_DEFAULT_SIZE = 128 to fulfill the assumption that page=pack
+  private static int iotdb_chunk_point_size =
+      100; // must be smaller than BLOCK_DEFAULT_SIZE = 128 to fulfill the assumption that page=pack
   private static long chunkAvgTimeLen =
       (long)
           Math.ceil(
@@ -114,9 +117,7 @@ public class MySmallRealDataWriteQueryTest {
     EnvironmentUtils.cleanEnv();
   }
 
-  /**
-   * Before writing data, make sure check the server parameter configurations.
-   */
+  /** Before writing data, make sure check the server parameter configurations. */
   // Usage: java -jar WriteData-0.12.4.jar device measurement dataType timestamp_precision
   // total_time_length total_point_number iotdb_chunk_point_size filePath deleteFreq deleteLen
   // timeIdx valueIdx
@@ -152,8 +153,9 @@ public class MySmallRealDataWriteQueryTest {
     } else {
       // randomize between [dataMinTime, dataMaxTime-range]
       minTime =
-          (long) Math
-              .ceil(dataMinTime + random.nextDouble() * (dataMaxTime - range - dataMinTime + 1));
+          (long)
+              Math.ceil(
+                  dataMinTime + random.nextDouble() * (dataMaxTime - range - dataMinTime + 1));
       interval = (long) Math.ceil((double) range / w);
     }
     maxTime = minTime + interval * w;
@@ -315,8 +317,8 @@ public class MySmallRealDataWriteQueryTest {
           long deleteStartTime =
               (long)
                   Math.ceil(
-                      lastDeleteMinTime + random.nextDouble() * (rightBound - lastDeleteMinTime
-                          + 1));
+                      lastDeleteMinTime
+                          + random.nextDouble() * (rightBound - lastDeleteMinTime + 1));
           long deleteEndTime = deleteStartTime + deleteLen - 1;
           session.deleteData(deletePaths, deleteStartTime, deleteEndTime);
           System.out.println("[[[[delete]]]]]" + deleteStartTime + "," + deleteEndTime);
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/TsFileConstant.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/TsFileConstant.java
index 4f694432dd..d419cf0f7f 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/TsFileConstant.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/constant/TsFileConstant.java
@@ -38,7 +38,7 @@ public class TsFileConstant {
   public static final String PATH_SEPARATER_NO_REGEX = "\\.";
   public static final char DOUBLE_QUOTE = '"';
 
-  public static DescriptiveStatistics timeColumnTS2DIFFLoadBatchCost = new DescriptiveStatistics();
+  public static long timeColumnTS2DIFFLoadBatchCost = 0;
 
   public static long countForRegularEqual = 0; // equal to regular
   public static long countForRegularNOTEqual = 0; // not equal to regular
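
The field change above swaps a DescriptiveStatistics accumulator for a plain
long, so recording a batch's load time becomes a single addition instead of
storing every sample. How the counter is fed (matching the DeltaBinaryDecoder
diff below):

    long start = System.nanoTime();
    // ... load one TS_2DIFF batch ...
    TsFileConstant.timeColumnTS2DIFFLoadBatchCost += System.nanoTime() - start; // ns
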
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
index bd55464282..6e4db59f35 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
@@ -19,8 +19,6 @@
 
 package org.apache.iotdb.tsfile.encoding.decoder;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
 import org.apache.iotdb.tsfile.encoding.encoder.DeltaBinaryEncoder;
@@ -28,9 +26,13 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.iotdb.tsfile.utils.BytesUtils;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
 /**
  * This class is a decoder for decoding the byte array encoded by {@code
- * DeltaBinaryEncoder}. DeltaBinaryDecoder just supports integer and long values.<br> .
+ * DeltaBinaryEncoder}. DeltaBinaryDecoder just supports integer and long values.<br>
+ * .
  *
  * @see DeltaBinaryEncoder
  */
@@ -39,24 +41,16 @@ public abstract class DeltaBinaryDecoder extends Decoder {
   protected long count = 0;
   protected byte[] deltaBuf;
 
-  /**
-   * the first value in one pack.
-   */
+  /** the first value in one pack. */
   protected int readIntTotalCount = 0;
 
   protected int nextReadIndex = 0;
-  /**
-   * max bit length of all value in a pack.
-   */
+  /** max bit length of all value in a pack. */
   protected int packWidth;
-  /**
-   * data number in this pack.
-   */
+  /** data number in this pack. */
   protected int packNum;
 
-  /**
-   * how many bytes data takes after encoding.
-   */
+  /** how many bytes data takes after encoding. */
   protected int encodingLength;
 
   public DeltaBinaryDecoder() {
@@ -89,9 +83,7 @@ public abstract class DeltaBinaryDecoder extends Decoder {
     private int firstValue;
     private int[] data;
     private int previous;
-    /**
-     * minimum value for all difference.
-     */
+    /** minimum value for all difference. */
     private int minDeltaBase;
 
     public IntDeltaDecoder() {
@@ -176,16 +168,14 @@ public abstract class DeltaBinaryDecoder extends Decoder {
     private long[] data; // NOTE this does not include firstValue
     private long[] allData; // assuming only one pack in the buffer to be decoded
     private long previous;
-    /**
-     * minimum value for all difference.
-     */
+    /** minimum value for all difference. */
     private long minDeltaBase;
 
     private boolean enableRegularityTimeDecode;
     private long regularTimeInterval;
 
-//    private Map<Pair<Long, Integer>, byte[][]> allRegularBytes =
-//        new HashMap<>(); // <newRegularDelta,packWidth> -> (relativePos->bytes)
+    //    private Map<Pair<Long, Integer>, byte[][]> allRegularBytes =
+    //        new HashMap<>(); // <newRegularDelta,packWidth> -> (relativePos->bytes)
 
     private int[][] allFallWithinMasks = new int[7][]; // packWidth(1~7) -> fallWithinMasks[]
 
@@ -215,22 +205,22 @@ public abstract class DeltaBinaryDecoder extends Decoder {
       return allData;
     }
 
-//    /**
-//     * @return true if the point whose time equals candidateTimestamp exists, false if not
-//     */
-//    public boolean partialScan4CPV(long candidateTimestamp, ByteBuffer buffer) throws IOException {
-//      long[] timeData = getDataArray4CPV(buffer);
-//      for (long t : timeData) {
-//        if (t > candidateTimestamp) {
-//          return false; // not exist, return early
-//        }
-//        if (t == candidateTimestamp) {
-//          return true; // exist
-//        }
-//      }
-//      return false; // not exist
-//    }
-
+    //    /**
+    //     * @return true if the point whose time equals candidateTimestamp exists, false if not
+    //     */
+    //    public boolean partialScan4CPV(long candidateTimestamp, ByteBuffer buffer) throws
+    // IOException {
+    //      long[] timeData = getDataArray4CPV(buffer);
+    //      for (long t : timeData) {
+    //        if (t > candidateTimestamp) {
+    //          return false; // not exist, return early
+    //        }
+    //        if (t == candidateTimestamp) {
+    //          return true; // exist
+    //        }
+    //      }
+    //      return false; // not exist
+    //    }
 
     /**
      * if there's no decoded data left, decode next pack into {@code data}.
@@ -252,7 +242,7 @@ public abstract class DeltaBinaryDecoder extends Decoder {
      * @return long value
      */
     protected long loadIntBatch(ByteBuffer buffer) {
-//      TsFileConstant.countLoadIntBatch++;
+      //      TsFileConstant.countLoadIntBatch++;
       long start = System.nanoTime();
 
       packNum = ReadWriteIOUtils.readInt(buffer);
@@ -355,7 +345,7 @@ public abstract class DeltaBinaryDecoder extends Decoder {
       }
 
       long runTime = System.nanoTime() - start; // ns
-      TsFileConstant.timeColumnTS2DIFFLoadBatchCost.addValue(runTime / 1000.0); // us
+      TsFileConstant.timeColumnTS2DIFFLoadBatchCost += runTime; // ns
 
       return firstValue;
     }
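
For reference, a pack in the layout that loadIntBatch consumes can be decoded by a
minimal standalone sketch like the one below. The field order (packNum, packWidth,
minDeltaBase, firstValue, then packNum two_diff values of packWidth bits each) is
assumed from the encoder javadoc later in this commit; the plain ByteBuffer reads and
the readBits helper are illustrative simplifications, not the production
ReadWriteIOUtils/BytesUtils calls.

import java.nio.ByteBuffer;

// Sketch only: decode one delta pack under the layout assumed above.
public class Ts2DiffPackSketch {

  public static long[] decodePack(ByteBuffer buffer) {
    int packNum = buffer.getInt();        // number of two_diff values in this pack
    int packWidth = buffer.getInt();      // max bit length of any two_diff value
    long minDeltaBase = buffer.getLong(); // minimum of all raw deltas
    long firstValue = buffer.getLong();   // first value of the pack
    byte[] deltaBuf = new byte[(packNum * packWidth + 7) / 8];
    buffer.get(deltaBuf);

    long[] values = new long[packNum + 1];
    values[0] = firstValue;
    long previous = firstValue;
    for (int i = 0; i < packNum; i++) {
      long twoDiff = readBits(deltaBuf, i * packWidth, packWidth);
      previous += minDeltaBase + twoDiff; // undo the min-delta normalization
      values[i + 1] = previous;
    }
    return values;
  }

  // Read 'width' bits starting at absolute bit offset 'pos', most significant bit first.
  private static long readBits(byte[] buf, int pos, int width) {
    long v = 0;
    for (int i = pos; i < pos + width; i++) {
      v = (v << 1) | ((buf[i / 8] >> (7 - (i % 8))) & 1);
    }
    return v;
  }
}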
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DeltaBinaryEncoder.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DeltaBinaryEncoder.java
index 05f8319915..f8fecf4454 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DeltaBinaryEncoder.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DeltaBinaryEncoder.java
@@ -19,15 +19,17 @@
 
 package org.apache.iotdb.tsfile.encoding.encoder;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.iotdb.tsfile.utils.BytesUtils;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
 /**
  * DeltaBinaryEncoder is an encoder for compressing integer and long data. We adopt the
  * hypothesis that contiguous data points have similar values. Thus the difference value of two
@@ -36,8 +38,8 @@ import org.slf4j.LoggerFactory;
  * Given an array of length n, if all values in the input array are positive and less
  * than 2^m, we actually need only m*n rather than 32*n bits to store the array.
  *
- * <p>DeltaBinaryEncoder calculates difference between two adjacent points and record the minimum
- * of those difference values firstly. Then it saves two_diff value that difference minus minimum of
+ * <p>DeltaBinaryEncoder first calculates the differences between adjacent points and records the
+ * minimum of those differences. It then stores each two_diff value, i.e., the difference minus
  * that minimum, so that all two_diff values are positive. Next it computes the longest bit length
  * {@code m} required by any two_diff value, i.e., the bit length that the maximum two_diff
  * value takes. Only the low m bits of each two_diff value are saved into the result byte array.
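
The two_diff arithmetic described above fits in a few lines. The hypothetical helper
below (not part of the encoder) computes the minimum delta and the resulting bit
width m; for the regular sequence 100, 200, 300 every two_diff value is 0, so m is 0
and the packed payload is empty.

// Hypothetical helper illustrating the two_diff/bit-width computation above.
public class TwoDiffWidthSketch {

  public static int bitWidthFor(long[] values) {
    long minDelta = Long.MAX_VALUE;
    for (int i = 1; i < values.length; i++) {
      minDelta = Math.min(minDelta, values[i] - values[i - 1]);
    }
    int m = 0;
    for (int i = 1; i < values.length; i++) {
      long twoDiff = values[i] - values[i - 1] - minDelta; // always >= 0
      m = Math.max(m, 64 - Long.numberOfLeadingZeros(twoDiff));
    }
    return m; // only the low m bits of each two_diff value are stored
  }

  public static void main(String[] args) {
    System.out.println(bitWidthFor(new long[] {100, 200, 300})); // prints 0
  }
}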
@@ -76,9 +78,7 @@ public abstract class DeltaBinaryEncoder extends Encoder {
 
   protected abstract int calculateBitWidthsForDeltaBlockBuffer();
 
-  /**
-   * write all data into {@code encodingBlockBuffer}.
-   */
+  /** write all data into {@code encodingBlockBuffer}. */
   private void writeDataWithMinWidth() {
     for (int i = 0; i < writeIndex; i++) {
       writeValueToBytes(i);
@@ -111,9 +111,7 @@ public abstract class DeltaBinaryEncoder extends Encoder {
     writeIndex = -1;
   }
 
-  /**
-   * calling this method to flush all values which haven't encoded to result byte array.
-   */
+  /** calling this method to flush all values which haven't encoded to result byte array. */
   @Override
   public void flush(ByteArrayOutputStream out) {
     try {
@@ -174,7 +172,7 @@ public abstract class DeltaBinaryEncoder extends Encoder {
      * input an integer.
      *
      * @param value value to encode
-     * @param out   the ByteArrayOutputStream which data encode into
+     * @param out the ByteArrayOutputStream into which the data is encoded
      */
     public void encodeValue(int value, ByteArrayOutputStream out) {
       if (writeIndex == -1) {
@@ -382,7 +380,7 @@ public abstract class DeltaBinaryEncoder extends Encoder {
      * input an integer or long value.
      *
      * @param value value to encode
-     * @param out   - the ByteArrayOutputStream which data encode into
+     * @param out - the ByteArrayOutputStream into which the data is encoded
      */
     public void encodeValue(long value, ByteArrayOutputStream out) {
       if (writeIndex == -1) {
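
Taken together with the decoder changes earlier in this commit, a round-trip usage
sketch looks like the following. It assumes the public no-arg constructors of
LongDeltaEncoder and LongDeltaDecoder; the encodeValue/flush/hasNext/readLong
signatures are the ones visible in this diff.

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;

import org.apache.iotdb.tsfile.encoding.decoder.DeltaBinaryDecoder;
import org.apache.iotdb.tsfile.encoding.encoder.DeltaBinaryEncoder;

// Round-trip sketch: encode a regular time series, then decode it back.
public class DeltaRoundTripSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DeltaBinaryEncoder.LongDeltaEncoder encoder = new DeltaBinaryEncoder.LongDeltaEncoder();
    for (long t = 0; t < 1000; t += 100) {
      encoder.encodeValue(t, out);
    }
    encoder.flush(out); // flush the last, possibly partial, pack

    ByteBuffer buffer = ByteBuffer.wrap(out.toByteArray());
    DeltaBinaryDecoder.LongDeltaDecoder decoder = new DeltaBinaryDecoder.LongDeltaDecoder();
    while (decoder.hasNext(buffer)) {
      System.out.println(decoder.readLong(buffer));
    }
  }
}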
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
index 2d825da0dc..35ee02b4eb 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
@@ -18,12 +18,6 @@
  */
 package org.apache.iotdb.tsfile.read.reader.page;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 import org.apache.iotdb.tsfile.encoding.decoder.Decoder;
 import org.apache.iotdb.tsfile.encoding.decoder.DeltaBinaryDecoder.LongDeltaDecoder;
 import org.apache.iotdb.tsfile.file.header.PageHeader;
@@ -40,37 +34,35 @@ import org.apache.iotdb.tsfile.read.filter.operator.AndFilter;
 import org.apache.iotdb.tsfile.read.reader.IPageReader;
 import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 public class PageReader implements IPageReader {
 
   private PageHeader pageHeader;
 
   protected TSDataType dataType;
 
-  /**
-   * decoder for value column
-   */
+  /** decoder for value column */
   public Decoder valueDecoder;
 
-  /**
-   * decoder for time column
-   */
+  /** decoder for time column */
   public Decoder timeDecoder;
 
-  /**
-   * time column in memory
-   */
-  public ByteBuffer timeBuffer; // solely consumed either by getDataArray4CPV() or by timeDecoder.hasNext(timeBuffer)&timeDecoder.readLong(timeBuffer)
+  /** time column in memory */
+  public ByteBuffer timeBuffer; // solely consumed either by getDataArray4CPV() or by
+  // timeDecoder.hasNext(timeBuffer)&timeDecoder.readLong(timeBuffer)
 
-  /**
-   * value column in memory
-   */
+  /** value column in memory */
   public ByteBuffer valueBuffer;
 
   protected Filter filter;
 
-  /**
-   * A list of deleted intervals.
-   */
+  /** A list of deleted intervals. */
   private List<TimeRange> deleteIntervalList;
 
   private int deleteCursor = 0;
@@ -114,143 +106,144 @@ public class PageReader implements IPageReader {
     valueBuffer.position(timeBufferLength);
   }
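
For context on the two buffers above: a page's bytes are split into a time slice and
a value slice that share the same underlying array, which is why timeBuffer must be
consumed by exactly one of the two paths noted in its comment. A minimal sketch of
that split, assuming readUnsignedVarInt behaves as in ReadWriteForEncodingUtils:

import java.nio.ByteBuffer;

import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;

// Sketch: split one page buffer into independent time/value slices.
public class PageSplitSketch {
  public static ByteBuffer[] split(ByteBuffer pageData) {
    int timeBufferLength = ReadWriteForEncodingUtils.readUnsignedVarInt(pageData);
    ByteBuffer timeBuffer = pageData.slice();
    timeBuffer.limit(timeBufferLength); // time column: the first timeBufferLength bytes
    ByteBuffer valueBuffer = pageData.slice();
    valueBuffer.position(timeBufferLength); // value column: the remaining bytes
    return new ByteBuffer[] {timeBuffer, valueBuffer};
  }
}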
 
-//  public void split4CPV(
-//      long startTime,
-//      long endTime,
-//      long interval,
-//      long curStartTime,
-//      List<ChunkSuit4CPV> currentChunkList,
-//      Map<Integer, List<ChunkSuit4CPV>> splitChunkList,
-//      ChunkMetadata chunkMetadata)
-//      throws IOException { // note: [startTime,endTime), [curStartTime,curEndTime)
-//    Map<Integer, BatchData> splitBatchDataMap = new HashMap<>();
-//    Map<Integer, ChunkMetadata> splitChunkMetadataMap = new HashMap<>();
-//    while (timeDecoder.hasNext(timeBuffer)) {
-//      long timestamp = timeDecoder.readLong(timeBuffer);
-//      // prepare corresponding batchData
-//      if (timestamp < curStartTime) {
-//        switch (dataType) {
-//          case INT32:
-//            valueDecoder.readInt(valueBuffer);
-//            break;
-//          case INT64:
-//            valueDecoder.readLong(valueBuffer);
-//            break;
-//          case FLOAT:
-//            valueDecoder.readFloat(valueBuffer);
-//            break;
-//          case DOUBLE:
-//            valueDecoder.readDouble(valueBuffer);
-//            break;
-//          default:
-//            throw new UnSupportedDataTypeException(String.valueOf(dataType));
-//        }
-//        continue;
-//      }
-//      if (timestamp >= endTime) {
-//        break;
-//      }
-//      int idx = (int) Math.floor((timestamp - startTime) * 1.0 / interval);
-//      if (!splitBatchDataMap.containsKey(idx)) {
-//        // create batchData
-//        BatchData batch1 = BatchDataFactory.createBatchData(dataType, true, false);
-//        splitBatchDataMap.put(idx, batch1);
-//        Statistics statistics = null;
-//        switch (dataType) {
-//          case INT32:
-//            statistics = new IntegerStatistics();
-//            break;
-//          case INT64:
-//            statistics = new LongStatistics();
-//            break;
-//          case FLOAT:
-//            statistics = new FloatStatistics();
-//            break;
-//          case DOUBLE:
-//            statistics = new DoubleStatistics();
-//            break;
-//          default:
-//            break;
-//        }
-//        // create chunkMetaData
-//        ChunkMetadata chunkMetadata1 =
-//            new ChunkMetadata(
-//                chunkMetadata.getMeasurementUid(),
-//                chunkMetadata.getDataType(),
-//                chunkMetadata.getOffsetOfChunkHeader(),
-//                statistics);
-//        chunkMetadata1.setVersion(chunkMetadata.getVersion()); // don't miss this
-//
-//        //        // important, used later for candidate point verification
-//        //        // (1) candidate point itself whether is in the deleted interval
-//        //        // (2) candidate point whether is overlapped by a chunk with a larger version
-//        // number and
-//        //        // the chunk does not have a deleted interval overlapping this candidate point
-//        //        chunkMetadata1.setDeleteIntervalList(chunkMetadata.getDeleteIntervalList());
-//        //        // not use current Ii to modify deletedIntervalList any more
-//
-//        splitChunkMetadataMap.put(idx, chunkMetadata1);
-//      }
-//      BatchData batchData1 = splitBatchDataMap.get(idx);
-//      ChunkMetadata chunkMetadata1 = splitChunkMetadataMap.get(idx);
-//      switch (dataType) {
-//        case INT32:
-//          int anInt = valueDecoder.readInt(valueBuffer);
-//          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
-//            // update batchData1
-//            batchData1.putInt(timestamp, anInt);
-//            // update statistics of chunkMetadata1
-//            chunkMetadata1.getStatistics().update(timestamp, anInt);
-//          }
-//          break;
-//        case INT64:
-//          long aLong = valueDecoder.readLong(valueBuffer);
-//          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
-//            // update batchData1
-//            batchData1.putLong(timestamp, aLong);
-//            // update statistics of chunkMetadata1
-//            chunkMetadata1.getStatistics().update(timestamp, aLong);
-//          }
-//          break;
-//        case FLOAT:
-//          float aFloat = valueDecoder.readFloat(valueBuffer);
-//          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
-//            // update batchData1
-//            batchData1.putFloat(timestamp, aFloat);
-//            // update statistics of chunkMetadata1
-//            chunkMetadata1.getStatistics().update(timestamp, aFloat);
-//          }
-//          break;
-//        case DOUBLE:
-//          double aDouble = valueDecoder.readDouble(valueBuffer);
-//          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
-//            // update batchData1
-//            batchData1.putDouble(timestamp, aDouble);
-//            // update statistics of chunkMetadata1
-//            chunkMetadata1.getStatistics().update(timestamp, aDouble);
-//          }
-//          break;
-//        default:
-//          throw new UnSupportedDataTypeException(String.valueOf(dataType));
-//      }
-//    }
-//
-//    int curIdx = (int) Math.floor((curStartTime - startTime) * 1.0 / interval);
-//    for (Integer i : splitBatchDataMap.keySet()) {
-//      if (!splitBatchDataMap.get(i).isEmpty()) {
-//        if (i == curIdx) {
-//          currentChunkList.add(
-//              new ChunkSuit4CPV(splitChunkMetadataMap.get(i), splitBatchDataMap.get(i).flip()));
-//        } else {
-//          splitChunkList.computeIfAbsent(i, k -> new ArrayList<>());
-//          splitChunkList
-//              .get(i)
-//              .add(
-//                  new ChunkSuit4CPV(splitChunkMetadataMap.get(i), splitBatchDataMap.get(i).flip()));
-//        }
-//      }
-//    }
-//  }
+  //  public void split4CPV(
+  //      long startTime,
+  //      long endTime,
+  //      long interval,
+  //      long curStartTime,
+  //      List<ChunkSuit4CPV> currentChunkList,
+  //      Map<Integer, List<ChunkSuit4CPV>> splitChunkList,
+  //      ChunkMetadata chunkMetadata)
+  //      throws IOException { // note: [startTime,endTime), [curStartTime,curEndTime)
+  //    Map<Integer, BatchData> splitBatchDataMap = new HashMap<>();
+  //    Map<Integer, ChunkMetadata> splitChunkMetadataMap = new HashMap<>();
+  //    while (timeDecoder.hasNext(timeBuffer)) {
+  //      long timestamp = timeDecoder.readLong(timeBuffer);
+  //      // prepare corresponding batchData
+  //      if (timestamp < curStartTime) {
+  //        switch (dataType) {
+  //          case INT32:
+  //            valueDecoder.readInt(valueBuffer);
+  //            break;
+  //          case INT64:
+  //            valueDecoder.readLong(valueBuffer);
+  //            break;
+  //          case FLOAT:
+  //            valueDecoder.readFloat(valueBuffer);
+  //            break;
+  //          case DOUBLE:
+  //            valueDecoder.readDouble(valueBuffer);
+  //            break;
+  //          default:
+  //            throw new UnSupportedDataTypeException(String.valueOf(dataType));
+  //        }
+  //        continue;
+  //      }
+  //      if (timestamp >= endTime) {
+  //        break;
+  //      }
+  //      int idx = (int) Math.floor((timestamp - startTime) * 1.0 / interval);
+  //      if (!splitBatchDataMap.containsKey(idx)) {
+  //        // create batchData
+  //        BatchData batch1 = BatchDataFactory.createBatchData(dataType, true, false);
+  //        splitBatchDataMap.put(idx, batch1);
+  //        Statistics statistics = null;
+  //        switch (dataType) {
+  //          case INT32:
+  //            statistics = new IntegerStatistics();
+  //            break;
+  //          case INT64:
+  //            statistics = new LongStatistics();
+  //            break;
+  //          case FLOAT:
+  //            statistics = new FloatStatistics();
+  //            break;
+  //          case DOUBLE:
+  //            statistics = new DoubleStatistics();
+  //            break;
+  //          default:
+  //            break;
+  //        }
+  //        // create chunkMetaData
+  //        ChunkMetadata chunkMetadata1 =
+  //            new ChunkMetadata(
+  //                chunkMetadata.getMeasurementUid(),
+  //                chunkMetadata.getDataType(),
+  //                chunkMetadata.getOffsetOfChunkHeader(),
+  //                statistics);
+  //        chunkMetadata1.setVersion(chunkMetadata.getVersion()); // don't miss this
+  //
+  //        //        // important, used later for candidate point verification
+  //        //        // (1) candidate point itself whether is in the deleted interval
+  //        //        // (2) candidate point whether is overlapped by a chunk with a larger version
+  //        // number and
+  //        //        // the chunk does not have a deleted interval overlapping this candidate point
+  //        //        chunkMetadata1.setDeleteIntervalList(chunkMetadata.getDeleteIntervalList());
+  //        //        // not use current Ii to modify deletedIntervalList any more
+  //
+  //        splitChunkMetadataMap.put(idx, chunkMetadata1);
+  //      }
+  //      BatchData batchData1 = splitBatchDataMap.get(idx);
+  //      ChunkMetadata chunkMetadata1 = splitChunkMetadataMap.get(idx);
+  //      switch (dataType) {
+  //        case INT32:
+  //          int anInt = valueDecoder.readInt(valueBuffer);
+  //          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
+  //            // update batchData1
+  //            batchData1.putInt(timestamp, anInt);
+  //            // update statistics of chunkMetadata1
+  //            chunkMetadata1.getStatistics().update(timestamp, anInt);
+  //          }
+  //          break;
+  //        case INT64:
+  //          long aLong = valueDecoder.readLong(valueBuffer);
+  //          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+  //            // update batchData1
+  //            batchData1.putLong(timestamp, aLong);
+  //            // update statistics of chunkMetadata1
+  //            chunkMetadata1.getStatistics().update(timestamp, aLong);
+  //          }
+  //          break;
+  //        case FLOAT:
+  //          float aFloat = valueDecoder.readFloat(valueBuffer);
+  //          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
+  //            // update batchData1
+  //            batchData1.putFloat(timestamp, aFloat);
+  //            // update statistics of chunkMetadata1
+  //            chunkMetadata1.getStatistics().update(timestamp, aFloat);
+  //          }
+  //          break;
+  //        case DOUBLE:
+  //          double aDouble = valueDecoder.readDouble(valueBuffer);
+  //          if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
+  //            // update batchData1
+  //            batchData1.putDouble(timestamp, aDouble);
+  //            // update statistics of chunkMetadata1
+  //            chunkMetadata1.getStatistics().update(timestamp, aDouble);
+  //          }
+  //          break;
+  //        default:
+  //          throw new UnSupportedDataTypeException(String.valueOf(dataType));
+  //      }
+  //    }
+  //
+  //    int curIdx = (int) Math.floor((curStartTime - startTime) * 1.0 / interval);
+  //    for (Integer i : splitBatchDataMap.keySet()) {
+  //      if (!splitBatchDataMap.get(i).isEmpty()) {
+  //        if (i == curIdx) {
+  //          currentChunkList.add(
+  //              new ChunkSuit4CPV(splitChunkMetadataMap.get(i), splitBatchDataMap.get(i).flip()));
+  //        } else {
+  //          splitChunkList.computeIfAbsent(i, k -> new ArrayList<>());
+  //          splitChunkList
+  //              .get(i)
+  //              .add(
+  //                  new ChunkSuit4CPV(splitChunkMetadataMap.get(i),
+  // splitBatchDataMap.get(i).flip()));
+  //        }
+  //      }
+  //    }
+  //  }
 
   /**
   * Handles the update needed when a candidate point is invalidated by the M4 time span, a delete, or an update. It iterates over the points in this page, applies delete and filter checks to each point read out, and groups them by M4 time
@@ -263,14 +256,15 @@ public class PageReader implements IPageReader {
       long curStartTime,
       List<ChunkSuit4CPV> currentChunkList,
       Map<Integer, List<ChunkSuit4CPV>> splitChunkList,
-      ChunkMetadata chunkMetadata)
-      throws IOException { // note: [startTime,endTime), [curStartTime,curEndTime)
+      ChunkMetadata chunkMetadata) { // note: [startTime,endTime), [curStartTime,curEndTime)
     Map<Integer, BatchData> splitBatchDataMap = new HashMap<>();
     Map<Integer, ChunkMetadata> splitChunkMetadataMap = new HashMap<>();
     long[] timeData = ((LongDeltaDecoder) timeDecoder).getDataArray4CPV(timeBuffer);
     for (long timestamp : timeData) {
       // prepare corresponding batchData
       if (timestamp < curStartTime) {
+        // TODO delay the decode of value until the timestamp is valid, skip to the next point when
+        // t is invalid
         valueDecoder.readLong(valueBuffer); // hard-coded, assuming value is long data type
         continue;
       }
@@ -282,7 +276,8 @@ public class PageReader implements IPageReader {
         // create batchData
         BatchData batch1 = BatchDataFactory.createBatchData(dataType, true, false);
         splitBatchDataMap.put(idx, batch1);
-        LongStatistics statistics = new LongStatistics();  // hard-coded, assuming value is long data type
+        LongStatistics statistics =
+            new LongStatistics(); // hard-coded, assuming value is long data type
         // create chunkMetaData
         ChunkMetadata chunkMetadata1 =
             new ChunkMetadata(
@@ -296,12 +291,14 @@ public class PageReader implements IPageReader {
       BatchData batchData1 = splitBatchDataMap.get(idx);
       ChunkMetadata chunkMetadata1 = splitChunkMetadataMap.get(idx);
 
-      // TODO delay the decode of value until the timestamp is valid, skip to the next point when t is invalid
+      // TODO delay the decode of value until the timestamp is valid, skip to the next point when t
+      // is invalid
       // hard-coded, assuming value is long data type
       long aLong = valueDecoder.readLong(valueBuffer);
 
       if (!isDeleted(timestamp)) {
-        // remove filter, only check delete, because groupByFilter is handled in this function's own logic
+        // remove filter, only check delete, because groupByFilter is handled in this function's own
+        // logic
 
         // update batchData1
         batchData1.putLong(timestamp, aLong);
@@ -327,23 +324,23 @@ public class PageReader implements IPageReader {
     }
   }
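
Since the bucket index idx drives all of the grouping above, here is a tiny worked
example of the arithmetic with hypothetical numbers: a [0, 100) query window split
into four buckets of interval 25.

// Worked example of idx = floor((timestamp - startTime) / interval).
public class BucketIndexExample {
  public static void main(String[] args) {
    long startTime = 0;
    long interval = 25;
    for (long t : new long[] {3, 24, 25, 99}) {
      int idx = (int) Math.floor((t - startTime) * 1.0 / interval);
      System.out.println(t + " -> bucket " + idx); // buckets 0, 0, 1, 3
    }
  }
}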
 
-//  /**
-//   * Timestamps of points in a chunk increase monotonically, so the scan can stop once a point's timestamp is greater than or equal to candidateTimestamp
-//   *
-//   * @return true if the point whose time equals candidateTimestamp exists, false if not
-//   */
-//  public boolean partialScan(long candidateTimestamp) throws IOException {
-//    while (timeDecoder.hasNext(timeBuffer)) {
-//      long timestamp = timeDecoder.readLong(timeBuffer);
-//      if (timestamp > candidateTimestamp) {
-//        return false;
-//      }
-//      if (timestamp == candidateTimestamp) {
-//        return true;
-//      }
-//    }
-//    return false;
-//  }
+  //  /**
+  //   * Timestamps of points in a chunk increase monotonically, so the scan can stop once a point's timestamp is greater than or equal to candidateTimestamp
+  //   *
+  //   * @return true if the point whose time equals candidateTimestamp exists, false if not
+  //   */
+  //  public boolean partialScan(long candidateTimestamp) throws IOException {
+  //    while (timeDecoder.hasNext(timeBuffer)) {
+  //      long timestamp = timeDecoder.readLong(timeBuffer);
+  //      if (timestamp > candidateTimestamp) {
+  //        return false;
+  //      }
+  //      if (timestamp == candidateTimestamp) {
+  //        return true;
+  //      }
+  //    }
+  //    return false;
+  //  }
 
   public boolean partialScan4CPV(long candidateTimestamp) {
     long[] timeData = ((LongDeltaDecoder) timeDecoder).getDataArray4CPV(timeBuffer);
@@ -358,9 +355,7 @@ public class PageReader implements IPageReader {
     return false; // not exist
   }
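
partialScan4CPV relies on timestamps within a page being sorted in ascending order;
the early-exit contract can be stated as a standalone sketch:

// Sketch of the early-exit scan: on a sorted array, stop at the first
// timestamp greater than the candidate, because it cannot appear later.
public class EarlyExitScanSketch {
  public static boolean contains(long[] sortedTimes, long candidate) {
    for (long t : sortedTimes) {
      if (t > candidate) {
        return false;
      }
      if (t == candidate) {
        return true;
      }
    }
    return false;
  }
}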
 
-  /**
-   * @return the returned BatchData may be empty, but never be null
-   */
+  /** @return the returned BatchData, which may be empty but is never null */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
   @Override
   public BatchData getAllSatisfiedPageData(boolean ascending) throws IOException {
@@ -368,27 +363,29 @@ public class PageReader implements IPageReader {
 
     long[] timeData = ((LongDeltaDecoder) timeDecoder).getDataArray4CPV(timeBuffer);
     for (long timestamp : timeData) {
-      // TODO delay the decode of value until the timestamp is valid, skip to the next point when t is invalid
-      long aLong = valueDecoder
-          .readLong(valueBuffer); // hard-coded, assuming value is long data type
+      // TODO delay the decode of value until the timestamp is valid, skip to the next point when t
+      // is invalid
+      long aLong =
+          valueDecoder.readLong(valueBuffer); // hard-coded, assuming value is long data type
 
       if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, null))) {
-        // cannot remove filter here because M4-UDF uses time filters, but we can delay the use of value object
+        // cannot remove filter here because M4-UDF uses time filters, but we can delay the use of
+        // value object
         // assuming the filter is always timeFilter
 
         pageData.putLong(timestamp, aLong);
       }
     }
 
-//    while (timeDecoder.hasNext(timeBuffer)) { // TODO: timeDecoder.data
-//      long timestamp = timeDecoder.readLong(timeBuffer);
-//      long aLong = valueDecoder
-//          .readLong(valueBuffer); // hard-coded, assuming value is long data type
-//      if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
-//        // cannot remove filter here because M4-UDF uses time filters
-//        pageData.putLong(timestamp, aLong);
-//      }
-//    }
+    //    while (timeDecoder.hasNext(timeBuffer)) { // TODO: timeDecoder.data
+    //      long timestamp = timeDecoder.readLong(timeBuffer);
+    //      long aLong = valueDecoder
+    //          .readLong(valueBuffer); // hard-coded, assuming value is long data type
+    //      if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+    //        // cannot remove filter here because M4-UDF uses time filters
+    //        pageData.putLong(timestamp, aLong);
+    //      }
+    //    }
 
     return pageData.flip();
   }