Posted to commits@iotdb.apache.org by hu...@apache.org on 2022/11/04 06:41:13 UTC

[iotdb] branch research/LSM-quantile updated: add Quantile Summary into Statistics for Chunks (#7903)

This is an automated email from the ASF dual-hosted git repository.

hui pushed a commit to branch research/LSM-quantile
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/research/LSM-quantile by this push:
     new 5fa03557ff add Quantile Summary into Statistics for Chunks (#7903)
5fa03557ff is described below

commit 5fa03557ff852071ab39a131fa04ef356c5ef361
Author: czllgzmzl <32...@users.noreply.github.com>
AuthorDate: Fri Nov 4 14:41:06 2022 +0800

    add Quantile Summary into Statistics for Chunks (#7903)
---
 client-py/SessionMemory.py                         |  86 +++
 client-py/SessionNo.py                             | 143 ++++
 client-py/SessionQuery.py                          |  74 +++
 client-py/SessionSyn.py                            |  89 +++
 .../iotdb/cluster/query/LocalQueryExecutor.java    |   3 +-
 .../query/aggregate/ClusterAggregateExecutor.java  | 170 ++++-
 .../iotdb/tsfile/TsFileWriteAlignedWithTablet.java |  21 +-
 server/pom.xml                                     |  25 +
 .../resources/conf/iotdb-engine.properties         |  35 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  20 +
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  | 105 +++
 .../apache/iotdb/db/qp/constant/SQLConstant.java   |  57 +-
 .../qp/logical/crud/AggregationQueryOperator.java  |  48 ++
 .../iotdb/db/qp/physical/crud/AggregationPlan.java |  85 ++-
 .../db/query/aggregation/AggregateResult.java      |  95 ++-
 .../db/query/aggregation/AggregationType.java      | 146 ++++-
 .../impl/AggressiveMedianAggrResult.java           | 353 ++++++++++
 .../impl/AmortizedMedianAggrResult.java            | 352 ++++++++++
 ...BucketStatFilterAggressiveMedianAggrResult.java | 455 +++++++++++++
 .../impl/BitsBucketStatFilterMedianAggrResult.java | 457 +++++++++++++
 .../impl/BitsBucketStatMedianAggrResult.java       | 445 +++++++++++++
 .../db/query/aggregation/impl/CountAggrResult.java |   2 +
 .../impl/KLLDebugFullReadingAggrResult.java        | 356 ++++++++++
 .../db/query/aggregation/impl/KLLDebugResult.java  | 484 ++++++++++++++
 .../impl/KLLFloatsMedianAggrResult.java            | 313 +++++++++
 .../impl/KLLFloatsSingleAggrResult.java            | 273 ++++++++
 .../impl/KLLStatDebugFullReadingAggrResult.java    | 532 +++++++++++++++
 .../impl/KLLStatDebugPageDemandRateAggrResult.java | 554 ++++++++++++++++
 .../aggregation/impl/KLLStatMedianAggrResult.java  | 540 +++++++++++++++
 .../impl/KLLStatOverlapSingleAggrResult.java       | 558 ++++++++++++++++
 .../aggregation/impl/KLLStatSingleAggrResult.java  | 510 +++++++++++++++
 .../impl/KLLStatSingleReadAggrResult.java          | 328 ++++++++++
 .../query/aggregation/impl/MedianAggrResult.java   | 389 +++++++++++
 .../impl/OptimizedMedianAggrResult.java            | 423 ++++++++++++
 .../impl/Optimized_2_MedianAggrResult.java         | 409 ++++++++++++
 .../impl/Optimized_3_MedianAggrResult.java         | 348 ++++++++++
 .../impl/Optimized_4_MedianAggrResult.java         | 359 ++++++++++
 .../impl/Optimized_5_MedianAggrResult.java         | 348 ++++++++++
 .../impl/SamplingStatSingleAggrResult.java         | 411 ++++++++++++
 .../impl/StrictKLLStatSingleAggrResult.java        | 508 +++++++++++++++
 .../impl/TDigestStatSingleAggrResult.java          | 404 ++++++++++++
 .../groupby/GroupByWithValueFilterDataSet.java     | 203 ++++--
 .../db/query/executor/AggregationExecutor.java     | 668 +++++++++++++------
 .../iotdb/db/query/executor/fill/LinearFill.java   |   3 +-
 .../db/query/factory/AggregateResultFactory.java   | 144 ++++
 .../db/query/reader/chunk/DiskChunkLoader.java     |   7 +
 .../db/query/reader/series/IAggregateReader.java   |   4 +
 .../query/reader/series/SeriesAggregateReader.java |  12 +
 ...ader.java => SeriesAggregateReaderForStat.java} |  59 +-
 ...java => SeriesAggregateReaderForStatChain.java} |  59 +-
 .../iotdb/db/query/reader/series/SeriesReader.java |  52 +-
 .../query/reader/series/SeriesReaderForStat.java   | 725 +++++++++++++++++++++
 .../reader/series/SeriesReaderForStatChain.java    | 591 +++++++++++++++++
 .../reader/universal/PriorityMergeReader.java      |  74 +++
 ...Reader.java => PriorityMergeReaderForStat.java} | 117 ++--
 .../org/apache/iotdb/db/utils/FileLoaderUtils.java |  12 +
 .../org/apache/iotdb/db/utils/SchemaUtils.java     |  24 +
 .../apache/iotdb/db/utils/TypeInferenceUtils.java  |  24 +
 .../iotdb/db/utils/datastructure/FixedTreap.java   | 296 +++++++++
 .../datastructure/HashMapOpenCacheForQuantile.java | 239 +++++++
 .../db/utils/quantiles/AmortizedForMedian.java     | 182 ++++++
 .../EclipseCollectionsHashMapForQuantile.java      | 199 ++++++
 .../iotdb/db/utils/quantiles/FixedTreap.java       | 296 +++++++++
 .../quantiles/GCHashMapComplexForQuantile.java     | 252 +++++++
 .../quantiles/GSHashMapAggressiveForQuantile.java  | 182 ++++++
 .../db/utils/quantiles/KLLDoublesForMedian.java    |  37 ++
 .../quantiles/TDigestRadixBetterForMedian.java     | 208 ++++++
 .../db/utils/quantiles/TDigestRadixForMedian.java  | 293 +++++++++
 .../db/query/aggregation/AggregateResultTest.java  |  44 ++
 .../dataset/groupby/GroupByLevelDataSetTest.java   |  18 +
 .../dataset/groupby/GroupByTimeDataSetTest.java    |  28 +
 .../org/apache/iotdb/session/InsertCsvDataIT.java  | 242 +++++++
 .../org/apache/iotdb/session/InsertDataIT.java     | 242 +++++++
 .../apache/iotdb/session/InsertLatencyDataIT.java  | 213 ++++++
 .../iotdb/session/InsertLatencyUpdateDataIT.java   | 269 ++++++++
 .../apache/iotdb/session/InsertOverlapDataIT.java  | 244 +++++++
 .../session/IoTDBSessionComplexAggregationIT.java  | 521 +++++++++++++++
 .../iotdb/session/QueryLatencyUpdateDataIT.java    | 371 +++++++++++
 .../org/apache/iotdb/session/SketchTempIT.java     | 353 ++++++++++
 .../iotdb/session/StatMedianKLLFullReadIT.java     | 349 ++++++++++
 .../org/apache/iotdb/session/StatMedianKLLIT.java  | 456 +++++++++++++
 .../iotdb/session/StatMedianKLLOverlapIT.java      | 397 +++++++++++
 tsfile/pom.xml                                     |  26 +
 .../iotdb/tsfile/common/conf/TSFileConfig.java     |  92 ++-
 .../tsfile/common/constant/TsFileConstant.java     |   7 +
 .../iotdb/tsfile/file/header/PageHeader.java       |   3 +-
 .../iotdb/tsfile/file/metadata/ChunkMetadata.java  |   6 +-
 .../tsfile/file/metadata/TimeseriesMetadata.java   |   3 +-
 .../file/metadata/statistics/DoubleStatistics.java | 496 +++++++++++++-
 .../file/metadata/statistics/Statistics.java       | 140 +++-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    |   4 +-
 .../org/apache/iotdb/tsfile/read/common/Chunk.java |  11 +
 .../tsfile/read/reader/chunk/ChunkReader.java      |  96 ++-
 .../tsfile/read/reader/page/LazyPageReader.java    | 236 +++++++
 .../iotdb/tsfile/utils/GSHashMapForStat.java       | 215 ++++++
 .../iotdb/tsfile/utils/HeapLongKLLSketch.java      | 336 ++++++++++
 .../tsfile/utils/HeapLongStrictKLLSketch.java      | 348 ++++++++++
 .../iotdb/tsfile/utils/KLLSketchForQuantile.java   | 154 +++++
 .../apache/iotdb/tsfile/utils/LongKLLSketch.java   | 284 ++++++++
 .../tsfile/utils/SamplingHeapForStatMerge.java     | 289 ++++++++
 .../iotdb/tsfile/utils/TDigestForStatMerge.java    | 263 ++++++++
 .../iotdb/tsfile/v2/file/header/PageHeaderV2.java  |   3 +-
 .../tsfile/v2/file/metadata/ChunkMetadataV2.java   |   3 +-
 .../v2/file/metadata/TimeseriesMetadataV2.java     |   3 +-
 .../v2/file/metadata/statistics/StatisticsV2.java  |   2 +-
 .../write/chunk/AlignedChunkGroupWriterImpl.java   | 118 +++-
 .../iotdb/tsfile/write/chunk/ChunkWriterImpl.java  |   6 +-
 .../iotdb/tsfile/write/chunk/TimeChunkWriter.java  |  15 +-
 .../iotdb/tsfile/write/chunk/ValueChunkWriter.java |  14 +-
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |   2 +-
 .../iotdb/tsfile/read/ReadInPartitionTest.java     |   7 +-
 .../controller/IMetadataQuerierByFileImplTest.java |   9 +-
 112 files changed, 22711 insertions(+), 502 deletions(-)
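
The core of this commit is a quantile summary attached to per-chunk statistics (see the
Statistics/DoubleStatistics diffs below), apparently selectable among a KLL-style sketch, a
t-digest, and a random sample via the new summary_type setting. The committed Java classes
(HeapLongKLLSketch, LongKLLSketch, TDigestForStatMerge, SamplingHeapForStatMerge) are the real
implementations; the following is only a toy Python sketch of the KLL idea for orientation:
items at level i carry weight 2**i, and an overflowing level is sorted, halved at random, and
promoted to the next level.

    import random

    class ToyKLL:
        """Toy KLL-style quantile sketch: level i stores items of weight 2**i."""

        def __init__(self, k=64):
            self.k = k          # capacity of level 0; higher levels shrink
            self.levels = [[]]

        def update(self, value):
            self.levels[0].append(value)
            for lvl in range(len(self.levels)):
                buf = self.levels[lvl]
                if len(buf) <= max(2, self.k >> lvl):
                    continue
                buf.sort()
                survivors = buf[random.randint(0, 1)::2]  # keep every other item
                self.levels[lvl] = []
                if lvl + 1 == len(self.levels):
                    self.levels.append([])
                self.levels[lvl + 1].extend(survivors)    # weight doubles

        def quantile(self, q):
            # walk the weighted items in value order until rank q*total is reached
            weighted = sorted(
                (v, 1 << lvl) for lvl, buf in enumerate(self.levels) for v in buf
            )
            target = q * sum(w for _, w in weighted)
            acc = 0.0
            for v, w in weighted:
                acc += w
                if acc >= target:
                    return v
            return None  # empty sketch

In the committed code, per-chunk sketches are serialized into Statistics and merged at query
time; judging by its name, synopsis_size_in_byte below caps the serialized summary size.
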

diff --git a/client-py/SessionMemory.py b/client-py/SessionMemory.py
new file mode 100644
index 0000000000..6a724ad729
--- /dev/null
+++ b/client-py/SessionMemory.py
@@ -0,0 +1,86 @@
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.Tablet import Tablet
+import pandas as pd
+import numpy as np
+import time
+
+# creating session connection.
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
+session.open(False)
+
+# grps = ["tx_syn_01", "tx_syn_05"]
+# grps = ["bt_syn_01", "bt_syn_05"]
+# grps = ["st_syn_01", "st_syn_05"]
+# grps = ["wh_syn_01", "wh_syn_05"]
+ds = [["bt_syn_01", "bt_syn_05"], ["st_syn_01", "st_syn_05"], ["tx_syn_01", "tx_syn_05"], ["wh_syn_01", "wh_syn_05"]]
+times = 10
+drop = 0
+data_size = 50000000
+
+for grps in ds:
+    res = []
+    print(grps)
+    for i in range(3):
+        result = session.execute_query_statement(  # warm-up runs before timing
+            "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+            # "select TDIGEST_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+            # "select SAMPLING_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+        )
+
+    query_times = 0
+    for i in range(times + 2):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01 where time<=2000000"
+        )
+        query_times += time.time() - start_time
+
+    print(query_times / (times + 2 - drop))
+    res.append(query_times / (times + 2 - drop))
+
+    for i in range(3):
+        result = session.execute_query_statement(  # warm-up runs before timing
+            # "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+            # "select TDIGEST_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+            "select SAMPLING_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+        )
+
+    query_times = 0
+    for i in range(times):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select SAMPLING_QUANTILE(s_01) from root." + grps[1] + ".d_01 where time<=2000000"
+        )
+        query_times += time.time() - start_time
+
+    print(query_times / (times - drop))
+    res.append(query_times / (times - drop))
+
+    query_times = 0
+    for i in range(times):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select TDIGEST_QUANTILE(s_01) from root." + grps[1] + ".d_01 where time<=2000000"
+        )
+        query_times += time.time() - start_time
+
+    print(query_times / (times - drop))
+    res.append(query_times / (times - drop))
+
+    query_times = 0
+    for i in range(times + 2):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select KLL_QUANTILE(s_01) from root." + grps[1] + ".d_01 where time<=2000000"
+        )
+        query_times += time.time() - start_time
+
+    print(query_times / (times + 2 - drop))
+    res.append(query_times / (times + 2 - drop))
+
+    print(str(int(res[0] * 1000)) + " " + str(int(res[1] * 1000)) + " " + str(int(res[2] * 1000)) + " " + str(int(res[3] * 1000)))
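
The script above measures each sketch with warm-up queries first, then averages wall-clock
time over the timed runs (optionally dropping the first few). A hypothetical helper capturing
that pattern, not part of the commit:

    def timed_query(session, sql, runs=10, warmup=3, drop=0):
        # warm the caches, then average wall-clock time over the kept runs
        for _ in range(warmup):
            session.execute_query_statement(sql)
        elapsed = 0.0
        for i in range(runs):
            start = time.time()
            session.execute_query_statement(sql)
            if i >= drop:
                elapsed += time.time() - start
        return elapsed / (runs - drop)
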
diff --git a/client-py/SessionNo.py b/client-py/SessionNo.py
new file mode 100644
index 0000000000..a914280e88
--- /dev/null
+++ b/client-py/SessionNo.py
@@ -0,0 +1,143 @@
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.Tablet import Tablet
+import pandas as pd
+import numpy as np
+import time
+
+# creating session connection.
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
+session.open(False)
+
+# grps = ["tx_syn_05"]
+grps = ["bt_syn_05"]
+# grps = ["st_syn_05"]
+# grps = ["wh_syn_05"]
+drop = 0
+times = drop + 3
+data_size = 50000000
+quant = 0.5
+sizes = [10000000, 20000000, 30000000, 40000000, 50000000]
+
+for i in range(3):
+    result = session.execute_query_statement(
+                # "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+                "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+                # "select TDIGEST_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+                # "select SAMPLING_QUANTILE(s_01) from root." + grps[0] + ".d_01"
+            )
+
+# for grp in grps: 
+#     query_times = 0
+#     accuracy = 0
+#     print(grp)
+#     for i in range(times):
+#         start_time = time.time()
+#         result = session.execute_query_statement(
+#             # "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select TDIGEST_QUANTILE(s_01) from root." + grp + ".d_01"
+#             "select SAMPLING_QUANTILE(s_01) from root." + grp + ".d_01"
+#         )
+#         if i < drop:
+#             continue
+#         query_times += time.time() - start_time
+#         # result = str(result.next()).split()
+#         # quantile = float(result[1])
+
+#         # count = session.execute_query_statement(
+#         #     "select count(s_01) from root." + grp + ".d_01 where s_01<=" + str(quantile)
+#         # )
+
+#         # count =str(count.next()).split()
+#         # accuracy += data_size * quant - float(count[1])
+
+#     print(query_times / (times - drop), (accuracy / (times - drop)) / data_size)
+
+# for grp in grps:
+#     query_times = 0
+#     accuracy = 0
+#     print(grp)
+#     for i in range(times):
+#         start_time = time.time()
+#         result = session.execute_query_statement(
+#             # "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             "select TDIGEST_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select SAMPLING_QUANTILE(s_01) from root." + grp + ".d_01"
+#         )
+#         if i < drop:
+#             continue
+#         query_times += time.time() - start_time
+#         # result = str(result.next()).split()
+#         # quantile = float(result[1])
+
+#         # count = session.execute_query_statement(
+#         #     "select count(s_01) from root." + grp + ".d_01 where s_01<=" + str(quantile)
+#         # )
+
+#         # count =str(count.next()).split()
+#         # accuracy += data_size * quant - float(count[1])
+
+#     print(query_times / (times - drop), (accuracy / (times - drop)) / data_size)
+
+# for grp in grps:
+#     query_times = 0
+#     accuracy = 0
+#     print(grp)
+#     for i in range(times):
+#         start_time = time.time()
+#         result = session.execute_query_statement(
+#             # "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select TDIGEST_QUANTILE(s_01) from root." + grp + ".d_01"
+#             # "select SAMPLING_QUANTILE(s_01) from root." + grp + ".d_01"
+#         )
+#         if i < drop:
+#             continue
+#         query_times += time.time() - start_time
+#         # result = str(result.next()).split()
+#         # quantile = float(result[1])
+
+#         # count = session.execute_query_statement(
+#         #     "select count(s_01) from root." + grp + ".d_01 where s_01<=" + str(quantile)
+#         # )
+
+#         # count =str(count.next()).split()
+#         # accuracy += data_size * quant - float(count[1])
+
+#     print(query_times / (times - drop), (accuracy / (times - drop)) / data_size)
+
+# *******************************data size, no-sketch************************************
+
+for size in sizes:
+    print(size)
+    query_times = 0
+    accuracy = 0
+    for i in range(times):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01 where time<=" + str(size)
+            # "select TDIGEST_QUANTILE(s_01) from root." + grps[0] + ".d_01 where time<=" + str(size)
+            # "select SAMPLING_QUANTILE(s_01) from root." + grps[0] + ".d_01 where time<=" + str(size)
+        )
+        if i < drop:
+            continue
+        query_times += time.time() - start_time
+        # result = str(result.next()).split()
+        # quantile = float(result[1])
+
+        # count = session.execute_query_statement(
+        #     # "select count(s_01) from root.sg_syn_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        #     "select count(s_01) from root.sg_td_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        #     # "select count(s_01) from root.sg_rs_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        # )
+
+        # count =str(count.next()).split()
+        # accuracy += size * quant - float(count[1])
+
+    print(query_times / (times - drop), (accuracy / (times - drop)) / size)
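
The commented-out accuracy block above scores an estimate by relative rank error: for an
estimated q-quantile x over n points, it compares q*n with the true rank
count(s_01 <= x). A hypothetical helper with the same logic (the name and signature are
illustrative, not from the commit):

    def rank_error(session, grp, estimate, n, q=0.5):
        rs = session.execute_query_statement(
            "select count(s_01) from root." + grp + ".d_01 where s_01<=" + str(estimate)
        )
        true_rank = float(str(rs.next()).split()[1])
        return abs(q * n - true_rank) / n  # relative rank error in [0, 1]
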
diff --git a/client-py/SessionQuery.py b/client-py/SessionQuery.py
new file mode 100644
index 0000000000..38d1aa3f64
--- /dev/null
+++ b/client-py/SessionQuery.py
@@ -0,0 +1,74 @@
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.Tablet import Tablet
+import pandas as pd
+import numpy as np
+import time
+
+# creating session connection.
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
+session.open(False)
+
+grps = ["tx_syn_01", "tx_syn_02", "tx_syn_03", "tx_syn_04"]
+# grps = ["bt_syn_01", "bt_syn_02", "bt_syn_03", "bt_syn_04"]
+# grps = ["st_syn_01", "st_syn_02", "st_syn_03", "st_syn_04"]
+# grps = ["wh_syn_01", "wh_syn_02", "wh_syn_03", "wh_syn_04"]
+times = 6
+drop = 3
+data_size = 50000000
+sizes = [10000000, 20000000, 30000000, 40000000, 50000000]
+quant = 0.5
+
+for grp in grps:
+    query_times = 0
+    accuracy = 0
+    print(grp)
+    for i in range(times):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select KLL_QUANTILE(s_01) from root." + grp + ".d_01"
+        )
+        if i < drop:
+            continue
+        query_times += time.time() - start_time
+        # result = str(result.next()).split()
+        # quantile = float(result[1])
+
+        # count = session.execute_query_statement(
+        #     "select count(s_01) from root." + grp + ".d_01 where s_01<=" + str(quantile)
+        # )
+
+        # count =str(count.next()).split()
+        # accuracy += data_size * quant - float(count[1])
+
+    print(query_times / (times - drop), (accuracy / (times - drop)) / data_size)
+
+for size in sizes:
+    print(size)
+    query_times = 0
+    accuracy = 0
+    for i in range(times):
+        start_time = time.time()
+        result = session.execute_query_statement(
+            "select KLL_QUANTILE(s_01) from root." + grps[0] + ".d_01 where time<=" + str(size)
+        )
+        if i < drop:
+            continue
+        query_times += time.time() - start_time
+        # result = str(result.next()).split()
+        # quantile = float(result[1])
+
+        # count = session.execute_query_statement(
+        #     # "select count(s_01) from root.sg_syn_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        #     "select count(s_01) from root.sg_td_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        #     # "select count(s_01) from root.sg_rs_02.d_01 where s_01<=" + str(quantile) + " and time<=" + str(size)
+        # )
+
+        # count =str(count.next()).split()
+        # accuracy += size * quant - float(count[1])
+
+    print(query_times / (times - drop), (accuracy / (times - drop)) / size)
diff --git a/client-py/SessionSyn.py b/client-py/SessionSyn.py
new file mode 100644
index 0000000000..2649c7a05d
--- /dev/null
+++ b/client-py/SessionSyn.py
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Uncomment the following line to use apache-iotdb module installed by pip3
+#
+
+from iotdb.Session import Session
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from iotdb.utils.Tablet import Tablet
+import pandas as pd
+import numpy as np
+
+# creating session connection.
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
+session.open(False)
+
+grp = "tx_syn_04"
+# syn_01: 1024
+# syn_02: 512
+# syn_03: 256
+# syn_04: 128
+# syn_05: false
+
+# set and delete storage groups
+session.delete_storage_group("root." + grp)
+session.set_storage_group("root." + grp)
+
+# setting time series.
+session.create_time_series(
+    "root." + grp + ".d_01.s_01", TSDataType.DOUBLE, TSEncoding.PLAIN, Compressor.SNAPPY
+)
+
+# checking time series
+print(
+    "s_01 expecting True, checking result: ",
+    session.check_time_series_exists("root." + grp + ".d_01.s_01"),
+)
+
+# df = pd.read_csv("~/LSM-Quantile/wh.csv")
+# data = df["value"].tolist()
+# data = [[datum] for datum in data]
+df = pd.read_csv("~/LSM-Quantile/taxi.txt")
+data = (np.array(df)).tolist()
+data = [[datum[0]] for datum in data]
+data = data[:50000000]
+batch = 81920
+print(data[:10])
+print(type(data[0]))
+
+measurements_ = ["s_01"]
+data_types_ = [
+    TSDataType.DOUBLE
+]
+
+for i in range(int(len(data) / batch)):
+    if i % 100 == 0:
+        print("Iter: " + str(i))
+    # insert one tablet into the database.
+    values_ = data[i * batch : (i + 1) * batch]  # non-ASCII text would fail here: a byte can only hold ASCII values 0-127
+    timestamps_ = list(range(i * batch, (i + 1) * batch))
+    tablet_ = Tablet(
+        "root." + grp + ".d_01", measurements_, data_types_, values_, timestamps_
+    )
+    session.insert_tablet(tablet_)
+    # session.execute_non_query_statement("flush")
+
+# close session connection.
+session.close()
+
+print("All executions done!!")
\ No newline at end of file
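
SessionSyn.py leaves the per-batch "flush" statement commented out. Because this branch
attaches a summary to every sealed chunk, forcing flushes controls how many chunks (and
therefore how many summaries) the 50M ingested points produce. A hypothetical variant of the
insert loop that flushes on a fixed cadence:

    flush_every = 100  # assumed cadence; tune to the chunk layout under test
    for i in range(int(len(data) / batch)):
        values_ = data[i * batch : (i + 1) * batch]
        timestamps_ = list(range(i * batch, (i + 1) * batch))
        session.insert_tablet(
            Tablet("root." + grp + ".d_01", measurements_, data_types_, values_, timestamps_)
        )
        if (i + 1) % flush_every == 0:
            session.execute_non_query_statement("flush")
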
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
index 5589d7a2f3..4a6e16831f 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java
@@ -750,7 +750,8 @@ public class LocalQueryExecutor {
         ascResults,
         descResults,
         new SlotTsFileFilter(nodeSlots),
-        ascending);
+        ascending,
+        null);
     return results;
   }
 
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java
index 022da56122..76e4099bbd 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java
@@ -22,22 +22,32 @@ package org.apache.iotdb.cluster.query.aggregate;
 import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory;
 import org.apache.iotdb.cluster.query.reader.ClusterTimeGenerator;
 import org.apache.iotdb.cluster.server.member.MetaGroupMember;
+import org.apache.iotdb.db.engine.StorageEngine;
+import org.apache.iotdb.db.engine.storagegroup.VirtualStorageGroupProcessor;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
+import org.apache.iotdb.db.metadata.path.AlignedPath;
 import org.apache.iotdb.db.metadata.path.PartialPath;
+import org.apache.iotdb.db.metadata.utils.MetaUtils;
 import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
 import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
 import org.apache.iotdb.db.query.aggregation.AggregateResult;
 import org.apache.iotdb.db.query.context.QueryContext;
+import org.apache.iotdb.db.query.control.QueryResourceManager;
+import org.apache.iotdb.db.query.dataset.SingleDataSet;
 import org.apache.iotdb.db.query.executor.AggregationExecutor;
+import org.apache.iotdb.db.query.factory.AggregateResultFactory;
 import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.read.common.RowRecord;
+import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
 import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
+import org.apache.iotdb.tsfile.utils.Pair;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
+import java.io.IOException;
+import java.util.*;
 
 public class ClusterAggregateExecutor extends AggregationExecutor {
 
@@ -58,6 +68,71 @@ public class ClusterAggregateExecutor extends AggregationExecutor {
     this.aggregator = new ClusterAggregator(metaMember);
   }
 
+  /**
+   * Execute an aggregate query with only a time filter or no filter. This is the old version of
+   * AggregationExecutor.executeWithoutValueFilter.
+   */
+  @Override
+  public QueryDataSet executeWithoutValueFilter(AggregationPlan aggregationPlan)
+      throws StorageEngineException, IOException, QueryProcessException {
+
+    Filter timeFilter = null;
+    if (expression != null) {
+      timeFilter = ((GlobalTimeExpression) expression).getFilter();
+    }
+
+    // TODO use multi-thread
+    Map<PartialPath, List<Integer>> pathToAggrIndexesMap =
+        MetaUtils.groupAggregationsBySeries(selectedSeries);
+    // Attention: this method will REMOVE aligned path from pathToAggrIndexesMap
+    Map<AlignedPath, List<List<Integer>>> alignedPathToAggrIndexesMap =
+        MetaUtils.groupAlignedSeriesWithAggregations(pathToAggrIndexesMap);
+
+    List<PartialPath> groupedPathList =
+        new ArrayList<>(pathToAggrIndexesMap.size() + alignedPathToAggrIndexesMap.size());
+    groupedPathList.addAll(pathToAggrIndexesMap.keySet());
+    groupedPathList.addAll(alignedPathToAggrIndexesMap.keySet());
+
+    // TODO-Cluster: group the paths by storage group to reduce communications
+    Pair<List<VirtualStorageGroupProcessor>, Map<VirtualStorageGroupProcessor, List<PartialPath>>>
+        lockListAndProcessorToSeriesMapPair =
+            StorageEngine.getInstance().mergeLock(groupedPathList);
+    List<VirtualStorageGroupProcessor> lockList = lockListAndProcessorToSeriesMapPair.left;
+    Map<VirtualStorageGroupProcessor, List<PartialPath>> processorToSeriesMap =
+        lockListAndProcessorToSeriesMapPair.right;
+
+    try {
+      // init QueryDataSource Cache
+      QueryResourceManager.getInstance()
+          .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
+    } catch (Exception e) {
+      logger.error("Meet error when init QueryDataSource ", e);
+      throw new QueryProcessException("Meet error when init QueryDataSource.", e);
+    } finally {
+      StorageEngine.getInstance().mergeUnLock(lockList);
+    }
+
+    for (Map.Entry<PartialPath, List<Integer>> entry : pathToAggrIndexesMap.entrySet()) {
+      PartialPath seriesPath = entry.getKey();
+      aggregateOneSeries(
+          seriesPath,
+          entry.getValue(),
+          aggregationPlan.getAllMeasurementsInDevice(seriesPath.getDevice()),
+          timeFilter);
+    }
+    for (Map.Entry<AlignedPath, List<List<Integer>>> entry :
+        alignedPathToAggrIndexesMap.entrySet()) {
+      AlignedPath alignedPath = entry.getKey();
+      aggregateOneAlignedSeries(
+          alignedPath,
+          entry.getValue(),
+          aggregationPlan.getAllMeasurementsInDevice(alignedPath.getDevice()),
+          timeFilter);
+    }
+
+    return constructDataSet(Arrays.asList(aggregateResultList), aggregationPlan);
+  }
+
   @Override
   protected void aggregateOneSeries(
       PartialPath seriesPath,
@@ -86,6 +161,61 @@ public class ClusterAggregateExecutor extends AggregationExecutor {
     }
   }
 
+  /** This is the old version of AggregationExecutor.aggregateOneAlignedSeries. */
+  @Override
+  protected void aggregateOneAlignedSeries(
+      AlignedPath alignedPath,
+      List<List<Integer>> subIndexes,
+      Set<String> allMeasurementsInDevice,
+      Filter timeFilter)
+      throws IOException, QueryProcessException, StorageEngineException {
+    List<List<AggregateResult>> ascAggregateResultList = new ArrayList<>();
+    List<List<AggregateResult>> descAggregateResultList = new ArrayList<>();
+    boolean[] isAsc = new boolean[aggregateResultList.length];
+
+    for (List<Integer> subIndex : subIndexes) {
+      TSDataType tsDataType = dataTypes.get(subIndex.get(0));
+      List<AggregateResult> subAscResultList = new ArrayList<>();
+      List<AggregateResult> subDescResultList = new ArrayList<>();
+      for (int i : subIndex) {
+        // construct AggregateResult
+        AggregateResult aggregateResult =
+            AggregateResultFactory.getAggrResultByName(aggregations.get(i), tsDataType);
+        if (aggregateResult.isAscending()) {
+          subAscResultList.add(aggregateResult);
+          isAsc[i] = true;
+        } else {
+          subDescResultList.add(aggregateResult);
+        }
+      }
+      ascAggregateResultList.add(subAscResultList);
+      descAggregateResultList.add(subDescResultList);
+    }
+
+    aggregateOneAlignedSeries(
+        alignedPath,
+        allMeasurementsInDevice,
+        context,
+        timeFilter,
+        TSDataType.VECTOR,
+        ascAggregateResultList,
+        descAggregateResultList,
+        null,
+        ascending);
+
+    for (int i = 0; i < subIndexes.size(); i++) {
+      List<Integer> subIndex = subIndexes.get(i);
+      List<AggregateResult> subAscResultList = ascAggregateResultList.get(i);
+      List<AggregateResult> subDescResultList = descAggregateResultList.get(i);
+      int ascIndex = 0;
+      int descIndex = 0;
+      for (int index : subIndex) {
+        aggregateResultList[index] =
+            isAsc[index] ? subAscResultList.get(ascIndex++) : subDescResultList.get(descIndex++);
+      }
+    }
+  }
+
   @Override
   protected TimeGenerator getTimeGenerator(QueryContext context, RawDataQueryPlan rawDataQueryPlan)
       throws StorageEngineException {
@@ -104,4 +234,38 @@ public class ClusterAggregateExecutor extends AggregationExecutor {
         dataQueryPlan.isAscending(),
         null);
   }
+
+  /**
+   * Construct a QueryDataSet from the aggregate result list. This is the old version of
+   * AggregationExecutor.constructDataSet.
+   *
+   * @param aggregateResultList aggregate result list
+   */
+  protected QueryDataSet constructDataSet(
+      List<AggregateResult> aggregateResultList, AggregationPlan plan) {
+    SingleDataSet dataSet;
+    RowRecord record = new RowRecord(0);
+
+    if (plan.isGroupByLevel()) {
+      Map<String, AggregateResult> groupPathsResultMap =
+          plan.groupAggResultByLevel(aggregateResultList);
+
+      List<PartialPath> paths = new ArrayList<>();
+      List<TSDataType> dataTypes = new ArrayList<>();
+      for (AggregateResult resultData : groupPathsResultMap.values()) {
+        dataTypes.add(resultData.getResultDataType());
+        record.addField(resultData.getResult(), resultData.getResultDataType());
+      }
+      dataSet = new SingleDataSet(paths, dataTypes);
+    } else {
+      for (AggregateResult resultData : aggregateResultList) {
+        TSDataType dataType = resultData.getResultDataType();
+        record.addField(resultData.getResult(), dataType);
+      }
+      dataSet = new SingleDataSet(selectedSeries, dataTypes);
+    }
+    dataSet.setRecord(record);
+
+    return dataSet;
+  }
 }
diff --git a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileWriteAlignedWithTablet.java b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileWriteAlignedWithTablet.java
index bd14059b20..81cb42193a 100644
--- a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileWriteAlignedWithTablet.java
+++ b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileWriteAlignedWithTablet.java
@@ -23,7 +23,6 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
 import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.utils.Binary;
 import org.apache.iotdb.tsfile.write.TsFileWriter;
 import org.apache.iotdb.tsfile.write.record.Tablet;
 import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
@@ -36,11 +35,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.apache.iotdb.tsfile.Constant.DEVICE_1;
-import static org.apache.iotdb.tsfile.Constant.DEVICE_2;
-import static org.apache.iotdb.tsfile.Constant.SENSOR_1;
-import static org.apache.iotdb.tsfile.Constant.SENSOR_2;
-import static org.apache.iotdb.tsfile.Constant.SENSOR_3;
+import static org.apache.iotdb.tsfile.Constant.*;
 
 public class TsFileWriteAlignedWithTablet {
   private static final Logger logger = LoggerFactory.getLogger(TsFileWriteAlignedWithTablet.class);
@@ -53,9 +48,9 @@ public class TsFileWriteAlignedWithTablet {
 
     try (TsFileWriter tsFileWriter = new TsFileWriter(f)) {
       List<MeasurementSchema> measurementSchemas = new ArrayList<>();
-      measurementSchemas.add(new MeasurementSchema(SENSOR_1, TSDataType.TEXT, TSEncoding.PLAIN));
-      measurementSchemas.add(new MeasurementSchema(SENSOR_2, TSDataType.TEXT, TSEncoding.PLAIN));
-      measurementSchemas.add(new MeasurementSchema(SENSOR_3, TSDataType.TEXT, TSEncoding.PLAIN));
+      measurementSchemas.add(new MeasurementSchema(SENSOR_1, TSDataType.INT64, TSEncoding.PLAIN));
+      measurementSchemas.add(new MeasurementSchema(SENSOR_2, TSDataType.INT64, TSEncoding.PLAIN));
+      measurementSchemas.add(new MeasurementSchema(SENSOR_3, TSDataType.INT64, TSEncoding.PLAIN));
 
       // register align timeseries
       tsFileWriter.registerAlignedTimeseries(new Path(DEVICE_1), measurementSchemas);
@@ -65,9 +60,9 @@ public class TsFileWriteAlignedWithTablet {
       writeMeasurementScheams.add(measurementSchemas.get(0));
       writeMeasurementScheams.add(measurementSchemas.get(1));
       writeMeasurementScheams.add(measurementSchemas.get(2));
-      writeAlignedWithTablet(tsFileWriter, DEVICE_1, writeMeasurementScheams, 200000, 0, 0);
+      writeAlignedWithTablet(tsFileWriter, DEVICE_1, writeMeasurementScheams, 20000, 0, 0);
 
-      writeNonAlignedWithTablet(tsFileWriter); // write nonAligned timeseries
+      //      writeNonAlignedWithTablet(tsFileWriter); // write nonAligned timeseries
     } catch (WriteProcessException e) {
       logger.error("write Tablet failed", e);
     }
@@ -90,8 +85,8 @@ public class TsFileWriteAlignedWithTablet {
       int row = tablet.rowSize++;
       timestamps[row] = startTime++;
       for (int i = 0; i < sensorNum; i++) {
-        Binary[] textSensor = (Binary[]) values[i];
-        textSensor[row] = new Binary("testString.........");
+        long[] textSensor = (long[]) values[i];
+        textSensor[row] = startValue;
       }
       // write
       if (tablet.rowSize == tablet.getMaxRowNumber()) {
diff --git a/server/pom.xml b/server/pom.xml
index d052794509..fa54ca5762 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -140,6 +140,31 @@
                 </exclusion>
             </exclusions>
         </dependency>
+        <dependency>
+            <groupId>it.unimi.dsi</groupId>
+            <artifactId>fastutil</artifactId>
+            <version>8.5.8</version>
+        </dependency>
+        <dependency>
+            <groupId>it.unimi.dsi</groupId>
+            <artifactId>dsiutils</artifactId>
+            <version>2.7.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.collections</groupId>
+            <artifactId>eclipse-collections-api</artifactId>
+            <version>11.0.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.collections</groupId>
+            <artifactId>eclipse-collections</artifactId>
+            <version>11.0.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.datasketches</groupId>
+            <artifactId>datasketches-java</artifactId>
+            <version>3.2.0</version>
+        </dependency>
         <!-- for mocked test-->
         <dependency>
             <groupId>org.powermock</groupId>
diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties b/server/src/assembly/resources/conf/iotdb-engine.properties
index 7150de3e8c..4a26cf2c9f 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -21,6 +21,32 @@
 ### RPC Configuration
 ####################
 
+enable_synopsis=true
+synopsis_size_in_byte=1024
+aggregator_memory_in_kb=1024
+summary_type=0
+quantile=0.5
+
+enable_seq_space_compaction=false
+enable_unseq_space_compaction=false
+enable_cross_space_compaction=false
+meta_data_cache_enable=true
+
+aggregation_strategy=0
+bloom_filter_bits_per_key=16
+enable_bloom_filter=false
+max_number_of_points_in_chunk=8000
+
+flush_wal_threshold=4096
+
+# The memory size for each series writer to pack page, default value is 64KB
+# Datatype: int
+page_size_in_byte=65536
+
+# The maximum number of data points in a page (upstream default 1024*1024; this branch sets 4096)
+# Datatype: int
+max_number_of_points_in_page=4096
+
 # Datatype: String
 rpc_address=0.0.0.0
 
@@ -62,7 +88,6 @@ rpc_port=6667
 # When a certain amount of insert ahead log is reached, it will be flushed to disk
 # It is possible to lose at most flush_wal_threshold operations
 # Datatype: int
-# flush_wal_threshold=10000
 
 # The cycle when insert ahead log is periodically forced to be written to disk(in milliseconds)
 # If force_wal_period_in_ms = 0 it means force insert ahead log to be written to disk after each refreshment
@@ -688,14 +713,6 @@ timestamp_precision=ms
 # Datatype: int
 # group_size_in_byte=134217728
 
-# The memory size for each series writer to pack page, default value is 64KB
-# Datatype: int
-# page_size_in_byte=65536
-
-# The maximum number of data points in a page, default 1024*1024
-# Datatype: int
-# max_number_of_points_in_page=1048576
-
 # Max size limitation of input string
 # Datatype: int
 # max_string_length=128
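
The block added at the top of this file pins the experiment's layout: compaction is disabled
so chunks stay where ingestion put them, pages are capped at 4096 points and chunks at 8000,
and enable_synopsis, synopsis_size_in_byte, summary_type and quantile are the new knobs read
by IoTDBDescriptor below. Under those caps the bookkeeping is simple; roughly (ignoring flush
boundaries and the byte-size limits, which can seal pages earlier):

    max_points_in_chunk = 8000
    max_points_in_page = 4096
    pages_per_chunk = -(-max_points_in_chunk // max_points_in_page)  # ceil(8000/4096) = 2
    chunks_per_series = 50_000_000 // max_points_in_chunk            # 6250 summaries
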
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index b6c1da96c5..9d5f4791a8 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -2525,4 +2525,24 @@ public class IoTDBConfig {
   public void setEncryptDecryptProviderParameter(String encryptDecryptProviderParameter) {
     this.encryptDecryptProviderParameter = encryptDecryptProviderParameter;
   }
+
+  private int aggregatorMemoryInKB = 128;
+
+  public void setAggregatorMemoryInKB(int kb) {
+    this.aggregatorMemoryInKB = kb;
+  }
+
+  public int getAggregatorMemoryInKB() {
+    return aggregatorMemoryInKB;
+  }
+
+  private int aggregationStrategy = 0;
+
+  public void setAggregationStrategy(int s) {
+    this.aggregationStrategy = s;
+  }
+
+  public int getAggregationStrategy() {
+    return aggregationStrategy;
+  }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 0d5cb9e5b4..a06f2efb9f 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -132,6 +132,32 @@ public class IoTDBDescriptor {
       conf.setRpcAddress(properties.getProperty("rpc_address", conf.getRpcAddress()));
       replaceHostnameWithIP();
 
+      conf.setEnableSeqSpaceCompaction(
+          Boolean.parseBoolean(
+              properties.getProperty(
+                  "enable_seq_space_compaction",
+                  Boolean.toString(conf.isEnableSeqSpaceCompaction()))));
+      conf.setEnableUnseqSpaceCompaction(
+          Boolean.parseBoolean(
+              properties.getProperty(
+                  "enable_unseq_space_compaction",
+                  Boolean.toString(conf.isEnableUnseqSpaceCompaction()))));
+      conf.setEnableCrossSpaceCompaction(
+          Boolean.parseBoolean(
+              properties.getProperty(
+                  "enable_cross_space_compaction",
+                  Boolean.toString(conf.isEnableCrossSpaceCompaction()))));
+
+      conf.setAggregatorMemoryInKB(
+          Integer.parseInt(
+              properties.getProperty(
+                  "aggregator_memory_in_kb", Integer.toString(conf.getAggregatorMemoryInKB()))));
+
+      conf.setAggregationStrategy(
+          Integer.parseInt(
+              properties.getProperty(
+                  "aggregation_strategy", Integer.toString(conf.getAggregationStrategy()))));
+
       conf.setRpcThriftCompressionEnable(
           Boolean.parseBoolean(
               properties.getProperty(
@@ -986,6 +1012,85 @@ public class IoTDBDescriptor {
   }
 
   private void loadTsFileProps(Properties properties) {
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setEnableSynopsis(
+            Boolean.parseBoolean(
+                properties.getProperty(
+                    "enable_synopsis",
+                    Boolean.toString(
+                        TSFileDescriptor.getInstance().getConfig().isEnableSynopsis()))));
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setEnableBloomFilter(
+            Boolean.parseBoolean(
+                properties.getProperty(
+                    "enable_bloom_filter",
+                    Boolean.toString(
+                        TSFileDescriptor.getInstance().getConfig().isEnableBloomFilter()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setSynopsisSizeInByte(
+            Integer.parseInt(
+                properties.getProperty(
+                    "synopsis_size_in_byte",
+                    Integer.toString(
+                        TSFileDescriptor.getInstance().getConfig().getSynopsisSizeInByte()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setKLLBulkMergeB(
+            Integer.parseInt(
+                properties.getProperty(
+                    "kll_bulk_merge_b",
+                    Integer.toString(
+                        TSFileDescriptor.getInstance().getConfig().getKLLBulkMergeB()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setSummaryType(
+            Integer.parseInt(
+                properties.getProperty(
+                    "summary_type",
+                    Integer.toString(
+                        TSFileDescriptor.getInstance().getConfig().getSummaryType()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setQUANTILE(
+            Double.parseDouble(
+                properties.getProperty(
+                    "quantile",
+                    Double.toString(TSFileDescriptor.getInstance().getConfig().getQUANTILE()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setQuantileFile(
+            (properties.getProperty(
+                "quantile_file", (TSFileDescriptor.getInstance().getConfig().getQuantileFile()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setBloomFilterBitsPerKey(
+            Integer.parseInt(
+                properties.getProperty(
+                    "bloom_filter_bits_per_key",
+                    Integer.toString(
+                        TSFileDescriptor.getInstance().getConfig().getBloomFilterBitsPerKey()))));
+
+    TSFileDescriptor.getInstance()
+        .getConfig()
+        .setMaxNumberOfPointsInChunk(
+            Integer.parseInt(
+                properties.getProperty(
+                    "max_number_of_points_in_chunk",
+                    Integer.toString(
+                        TSFileDescriptor.getInstance()
+                            .getConfig()
+                            .getMaxNumberOfPointsInChunk()))));
+
     TSFileDescriptor.getInstance()
         .getConfig()
         .setGroupSizeInByte(
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/constant/SQLConstant.java b/server/src/main/java/org/apache/iotdb/db/qp/constant/SQLConstant.java
index a0b3168568..b6737a6e41 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/constant/SQLConstant.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/constant/SQLConstant.java
@@ -70,6 +70,37 @@ public class SQLConstant {
   public static final String COUNT = "count";
   public static final String AVG = "avg";
   public static final String SUM = "sum";
+  public static final String EXACT_MEDIAN = "exact_median";
+  public static final String EXACT_MEDIAN_OPT = "exact_median_opt";
+  public static final String EXACT_MEDIAN_OPT_2 = "exact_median_opt_2";
+  public static final String EXACT_MEDIAN_OPT_3 = "exact_median_opt_3";
+  public static final String EXACT_MEDIAN_OPT_4 = "exact_median_opt_4";
+  public static final String EXACT_MEDIAN_OPT_5 = "exact_median_opt_5";
+  public static final String EXACT_MEDIAN_AMORTIZED = "exact_median_amortized";
+  public static final String EXACT_MEDIAN_KLL_FLOATS = "exact_median_kll_floats";
+  public static final String EXACT_MEDIAN_AGGRESSIVE = "exact_median_aggressive";
+  public static final String EXACT_MEDIAN_BITS_BUCKET_STAT = "exact_median_bits_bucket_stat";
+  public static final String EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER =
+      "exact_median_bits_bucket_stat_filter";
+  public static final String EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE =
+      "exact_median_bits_bucket_stat_filter_aggressive";
+  public static final String EXACT_MEDIAN_KLL_STAT = "exact_median_kll_stat";
+  public static final String EXACT_MEDIAN_KLL_STAT_SINGLE = "exact_median_kll_stat_single";
+  public static final String EXACT_MEDIAN_KLL_FLOATS_SINGLE = "exact_median_kll_floats_single";
+  public static final String EXACT_MEDIAN_KLL_STAT_SINGLE_READ =
+      "exact_median_kll_stat_single_read";
+  public static final String EXACT_MEDIAN_KLL_DEBUG = "exact_median_kll_debug";
+  public static final String EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING =
+      "exact_median_kll_stat_debug_full_reading";
+  public static final String EXACT_MEDIAN_KLL_DEBUG_FULL_READING =
+      "exact_median_kll_debug_full_reading";
+  public static final String EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE =
+      "exact_median_kll_stat_debug_page_demand_rate";
+  public static final String EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE =
+      "exact_median_kll_stat_overlap_single";
+  public static final String TDIGEST_STAT_SINGLE = "tdigest_quantile";
+  public static final String SAMPLING_STAT_SINGLE = "sampling_quantile";
+  public static final String STRICT_KLL_STAT_SINGLE = "kll_quantile";
 
   public static final String ALL = "all";
 
@@ -85,7 +116,31 @@ public class SQLConstant {
               LAST_VALUE,
               COUNT,
               SUM,
-              AVG));
+              AVG,
+              EXACT_MEDIAN,
+              EXACT_MEDIAN_OPT,
+              EXACT_MEDIAN_OPT_2,
+              EXACT_MEDIAN_OPT_3,
+              EXACT_MEDIAN_OPT_4,
+              EXACT_MEDIAN_OPT_5,
+              EXACT_MEDIAN_AMORTIZED,
+              EXACT_MEDIAN_KLL_FLOATS,
+              EXACT_MEDIAN_AGGRESSIVE,
+              EXACT_MEDIAN_BITS_BUCKET_STAT,
+              EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER,
+              EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE,
+              EXACT_MEDIAN_KLL_STAT,
+              EXACT_MEDIAN_KLL_STAT_SINGLE,
+              EXACT_MEDIAN_KLL_FLOATS_SINGLE,
+              EXACT_MEDIAN_KLL_STAT_SINGLE_READ,
+              EXACT_MEDIAN_KLL_DEBUG,
+              EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING,
+              EXACT_MEDIAN_KLL_DEBUG_FULL_READING,
+              EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE,
+              EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE,
+              TDIGEST_STAT_SINGLE,
+              SAMPLING_STAT_SINGLE,
+              STRICT_KLL_STAT_SINGLE));
 
   public static final int TOK_WHERE = 23;
   public static final int TOK_INSERT = 24;
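
Of the names registered above, the three non-debug quantile entry points are exposed in SQL
as kll_quantile, tdigest_quantile and sampling_quantile; the client-py scripts in this commit
call them in upper case, so matching is presumably case-insensitive. A minimal usage sketch,
assuming a running server, an open session as in the scripts above, and a numeric series
root.sg.d_01.s_01 (the target quantile, 0.5 by default, is configured server-side via the
quantile property rather than in the SQL call):

    for fn in ("kll_quantile", "tdigest_quantile", "sampling_quantile"):
        rs = session.execute_query_statement(
            "select " + fn + "(s_01) from root.sg.d_01"
        )
        print(fn, str(rs.next()))
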
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/AggregationQueryOperator.java b/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/AggregationQueryOperator.java
index 11a4202b51..8e5951f73a 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/AggregationQueryOperator.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/logical/crud/AggregationQueryOperator.java
@@ -124,6 +124,30 @@ public class AggregationQueryOperator extends QueryOperator {
       case SQLConstant.EXTREME:
       case SQLConstant.MIN_VALUE:
       case SQLConstant.MAX_VALUE:
+      case SQLConstant.EXACT_MEDIAN:
+      case SQLConstant.EXACT_MEDIAN_OPT:
+      case SQLConstant.EXACT_MEDIAN_OPT_2:
+      case SQLConstant.EXACT_MEDIAN_OPT_3:
+      case SQLConstant.EXACT_MEDIAN_OPT_4:
+      case SQLConstant.EXACT_MEDIAN_OPT_5:
+      case SQLConstant.EXACT_MEDIAN_AMORTIZED:
+      case SQLConstant.EXACT_MEDIAN_KLL_FLOATS:
+      case SQLConstant.EXACT_MEDIAN_AGGRESSIVE:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_SINGLE:
+      case SQLConstant.EXACT_MEDIAN_KLL_FLOATS_SINGLE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_SINGLE_READ:
+      case SQLConstant.EXACT_MEDIAN_KLL_DEBUG:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING:
+      case SQLConstant.EXACT_MEDIAN_KLL_DEBUG_FULL_READING:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE:
+      case SQLConstant.TDIGEST_STAT_SINGLE:
+      case SQLConstant.SAMPLING_STAT_SINGLE:
+      case SQLConstant.STRICT_KLL_STAT_SINGLE:
         return dataType.isNumeric();
       case SQLConstant.COUNT:
       case SQLConstant.MIN_TIME:
@@ -149,6 +173,30 @@ public class AggregationQueryOperator extends QueryOperator {
       case SQLConstant.MAX_VALUE:
       case SQLConstant.AVG:
       case SQLConstant.SUM:
+      case SQLConstant.EXACT_MEDIAN:
+      case SQLConstant.EXACT_MEDIAN_OPT:
+      case SQLConstant.EXACT_MEDIAN_OPT_2:
+      case SQLConstant.EXACT_MEDIAN_OPT_3:
+      case SQLConstant.EXACT_MEDIAN_OPT_4:
+      case SQLConstant.EXACT_MEDIAN_OPT_5:
+      case SQLConstant.EXACT_MEDIAN_AMORTIZED:
+      case SQLConstant.EXACT_MEDIAN_KLL_FLOATS:
+      case SQLConstant.EXACT_MEDIAN_AGGRESSIVE:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER:
+      case SQLConstant.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_SINGLE:
+      case SQLConstant.EXACT_MEDIAN_KLL_FLOATS_SINGLE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_SINGLE_READ:
+      case SQLConstant.EXACT_MEDIAN_KLL_DEBUG:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING:
+      case SQLConstant.EXACT_MEDIAN_KLL_DEBUG_FULL_READING:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE:
+      case SQLConstant.EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE:
+      case SQLConstant.TDIGEST_STAT_SINGLE:
+      case SQLConstant.SAMPLING_STAT_SINGLE:
+      case SQLConstant.STRICT_KLL_STAT_SINGLE:
         return dataTypes.stream().allMatch(dataTypes.get(0)::equals);
       default:
         return true;
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/AggregationPlan.java b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/AggregationPlan.java
index c81e12e5f8..e756c79c89 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/AggregationPlan.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/physical/crud/AggregationPlan.java
@@ -24,6 +24,7 @@ import org.apache.iotdb.db.qp.logical.Operator;
 import org.apache.iotdb.db.qp.utils.GroupByLevelController;
 import org.apache.iotdb.db.query.aggregation.AggregateResult;
 import org.apache.iotdb.db.query.expression.ResultColumn;
+import org.apache.iotdb.db.query.factory.AggregateResultFactory;
 import org.apache.iotdb.db.utils.SchemaUtils;
 import org.apache.iotdb.rpc.RpcUtils;
 import org.apache.iotdb.rpc.TSStatusCode;
@@ -32,11 +33,7 @@ import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 
 import org.apache.thrift.TException;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 
 public class AggregationPlan extends RawDataQueryPlan {
 
@@ -51,6 +48,8 @@ public class AggregationPlan extends RawDataQueryPlan {
   private GroupByLevelController groupByLevelController;
   // group by level aggregation result path
   private final Map<String, AggregateResult> groupPathsResultMap = new LinkedHashMap<>();
+  private final Map<String, TSDataType> groupedPathToTSDataType = new LinkedHashMap<>();
+  private final Map<AggregateResult, AggregateResult> resultToGroupedAhead = new HashMap<>();
 
   public AggregationPlan() {
     super();
@@ -140,10 +139,7 @@ public class AggregationPlan extends RawDataQueryPlan {
       groupPathsResultMap.clear();
     }
     for (int i = 0; i < getDeduplicatedPaths().size(); i++) {
-      String rawPath =
-          String.format(
-              "%s(%s)",
-              deduplicatedAggregations.get(i), getDeduplicatedPaths().get(i).getFullPath());
+      String rawPath = getRawPath(i);
       String transformedPath = groupByLevelController.getGroupedPath(rawPath);
       AggregateResult result = groupPathsResultMap.get(transformedPath);
       if (result == null) {
@@ -156,6 +152,77 @@ public class AggregationPlan extends RawDataQueryPlan {
     return groupPathsResultMap;
   }
 
+  public Map<AggregateResult, AggregateResult> getresultToGroupedAhead() {
+    return resultToGroupedAhead;
+  }
+
+  public void groupAggResultByLevelBeforeAggregation(List<AggregateResult> aggregateResults) {
+    List<TSDataType> seriesDataTypes = this.getDeduplicatedDataTypes();
+    groupedPathToTSDataType.clear();
+    groupPathsResultMap.clear();
+    resultToGroupedAhead.clear();
+
+    if (!this.isGroupByLevel()) {
+      for (int i = 0; i < getDeduplicatedPaths().size(); i++) {
+        resultToGroupedAhead.put(aggregateResults.get(i), aggregateResults.get(i));
+      }
+      return;
+    }
+
+    // find the widest (DOUBLE > FLOAT > INT64 > INT32) TSDataType for the grouped AggregateResult
+    for (int i = 0; i < getDeduplicatedPaths().size(); i++)
+      if (aggregateResults.get(i).groupByLevelBeforeAggregation()) {
+        String rawPath = getRawPath(i);
+        String transformedPath = groupByLevelController.getGroupedPath(rawPath);
+        TSDataType dataType = groupedPathToTSDataType.get(transformedPath);
+        if (dataType == null || dataType.serialize() < seriesDataTypes.get(i).serialize()) {
+          groupedPathToTSDataType.put(transformedPath, seriesDataTypes.get(i));
+        }
+      }
+    for (int i = 0; i < getDeduplicatedPaths().size(); i++)
+      if (aggregateResults.get(i).groupByLevelBeforeAggregation()) {
+        String rawPath = getRawPath(i);
+        String transformedPath = groupByLevelController.getGroupedPath(rawPath);
+
+        AggregateResult groupedResult = groupPathsResultMap.get(transformedPath);
+        if (groupedResult == null) {
+          groupedResult =
+              AggregateResultFactory.getAggrResultByName(
+                  deduplicatedAggregations.get(i),
+                  groupedPathToTSDataType.get(transformedPath),
+                  aggregateResults.get(i).isAscending());
+          groupPathsResultMap.put(transformedPath, groupedResult);
+        }
+        resultToGroupedAhead.put(aggregateResults.get(i), groupedResult);
+      } else {
+        resultToGroupedAhead.put(aggregateResults.get(i), aggregateResults.get(i));
+      }
+  }
+
+  public Map<String, AggregateResult> groupAggResultByLevelAfterAggregation(
+      List<AggregateResult> aggregateResults) {
+    for (int i = 0; i < getDeduplicatedPaths().size(); i++)
+      if (!aggregateResults.get(i).groupByLevelBeforeAggregation()) {
+        String rawPath = getRawPath(i);
+        String transformedPath = groupByLevelController.getGroupedPath(rawPath);
+        AggregateResult result = groupPathsResultMap.get(transformedPath);
+        if (result == null) {
+          groupPathsResultMap.put(transformedPath, aggregateResults.get(i));
+        } else {
+          result.merge(aggregateResults.get(i));
+          groupPathsResultMap.put(transformedPath, result);
+        }
+      }
+    return groupPathsResultMap;
+  }
+
+  public String getRawPath(int aggrIndex) {
+    return String.format(
+        "%s(%s)",
+        deduplicatedAggregations.get(aggrIndex),
+        getDeduplicatedPaths().get(aggrIndex).getFullPath());
+  }
+
   @Override
   public boolean isGroupByLevel() {
     return levels != null;
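
The two grouping phases above split group-by-level handling in two: quantile-style results are redirected to one shared result per grouped path before any data is read, while merge-after aggregations keep their original path. A minimal sketch of how an executor could drive both phases (an assumption for illustration; the executor wiring lives outside this hunk, and `results` is a list aligned with the plan's deduplicated paths):

    void executeWithGroupByLevel(AggregationPlan plan, List<AggregateResult> results) {
      // Phase 1: results with groupByLevelBeforeAggregation() == true are mapped
      // onto one shared AggregateResult per grouped path before reading any data.
      plan.groupAggResultByLevelBeforeAggregation(results);
      Map<AggregateResult, AggregateResult> ahead = plan.getResultToGroupedAhead();
      for (AggregateResult raw : results) {
        AggregateResult target = ahead.get(raw); // == raw for merge-after aggregations
        // ... feed statistics and page data of raw's series into target ...
      }
      // Phase 2: the remaining results (e.g. LAST_VALUE) are merged after aggregation.
      plan.groupAggResultByLevelAfterAggregation(results);
    }
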
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
index 7daa1e8d60..bccb9a5ac9 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregateResult.java
@@ -19,10 +19,13 @@
 
 package org.apache.iotdb.db.query.aggregation;
 
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.exception.query.QueryProcessException;
 import org.apache.iotdb.db.query.factory.AggregateResultFactory;
+import org.apache.iotdb.db.query.reader.series.IAggregateReader;
 import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
 import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
@@ -30,8 +33,7 @@ import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
 import org.apache.iotdb.tsfile.utils.Binary;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
+import java.io.BufferedReader;
+import java.io.FileReader;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
 
 public abstract class AggregateResult {
@@ -48,6 +50,8 @@ public abstract class AggregateResult {
   private Binary binaryValue;
 
   protected boolean hasCandidateResult;
+  protected int maxMemoryByte;
+  protected double QUANTILE;
 
   /**
    * construct.
@@ -58,6 +62,22 @@ public abstract class AggregateResult {
     this.aggregationType = aggregationType;
     this.resultDataType = resultDataType;
     this.hasCandidateResult = false;
+    maxMemoryByte = IoTDBDescriptor.getInstance().getConfig().getAggregatorMemoryInKB() * 1024;
+    QUANTILE = TSFileDescriptor.getInstance().getConfig().getQUANTILE();
+    String quantileFile = TSFileDescriptor.getInstance().getConfig().getQuantileFile();
+    if (!quantileFile.isEmpty()) {
+      // optionally override QUANTILE with the first line of an external file
+      try (BufferedReader reader = new BufferedReader(new FileReader(quantileFile))) {
+        String str = reader.readLine();
+        if (str != null) {
+          QUANTILE = Double.parseDouble(str);
+        }
+      } catch (IOException | NumberFormatException e) {
+        // keep the configured QUANTILE if the file is missing or malformed
+      }
+    }
   }
 
   public abstract Object getResult();
@@ -91,7 +111,7 @@ public abstract class AggregateResult {
       IBatchDataIterator batchIterator, long minBound, long maxBound) throws IOException;
 
   /**
-   * This method calculates the aggregation using common timestamps of the cross series filter.
+   * This method updates the aggregation using common timestamps of the cross series filter.
    *
    * @throws IOException TsFile data read error
    */
@@ -102,6 +122,22 @@ public abstract class AggregateResult {
   public abstract void updateResultUsingValues(
       long[] timestamps, int length, ValueIterator valueIterator);
 
+  /**
+   * This method constructs the aggregation result from scratch based on the common timestamps of
+   * the cross series filter, instead of incrementally updating an existing result.
+   *
+   * @throws IOException TsFile data read error
+   */
+  public void constructResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    updateResultUsingTimestamps(timestamps, length, dataReader);
+  }
+
+  public void constructResultUsingValues(
+      long[] timestamps, int length, ValueIterator valueIterator) {
+    updateResultUsingValues(timestamps, length, valueIterator);
+  }
+
   /**
    * Judge if aggregation results have been calculated. In other words, if the aggregated result
    * does not need to compute the remaining data, it returns true.
@@ -110,9 +146,18 @@ public abstract class AggregateResult {
    */
   public abstract boolean hasFinalResult();
 
+  // TODO: to be dropped in favor of communicate() below
   /** Merge another aggregateResult into this */
   public abstract void merge(AggregateResult another);
 
+  /**
+   * Exchange intermediate state between two AggregateResults. The default is a no-op for
+   * aggregations whose partial results can simply be merged via merge().
+   */
+  public void communicate(AggregateResult another) {
+    // no-op
+  }
+
   public static AggregateResult deserializeFrom(ByteBuffer buffer) {
     AggregationType aggregationType = AggregationType.deserialize(buffer);
     TSDataType dataType = TSDataType.deserialize(buffer.get());
@@ -315,11 +360,47 @@ public abstract class AggregateResult {
     return aggregationType;
   }
 
   /**
    * Whether the AggregationResult accepts data in time ascending order, if it returns false, the
    * data should be passed in time descending order.
    */
   public boolean isAscending() {
     return true;
   }
+
+  /** Whether this aggregation can be updated directly from the given statistics. */
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    return true;
+  }
+
+  /** Whether chunk/page statistics should be preferred over reading raw data. */
+  public boolean useStatisticsIfPossible() {
+    return true;
+  }
+
+  /** Whether this aggregation needs more than one pass over the data. */
+  public boolean needMultiIterations() {
+    return false;
+  }
+
+  public int maxIteration() {
+    return 1;
+  }
+
+  public void startIteration() {
+    // no-op
+  }
+
+  public void finishIteration() {
+    // no-op
+  }
+
+  /** Whether statistics of overlapping chunks/pages can still be exploited. */
+  public boolean useOverlapStat() {
+    return false;
+  }
+
+  public void updateResultFromOverlap(IAggregateReader reader) {
+    // no-op
+  }
+
+  /**
+   * When grouping by level, quantile summaries must be merged before aggregation, while
+   * aggregations such as LAST_VALUE are merged after aggregation.
+   */
+  public boolean groupByLevelBeforeAggregation() {
+    return false;
+  }
 }
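
The hooks added above turn an aggregation into a bounded sequence of passes over the data. A minimal sketch of the control loop they imply (an assumption for illustration; `scanAllSeriesData` stands in for the reader logic defined elsewhere in this patch):

    void computeWithIterations(AggregateResult result) throws IOException {
      int maxIter = result.needMultiIterations() ? result.maxIteration() : 1;
      for (int i = 0; i < maxIter && !result.hasFinalResult(); i++) {
        result.startIteration();   // e.g. narrow the candidate value range
        scanAllSeriesData(result); // statistics first, raw pages only when needed
        result.finishIteration();  // may fix the final value or schedule another pass
      }
    }
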
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregationType.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregationType.java
index a3c651835c..e31c0d7ffb 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregationType.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/AggregationType.java
@@ -35,7 +35,31 @@ public enum AggregationType {
   MIN_TIME,
   MAX_VALUE,
   MIN_VALUE,
-  EXTREME;
+  EXTREME,
+  EXACT_MEDIAN,
+  EXACT_MEDIAN_OPT,
+  EXACT_MEDIAN_OPT_2,
+  EXACT_MEDIAN_OPT_3,
+  EXACT_MEDIAN_OPT_4,
+  EXACT_MEDIAN_OPT_5,
+  EXACT_MEDIAN_AMORTIZED,
+  EXACT_MEDIAN_KLL_FLOATS,
+  EXACT_MEDIAN_AGGRESSIVE,
+  EXACT_MEDIAN_BITS_BUCKET_STAT,
+  EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER,
+  EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE,
+  EXACT_MEDIAN_KLL_STAT,
+  EXACT_MEDIAN_KLL_STAT_SINGLE,
+  EXACT_MEDIAN_KLL_FLOATS_SINGLE,
+  EXACT_MEDIAN_KLL_STAT_SINGLE_READ,
+  EXACT_MEDIAN_KLL_DEBUG,
+  EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING,
+  EXACT_MEDIAN_KLL_DEBUG_FULL_READING,
+  EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE,
+  EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE,
+  TDIGEST_STAT_SINGLE,
+  SAMPLING_STAT_SINGLE,
+  STRICT_KLL_STAT_SINGLE;
 
   /**
    * give an integer to return a data type.
@@ -65,6 +89,54 @@ public enum AggregationType {
         return MIN_VALUE;
       case 9:
         return EXTREME;
+      case 10:
+        return EXACT_MEDIAN;
+      case 11:
+        return EXACT_MEDIAN_OPT;
+      case 12:
+        return EXACT_MEDIAN_OPT_2;
+      case 13:
+        return EXACT_MEDIAN_OPT_3;
+      case 14:
+        return EXACT_MEDIAN_OPT_4;
+      case 15:
+        return EXACT_MEDIAN_OPT_5;
+      case 16:
+        return EXACT_MEDIAN_AMORTIZED;
+      case 17:
+        return EXACT_MEDIAN_KLL_FLOATS;
+      case 18:
+        return EXACT_MEDIAN_AGGRESSIVE;
+      case 19:
+        return EXACT_MEDIAN_BITS_BUCKET_STAT;
+      case 20:
+        return EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER;
+      case 21:
+        return EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE;
+      case 22:
+        return EXACT_MEDIAN_KLL_STAT;
+      case 23:
+        return EXACT_MEDIAN_KLL_STAT_SINGLE;
+      case 24:
+        return EXACT_MEDIAN_KLL_FLOATS_SINGLE;
+      case 25:
+        return EXACT_MEDIAN_KLL_STAT_SINGLE_READ;
+      case 26:
+        return EXACT_MEDIAN_KLL_DEBUG;
+      case 27:
+        return EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING;
+      case 28:
+        return EXACT_MEDIAN_KLL_DEBUG_FULL_READING;
+      case 29:
+        return EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE;
+      case 30:
+        return EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE;
+      case 31:
+        return TDIGEST_STAT_SINGLE;
+      case 32:
+        return SAMPLING_STAT_SINGLE;
+      case 33:
+        return STRICT_KLL_STAT_SINGLE;
       default:
         throw new IllegalArgumentException("Invalid Aggregation Type: " + i);
     }
@@ -103,6 +175,78 @@ public enum AggregationType {
       case EXTREME:
         i = 9;
         break;
+      case EXACT_MEDIAN:
+        i = 10;
+        break;
+      case EXACT_MEDIAN_OPT:
+        i = 11;
+        break;
+      case EXACT_MEDIAN_OPT_2:
+        i = 12;
+        break;
+      case EXACT_MEDIAN_OPT_3:
+        i = 13;
+        break;
+      case EXACT_MEDIAN_OPT_4:
+        i = 14;
+        break;
+      case EXACT_MEDIAN_OPT_5:
+        i = 15;
+        break;
+      case EXACT_MEDIAN_AMORTIZED:
+        i = 16;
+        break;
+      case EXACT_MEDIAN_KLL_FLOATS:
+        i = 17;
+        break;
+      case EXACT_MEDIAN_AGGRESSIVE:
+        i = 18;
+        break;
+      case EXACT_MEDIAN_BITS_BUCKET_STAT:
+        i = 19;
+        break;
+      case EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER:
+        i = 20;
+        break;
+      case EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE:
+        i = 21;
+        break;
+      case EXACT_MEDIAN_KLL_STAT:
+        i = 22;
+        break;
+      case EXACT_MEDIAN_KLL_STAT_SINGLE:
+        i = 23;
+        break;
+      case EXACT_MEDIAN_KLL_FLOATS_SINGLE:
+        i = 24;
+        break;
+      case EXACT_MEDIAN_KLL_STAT_SINGLE_READ:
+        i = 25;
+        break;
+      case EXACT_MEDIAN_KLL_DEBUG:
+        i = 26;
+        break;
+      case EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING:
+        i = 27;
+        break;
+      case EXACT_MEDIAN_KLL_DEBUG_FULL_READING:
+        i = 28;
+        break;
+      case EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE:
+        i = 29;
+        break;
+      case EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE:
+        i = 30;
+        break;
+      case TDIGEST_STAT_SINGLE:
+        i = 31;
+        break;
+      case SAMPLING_STAT_SINGLE:
+        i = 32;
+        break;
+      case STRICT_KLL_STAT_SINGLE:
+        i = 33;
+        break;
       default:
         throw new IllegalArgumentException("Invalid Aggregation Type: " + this.name());
     }
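
Each new constant must keep the same ordinal in both switches, otherwise persisted aggregation results deserialize to the wrong type. A small round-trip check (a sketch; `serializeTo(OutputStream)` is assumed to be the serializing counterpart of `deserialize`, as the second switch suggests):

    for (AggregationType type : AggregationType.values()) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      type.serializeTo(out); // assumed OutputStream counterpart of deserialize(ByteBuffer)
      AggregationType back = AggregationType.deserialize(ByteBuffer.wrap(out.toByteArray()));
      assert back == type : "serialize/deserialize ordinal mismatch for " + type;
    }
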
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AggressiveMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AggressiveMedianAggrResult.java
new file mode 100644
index 0000000000..0de1492014
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AggressiveMedianAggrResult.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.db.utils.quantiles.GCHashMapComplexForQuantile;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class AggressiveMedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private long cnt; // = n after iteration.
+  private int bitsOfDataType,
+      bitsCounted,
+      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+  private long maskConcerned;
+  private long K1, K2, N1;
+  private long prefixOfMedian1, prefixOfMedian2; // needs prefixOfMedian2 when n is even
+  private long maxWithPrefix1, minWithPrefix2; // when two medians divided
+  private GCHashMapComplexForQuantile hashMap;
+
+  private boolean hasFinalResult;
+  private boolean hasTwoDividedMedians;
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+  }
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (cnt & 1) == 0;
+  }
+
+  private boolean hasTwoDividedMedians() {
+    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+  }
+
+  public AggressiveMedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_AGGRESSIVE);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // map a raw value to an unsigned-comparable long, preserving relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        // for negatives (incl. -0.0f) flip all non-sign bits to restore their order
+        return longBits >= (1L << 31) ? longBits : longBits ^ 0x7FFFFFFFL;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        // positives map to [2^63, 2^64), i.e. longBits < 0 when viewed as signed
+        return longBits < 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7FFFFFFFL;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data, long times) {
+    long longBits = dataToLongBits(data);
+    if ((longBits & maskOfPrefix()) != prefixOfMedian1
+        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+    if (bitsCounted == 0) cnt += times;
+    if (hasTwoDividedMedians) {
+      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+      return;
+    }
+
+    long dataConcerned = longBits & maskConcerned;
+    hashMap.insert(dataConcerned, times);
+  }
+
+  @Override
+  public void startIteration() {
+    bitsConcerned = bitsOfDataType - bitsCounted;
+    maskConcerned = bitsConcerned == 64 ? -1L : ((1L << bitsConcerned) - 1);
+
+    hasTwoDividedMedians = hasTwoDividedMedians();
+    if (hasTwoDividedMedians) {
+      maxWithPrefix1 = Long.MIN_VALUE;
+      minWithPrefix2 = Long.MAX_VALUE;
+      return;
+    }
+    if (bitsCounted == 0) { // first iteration
+      if (bitsOfDataType == 32) hashMap = new GCHashMapComplexForQuantile(bitsOfDataType, 31, 0.5);
+      else hashMap = new GCHashMapComplexForQuantile(bitsOfDataType, 16, 0.5);
+    } else {
+      double expectK = 1.0 * K1 / N1;
+      if (bitsConcerned <= 16) hashMap.reset(bitsConcerned, 16, expectK);
+      else if (bitsConcerned <= 32) hashMap.reset(bitsConcerned, bitsConcerned - 1, expectK);
+      else hashMap.reset(bitsConcerned, 16, expectK);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    if (cnt == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    if (hasTwoDividedMedians) {
+      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+      hasFinalResult = true;
+      return;
+    }
+    if (bitsCounted == 0) {
+      K1 = (cnt + 1) >> 1;
+      K2 = hasTwoMedians() ? (K1 + 1) : K1;
+    }
+    List<Long> iterationResult = hashMap.findResultIndex(K1, K2);
+    int bitsCountedInIteration = Math.min(bitsConcerned, hashMap.getRemainingBits());
+    prefixOfMedian1 |= iterationResult.get(0) << (bitsConcerned - bitsCountedInIteration);
+    K1 -= iterationResult.get(1);
+    N1 = iterationResult.get(2);
+    prefixOfMedian2 |= iterationResult.get(3) << (bitsConcerned - bitsCountedInIteration);
+    K2 -= iterationResult.get(4);
+    bitsCounted += bitsCountedInIteration;
+    if (bitsCounted == bitsOfDataType) {
+      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+      else
+        setDoubleValue(
+            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && cnt > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      updateStatusFromData(minVal, statistics.getCount());
+    }
+    // else ignored: canUpdateFromStatistics() only admits statistics with min == max here
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue(), 1);
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i], 1);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next(), 1);
+    }
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.cnt = buffer.getLong();
+    this.bitsCounted = buffer.getInt();
+    this.prefixOfMedian1 = buffer.getLong();
+    this.prefixOfMedian2 = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(cnt, outputStream);
+    ReadWriteIOUtils.write(bitsCounted, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+    // TODO
+  }
+
+  public long getCnt() {
+    return cnt;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    cnt = 0;
+    bitsCounted = 0;
+    bitsOfDataType = getBitsOfDataType();
+    prefixOfMedian1 = prefixOfMedian2 = 0;
+    hasTwoDividedMedians = false;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
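
The whole prefix search relies on dataToLongBits being strictly order-preserving, including for negative values and -0.0f. A self-contained check of the FLOAT mapping used above (illustrative only; it duplicates the mapping with the full non-sign mask rather than calling the class):

    import java.util.Arrays;

    public class OrderPreservingBitsDemo {
      // same mapping as dataToLongBits for FLOAT: shift by 2^31, then flip all
      // non-sign bits of negative inputs so that their order is reversed back
      static long floatToOrderedLong(float f) {
        long bits = Float.floatToIntBits(f) + (1L << 31);
        return bits >= (1L << 31) ? bits : bits ^ 0x7FFFFFFFL;
      }

      public static void main(String[] args) {
        float[] values = {2.5f, -1.0f, -1.5f, 0.0f, -0.0f, Float.NEGATIVE_INFINITY};
        Arrays.sort(values); // ascending float order (-0.0f sorts before 0.0f)
        long prev = Long.MIN_VALUE;
        for (float f : values) {
          long mapped = floatToOrderedLong(f);
          if (mapped < prev) throw new AssertionError("order broken at " + f);
          prev = mapped;
        }
        System.out.println("mapping is order-preserving on " + Arrays.toString(values));
      }
    }
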
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AmortizedMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AmortizedMedianAggrResult.java
new file mode 100644
index 0000000000..262f1ec393
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/AmortizedMedianAggrResult.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.db.utils.quantiles.TDigestRadixBetterForMedian;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class AmortizedMedianAggrResult extends AggregateResult {
+  private final long deltaForUnsignedCompare = 1L << 63;
+  private TSDataType seriesDataType;
+  private long n;
+  private int bitsOfDataType, iteration; // bitsOfDataType == bitsCounted + bitsConcerned
+  private long K1, K2, L1, R1, L2, R2, lastL1, lastR1;
+  private TDigestRadixBetterForMedian worker;
+
+  private boolean hasFinalResult;
+  private long maxInInterval1, minInInterval2;
+  private boolean hasTwoDividedMedians;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  private boolean hasTwoDividedMedians() {
+    return iteration > 0 && hasTwoMedians() && L2 > R1;
+  }
+
+  public AmortizedMedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_AMORTIZED);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // map a raw value to an unsigned-comparable long, preserving relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        // for negatives (incl. -0.0f) flip all non-sign bits to restore their order
+        return longBits >= (1L << 31) ? longBits : longBits ^ 0x7FFFFFFFL;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        // positives map to [2^63, 2^64), i.e. longBits < 0 when viewed as signed
+        return longBits < 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7FFFFFFFL;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long signedLongBits = dataToLongBits(data) ^ deltaForUnsignedCompare;
+    if (iteration == 0) n++;
+    if (hasTwoDividedMedians) {
+      if (L1 <= signedLongBits && signedLongBits <= R1)
+        maxInInterval1 = Math.max(maxInInterval1, signedLongBits);
+
+      if (L2 <= signedLongBits && signedLongBits <= R2)
+        minInInterval2 = Math.min(minInInterval2, signedLongBits);
+      return;
+    }
+    if (lastL1 <= signedLongBits && signedLongBits < L1) K1--;
+    else if (L1 <= signedLongBits && signedLongBits <= R1) worker.add(signedLongBits);
+  }
+
+  @Override
+  public void startIteration() {
+    if (iteration == 0) {
+      worker = new TDigestRadixBetterForMedian(6708, 6708 * 6, bitsOfDataType);
+    } else {
+      worker.reset();
+    }
+    hasTwoDividedMedians = hasTwoDividedMedians();
+    if (hasTwoDividedMedians) {
+      maxInInterval1 = Long.MIN_VALUE;
+      minInInterval2 = Long.MAX_VALUE;
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    if (hasTwoDividedMedians) {
+      setDoubleValue(
+          0.5
+              * (longBitsToResult(maxInInterval1 ^ deltaForUnsignedCompare)
+                  + longBitsToResult(minInInterval2 ^ deltaForUnsignedCompare)));
+      hasFinalResult = true;
+      return;
+    }
+    lastL1 = L1;
+    lastR1 = R1;
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    K2 = hasTwoMedians() ? (K1 + 1) : K1;
+    List<Long> iterationResult = worker.findResultRange(K1, K2);
+    L1 = iterationResult.get(0);
+    R1 = iterationResult.get(1);
+    L2 = iterationResult.get(2);
+    R2 = iterationResult.get(3);
+    if (L1 == R1 && L2 == R2) {
+      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(L1 ^ deltaForUnsignedCompare));
+      else
+        setDoubleValue(
+            0.5
+                * (longBitsToResult(L1 ^ deltaForUnsignedCompare)
+                    + longBitsToResult(L2 ^ deltaForUnsignedCompare)));
+      hasFinalResult = true;
+    }
+    if (hasTwoDividedMedians()) {
+      hasTwoDividedMedians = true;
+    } else {
+      L1 = Math.min(L1, L2);
+      R1 = Math.max(R1, R2); // cover the ranges of both target ranks in the next pass
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      for (long i = statistics.getCount(); i > 0; i--) updateStatusFromData(minVal);
+    }
+    // else ignored: canUpdateFromStatistics() only admits statistics with min == max here
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+    }
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.n = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(n, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    n = 0;
+    bitsOfDataType = getBitsOfDataType();
+    hasTwoDividedMedians = false;
+    hasFinalResult = false;
+    lastL1 = L1 = Long.MIN_VALUE;
+    lastR1 = R1 = Long.MAX_VALUE;
+    iteration = 0;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
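
AggressiveMedianAggrResult and AmortizedMedianAggrResult implement the same idea: pin the exact median down over several bounded-memory passes, narrowing the candidate set each time (by bit prefix in the former, by value interval in the latter). A toy, self-contained version of the prefix-refinement pass for non-negative longs (illustrative only; the classes above additionally map signed and floating-point values into unsigned order first):

    public class IterativeMedianDemo {
      // k-th smallest (1-based) via 16-bit prefix refinement: each pass histograms
      // the next 16-bit chunk of the values that match the prefix fixed so far, so
      // memory stays bounded at 2^16 counters regardless of data size
      public static long kthByPrefixRefinement(long[] data, long k) {
        long prefix = 0;
        for (int shift = 48; shift >= 0; shift -= 16) {
          long[] hist = new long[1 << 16];
          long mask = shift + 16 == 64 ? 0 : -1L << (shift + 16); // bits fixed so far
          for (long x : data) {
            if ((x & mask) == prefix) hist[(int) ((x >>> shift) & 0xFFFF)]++;
          }
          int bucket = 0;
          while (k > hist[bucket]) k -= hist[bucket++]; // walk buckets to rank k
          prefix |= (long) bucket << shift;
        }
        return prefix;
      }

      public static void main(String[] args) {
        long[] data = {5, 1, 9, 3, 7};
        System.out.println(kthByPrefixRefinement(data, 3)); // median of 5 values -> 5
      }
    }
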
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterAggressiveMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterAggressiveMedianAggrResult.java
new file mode 100644
index 0000000000..ced757b95f
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterAggressiveMedianAggrResult.java
@@ -0,0 +1,455 @@
+/// *
+// * Licensed to the Apache Software Foundation (ASF) under one
+// * or more contributor license agreements.  See the NOTICE file
+// * distributed with this work for additional information
+// * regarding copyright ownership.  The ASF licenses this file
+// * to you under the Apache License, Version 2.0 (the
+// * "License"); you may not use this file except in compliance
+// * with the License.  You may obtain a copy of the License at
+// *
+// *     http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * Unless required by applicable law or agreed to in writing,
+// * software distributed under the License is distributed on an
+// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// * KIND, either express or implied.  See the License for the
+// * specific language governing permissions and limitations
+// * under the License.
+// */
+//
+// package org.apache.iotdb.db.query.aggregation.impl;
+//
+// import org.apache.iotdb.db.query.aggregation.AggregateResult;
+// import org.apache.iotdb.db.query.aggregation.AggregationType;
+// import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+// import org.apache.iotdb.db.utils.ValueIterator;
+// import org.apache.iotdb.db.utils.quantiles.GCHashMapComplexForQuantile;
+// import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+// import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+// import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+// import org.apache.iotdb.tsfile.utils.GSHashMapForStat;
+// import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+//
+// import org.eclipse.collections.api.tuple.primitive.LongLongPair;
+//
+// import java.io.IOException;
+// import java.io.OutputStream;
+// import java.nio.ByteBuffer;
+// import java.util.List;
+//
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.INT64;
+//
+// public class BitsBucketStatFilterAggressiveMedianAggrResult extends AggregateResult {
+//  private TSDataType seriesDataType;
+//  private long cnt; // = n after iteration.
+//  private int bitsOfDataType,
+//      bitsCounted,
+//      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+//  private long maskConcerned;
+//  private long K1, K2, N1;
+//  private long prefixOfMedian1, prefixOfMedian2; // needs prefixOfMedian2 when n is even
+//  private long maxWithPrefix1, minWithPrefix2; // when two medians divided
+//  private GCHashMapComplexForQuantile hashMap;
+//  private GSHashMapForStat statHashMap;
+//  private boolean usingStatistics = false;
+//
+//  private boolean hasFinalResult;
+//  private boolean hasTwoDividedMedians;
+//  private long deltaForUnsignedCompare;
+//
+//  private long maskOfPrefix() {
+//    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+//  }
+//
+//  private int getBitsOfDataType() {
+//    switch (seriesDataType) {
+//      case INT32:
+//      case FLOAT:
+//        return 32;
+//      case INT64:
+//      case DOUBLE:
+//        return 64;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private boolean hasTwoMedians() {
+//    return (cnt & 1) == 0;
+//  }
+//
+//  private boolean hasTwoDividedMedians() {
+//    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+//  }
+//
+//  public BitsBucketStatFilterAggressiveMedianAggrResult(TSDataType seriesDataType)
+//      throws UnSupportedDataTypeException {
+//    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER_AGGRESSIVE);
+//    this.seriesDataType = seriesDataType;
+//    reset();
+//  }
+//
+//  // turn FLOAT/INT32 to unsigned long keeping relative order
+//  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+//    long longBits;
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (int) data + (1L << 31);
+//      case FLOAT:
+//        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+//        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+//      case INT64:
+//        return (long) data + (1L << 63);
+//      case DOUBLE:
+//        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+//        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (double) (longBits - (1L << 31));
+//      case FLOAT:
+//        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+//        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+//      case INT64:
+//        return (double) (longBits - (1L << 63));
+//      case DOUBLE:
+//        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+//        return Double.longBitsToDouble(longBits - (1L << 63));
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private void updateStatusFromData(Object data, long times) {
+//    long longBits = dataToLongBits(data);
+//    if (bitsCounted > 0
+//        && (longBits & maskOfPrefix()) != prefixOfMedian1
+//        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+//    if (bitsCounted == 0) cnt += times;
+//    if (usingStatistics) {
+//      statHashMap.insertLongBits(64, longBits, times);
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+//        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+//      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+//      return;
+//    }
+//    //    if(hashMap.getHashMapSize()==0)
+//    //      System.out.println("???!!!!");
+//    //    if(bitsConcerned==2)
+//    //      System.out.println("\t\t\t"+(long)data+"    "+(longBits & maskConcerned));
+//    long dataConcerned = longBits & maskConcerned;
+//    hashMap.insert(dataConcerned, times);
+//  }
+//
+//  @Override
+//  public void startIteration() {
+//    bitsConcerned = bitsOfDataType - bitsCounted;
+//    maskConcerned = bitsConcerned == 64 ? -1L : ((1L << bitsConcerned) - 1);
+//
+//    hasTwoDividedMedians = hasTwoDividedMedians();
+//    //    System.out.println("[DEBUG]:startIteration value:"+getValue()
+//    //        + " bitsCounted:"+bitsCounted+" prefix1,2:"+prefixOfMedian1+" "+prefixOfMedian2
+//    //        + " divided:"+hasTwoDividedMedians
+//    //        + " K1,2:" + K1+" "+K2);
+//    //    System.out.println("[DEBUG]\t\t result1:"+longBitsToResult(prefixOfMedian1));
+//    if (hasTwoDividedMedians) {
+//      maxWithPrefix1 = Long.MIN_VALUE;
+//      minWithPrefix2 = Long.MAX_VALUE;
+//      return;
+//    }
+//    if (bitsCounted == 0) { // first iteration
+//      if (bitsOfDataType == 32) hashMap = new GCHashMapComplexForQuantile(bitsOfDataType, 31,
+// 0.5);
+//      else hashMap = new GCHashMapComplexForQuantile(bitsOfDataType, 16, 0.5);
+//    } else {
+//      double expectK = 1.0 * K1 / N1;
+//      if (bitsConcerned <= 16) hashMap.reset(bitsConcerned, 16, expectK);
+//      else if (bitsConcerned <= 32) hashMap.reset(bitsConcerned, bitsConcerned - 1, expectK);
+//      else hashMap.reset(bitsConcerned, 16, expectK);
+//    }
+//  }
+//
+//  @Override
+//  public void finishIteration() {
+//    System.out.println(
+//        "[statFilterAggressive DEBUG] finishIteration hashMapBits:"
+//            + hashMap.getRemainingBits()
+//            + "   bitsCounted:"
+//            + bitsCounted
+//            + "??"
+//            + usingStatistics);
+//    if (cnt == 0) {
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      //            System.out.println("[DEBUG]hasTwoDividedMedians");
+//      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (bitsCounted == 0) {
+//      K1 = (cnt + 1) >> 1;
+//      K2 = hasTwoMedians() ? (K1 + 1) : K1;
+//    }
+//    if (usingStatistics) {
+//      hashMap.reset(statHashMap.remainingBits, 0);
+//      long tmpTOT = 0;
+//      for (LongLongPair p : statHashMap.getKeyValuesView()) {
+//        hashMap.insert(p.getOne(), p.getTwo());
+//        tmpTOT += p.getTwo();
+//      }
+//      //      System.out.println("\t\t[DEBUG] BitsBucketStat finishIteration  stat size:"
+//      //          +hashMap.getHashMapSize()+"  remainingBits:"+hashMap.getRemainingBits()+"
+//      // TOT:"+tmpTOT);
+//    }
+//    List<Long> iterationResult = hashMap.findResultIndex(K1, K2);
+//    int bitsCountedInIteration = Math.min(bitsConcerned, hashMap.getRemainingBits());
+//    prefixOfMedian1 |= iterationResult.get(0) << (bitsConcerned - bitsCountedInIteration);
+//    K1 -= iterationResult.get(1);
+//    N1 = iterationResult.get(2);
+//    prefixOfMedian2 |= iterationResult.get(3) << (bitsConcerned - bitsCountedInIteration);
+//    K2 -= iterationResult.get(4);
+//    bitsCounted += bitsCountedInIteration;
+//    if (bitsCounted == bitsOfDataType) {
+//      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+//      else
+//        setDoubleValue(
+//            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+//      hasFinalResult = true;
+//      //      System.out.println("[opt_4 DEBUG] calc over  answer:" + getDoubleValue());
+//    }
+//    if (usingStatistics) {
+//      usingStatistics = false;
+//      statHashMap = null;
+//    }
+//    //        System.out.println("\t\t[MEDIAN4]"+this.hashCode()+"  finishIteration
+// "+bitsCounted+"
+//    // "+bitsOfDataType);
+//    //            System.out.println(
+//    //                "K1: "
+//    //                    + K1
+//    //                    + " K2: "
+//    //                    + K2
+//    //                    + "    cnt:"
+//    //                    + cnt
+//    //                    + "|| prefixOfMedian1:"
+//    //                    + prefixOfMedian1
+//    //                    + "  prefixOfMedian2:"
+//    //                    + prefixOfMedian2);
+//  }
+//
+//  @Override
+//  protected boolean hasCandidateResult() {
+//    return hasFinalResult && cnt > 0;
+//  }
+//
+//  @Override
+//  public Double getResult() {
+//    return hasCandidateResult() ? getDoubleValue() : null;
+//  }
+//
+//  @Override
+//  public void updateResultFromStatistics(Statistics statistics) {
+//    switch (statistics.getType()) {
+//      case INT32:
+//      case INT64:
+//      case FLOAT:
+//      case DOUBLE:
+//        break;
+//      case TEXT:
+//      case BOOLEAN:
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format(
+//                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+//    }
+//    if (bitsCounted == 0) {
+//      cnt += statistics.getCount();
+//      if (statistics.getType() == DOUBLE) {
+//        DoubleStatistics doubleStat = (DoubleStatistics) statistics;
+//        statHashMap.merge(doubleStat.getGSHashMap());
+//      }
+//      if (statistics.getType() == INT64) {
+//        LongStatistics longStat = (LongStatistics) statistics;
+//        statHashMap.merge(longStat.getGSHashMap());
+//      }
+//    } else {
+//      Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//      Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//      if (minVal.compareTo(maxVal) == 0) { // min == max
+//        updateStatusFromData(minVal, statistics.getCount());
+//      }
+//      // out of range
+//    }
+//
+//    /*else
+//         throw new QueryProcessException("Failed to update median aggregation result from
+//    statistics.");*/
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+//    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(
+//      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+//    while (batchIterator.hasNext()) {
+//      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+//        break;
+//      }
+//      updateStatusFromData(batchIterator.currentValue(), 1);
+//      batchIterator.next();
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingTimestamps(
+//      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+//    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+//    for (int i = 0; i < length; i++) {
+//      if (values[i] != null) {
+//        updateStatusFromData(values[i], 1);
+//      }
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator)
+// {
+//    //    List<Object> tmp = new ArrayList<>();
+//    while (valueIterator.hasNext()) {
+//      updateStatusFromData(valueIterator.next(), 1);
+//      //      Object tmpObj = valueIterator.next();
+//      //      updateStatusFromData(tmpObj, 1);
+//      //      tmp.add(tmpObj);
+//    }
+//    //
+//    //
+// System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+//  }
+//
+//  @Override
+//  public int maxIteration() {
+//    return bitsOfDataType / 16 + 1;
+//  }
+//
+//  @Override
+//  public boolean hasFinalResult() {
+//    return hasFinalResult;
+//  }
+//
+//  @Override
+//  public void merge(AggregateResult another) {
+//    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+//    // merge not supported
+//    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+//  }
+//
+//  @Override
+//  protected void deserializeSpecificFields(ByteBuffer buffer) {
+//    this.seriesDataType = TSDataType.deserialize(buffer.get());
+//    this.cnt = buffer.getLong();
+//    this.bitsCounted = buffer.getInt();
+//    this.prefixOfMedian1 = buffer.getLong();
+//    this.prefixOfMedian2 = buffer.getLong();
+//    // TODO
+//  }
+//
+//  @Override
+//  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+//    ReadWriteIOUtils.write(seriesDataType, outputStream);
+//    ReadWriteIOUtils.write(cnt, outputStream);
+//    ReadWriteIOUtils.write(bitsCounted, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+//    // TODO
+//  }
+//
+//  public long getCnt() {
+//    return cnt;
+//  }
+//
+//  @Override
+//  public void reset() {
+//    super.reset();
+//    cnt = 0;
+//    bitsCounted = 0;
+//    bitsOfDataType = getBitsOfDataType();
+//    prefixOfMedian1 = prefixOfMedian2 = 0;
+//    hasTwoDividedMedians = false;
+//    hasFinalResult = false;
+//    statHashMap = null;
+//    usingStatistics = false;
+//    deltaForUnsignedCompare = bitsOfDataType == 64 ? (1L << 63) : 0;
+//  }
+//
+//  @Override
+//  public boolean canUpdateFromStatistics(Statistics statistics) {
+//    if ((seriesDataType == DOUBLE) && bitsCounted == 0) {
+//      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+//      if (doubleStats.hasSerializeHashMap()) {
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    if ((seriesDataType == TSDataType.INT64) && bitsCounted == 0) {
+//      LongStatistics longStats = (LongStatistics) statistics;
+//      if (longStats.hasSerializeHashMap()) {
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    if (bitsCounted > 0) {
+//      long prefixMIN = dataToLongBits(statistics.getMinValue()) & maskOfPrefix();
+//      long prefixMAX = dataToLongBits(statistics.getMaxValue()) & maskOfPrefix();
+//      if ((prefixMIN ^ deltaForUnsignedCompare) > (prefixOfMedian2 ^ deltaForUnsignedCompare))
+//        return true;
+//      if ((prefixMAX ^ deltaForUnsignedCompare) < (prefixOfMedian1 ^ deltaForUnsignedCompare))
+//        return true; // out of range
+//    }
+//    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//    return (minVal.compareTo(maxVal) == 0); // min==max
+//  }
+//
+//  @Override
+//  public boolean groupByLevelBeforeAggregation() {
+//    return true;
+//  }
+// }
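All of the median aggregators in this commit reduce quantile search over FLOAT/INT32/INT64/DOUBLE values to rank search over long keys whose order matches the numeric order of the raw values. Below is a minimal, self-contained sketch of that encoding for DOUBLE; it is essentially the dataToLong/longToResult pair used by the KLL classes later in this diff (the only change is testing the raw bits instead of the value, which also places -0.0 just below +0.0). Class and method names are illustrative, not part of the commit:

import java.util.Arrays;

public class SortableDoubleBits {
  // Non-negative doubles already compare correctly via their raw bits;
  // for negatives, flipping the 63 value bits reverses their order while
  // keeping the sign bit set, so negatives sort below non-negatives.
  static long toSortableLong(double v) {
    long bits = Double.doubleToLongBits(v);
    return bits >= 0 ? bits : bits ^ Long.MAX_VALUE;
  }

  static double fromSortableLong(long bits) {
    return Double.longBitsToDouble(bits >= 0 ? bits : bits ^ Long.MAX_VALUE);
  }

  public static void main(String[] args) {
    double[] xs = {1.25, -3.5, Double.MAX_VALUE, -Double.MAX_VALUE, 0.0};
    long[] keys = new long[xs.length];
    for (int i = 0; i < xs.length; i++) keys[i] = toSortableLong(xs[i]);
    Arrays.sort(keys); // sorting the keys sorts the doubles
    for (long k : keys) System.out.print(fromSortableLong(k) + " ");
    // prints: -1.7976931348623157E308 -3.5 0.0 1.25 1.7976931348623157E308
  }
}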
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterMedianAggrResult.java
new file mode 100644
index 0000000000..6bdc237b55
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatFilterMedianAggrResult.java
@@ -0,0 +1,457 @@
+/// *
+// * Licensed to the Apache Software Foundation (ASF) under one
+// * or more contributor license agreements.  See the NOTICE file
+// * distributed with this work for additional information
+// * regarding copyright ownership.  The ASF licenses this file
+// * to you under the Apache License, Version 2.0 (the
+// * "License"); you may not use this file except in compliance
+// * with the License.  You may obtain a copy of the License at
+// *
+// *     http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * Unless required by applicable law or agreed to in writing,
+// * software distributed under the License is distributed on an
+// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// * KIND, either express or implied.  See the License for the
+// * specific language governing permissions and limitations
+// * under the License.
+// */
+//
+// package org.apache.iotdb.db.query.aggregation.impl;
+//
+// import org.apache.iotdb.db.query.aggregation.AggregateResult;
+// import org.apache.iotdb.db.query.aggregation.AggregationType;
+// import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+// import org.apache.iotdb.db.utils.ValueIterator;
+// import org.apache.iotdb.db.utils.quantiles.EclipseCollectionsHashMapForQuantile;
+// import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+// import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+// import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+// import org.apache.iotdb.tsfile.utils.GSHashMapForStat;
+// import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+//
+// import org.eclipse.collections.api.tuple.primitive.LongLongPair;
+//
+// import java.io.IOException;
+// import java.io.OutputStream;
+// import java.nio.ByteBuffer;
+// import java.util.List;
+//
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.INT64;
+//
+// public class BitsBucketStatFilterMedianAggrResult extends AggregateResult {
+//  private TSDataType seriesDataType;
+//  private long cnt; // = n after iteration.
+//  private int bitsOfDataType,
+//      bitsCounted,
+//      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+//  private long maskConcerned;
+//  private long K1, K2;
+//  private long prefixOfMedian1, prefixOfMedian2; // needs prefixOfMedian2 when n is even
+//  private long maxWithPrefix1, minWithPrefix2; // when two medians divided
+//  private EclipseCollectionsHashMapForQuantile hashMap;
+//  private GSHashMapForStat statHashMap;
+//  private boolean usingStatistics = false;
+//
+//  private boolean hasFinalResult;
+//  private boolean hasTwoDividedMedians;
+//  private long deltaForUnsignedCompare;
+//
+//  private long maskOfPrefix() {
+//    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+//  }
+//
+//  private int getBitsOfDataType() {
+//    switch (seriesDataType) {
+//      case INT32:
+//      case FLOAT:
+//        return 32;
+//      case INT64:
+//      case DOUBLE:
+//        return 64;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private boolean hasTwoMedians() {
+//    return (cnt & 1) == 0;
+//  }
+//
+//  private boolean hasTwoDividedMedians() {
+//    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+//  }
+//
+//  public BitsBucketStatFilterMedianAggrResult(TSDataType seriesDataType)
+//      throws UnSupportedDataTypeException {
+//    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_BITS_BUCKET_STAT_FILTER);
+//    this.seriesDataType = seriesDataType;
+//    reset();
+//  }
+//
+//  // turn FLOAT/INT32/INT64/DOUBLE into an unsigned long, keeping relative order
+//  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+//    long longBits;
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (int) data + (1L << 31);
+//      case FLOAT:
+//        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+//        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+//      case INT64:
+//        return (long) data + (1L << 63);
+//      case DOUBLE:
+//        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+//        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (double) (longBits - (1L << 31));
+//      case FLOAT:
+//        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+//        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+//      case INT64:
+//        return (double) (longBits - (1L << 63));
+//      case DOUBLE:
+//        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+//        return Double.longBitsToDouble(longBits - (1L << 63));
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private void updateStatusFromData(Object data, long times) {
+//    long longBits = dataToLongBits(data);
+//    if (bitsCounted > 0
+//        && (longBits & maskOfPrefix()) != prefixOfMedian1
+//        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+//    if (bitsCounted == 0) cnt += times;
+//    if (usingStatistics) {
+//      statHashMap.insertLongBits(64, longBits, times);
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+//        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+//      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+//      return;
+//    }
+//    //    if(hashMap.getHashMapSize()==0)
+//    //      System.out.println("???!!!!");
+//    //    if(bitsConcerned==2)
+//    //      System.out.println("\t\t\t"+(long)data+"    "+(longBits & maskConcerned));
+//    long dataConcerned = longBits & maskConcerned;
+//    hashMap.insert(dataConcerned, times);
+//  }
+//
+//  @Override
+//  public void startIteration() {
+//    bitsConcerned = bitsOfDataType - bitsCounted;
+//    maskConcerned = bitsConcerned == 64 ? -1L : ((1L << bitsConcerned) - 1);
+//
+//    hasTwoDividedMedians = hasTwoDividedMedians();
+//    //    System.out.println("[DEBUG]:startIteration value:"+getValue()
+//    //        + " bitsCounted:"+bitsCounted+" prefix1,2:"+prefixOfMedian1+" "+prefixOfMedian2
+//    //        + " divided:"+hasTwoDividedMedians
+//    //        + " K1,2:" + K1+" "+K2);
+//    //    System.out.println("[DEBUG]\t\t result1:"+longBitsToResult(prefixOfMedian1));
+//    if (hasTwoDividedMedians) {
+//      maxWithPrefix1 = Long.MIN_VALUE;
+//      minWithPrefix2 = Long.MAX_VALUE;
+//      return;
+//    }
+//    if (bitsCounted == 0) { // first iteration
+//      if (bitsOfDataType == 32)
+//        hashMap = new EclipseCollectionsHashMapForQuantile(bitsOfDataType, 31);
+//      else hashMap = new EclipseCollectionsHashMapForQuantile(bitsOfDataType, 16);
+//    } else {
+//      if (bitsConcerned <= 16) hashMap.reset(bitsConcerned, 16);
+//      else if (bitsConcerned <= 32) hashMap.reset(bitsConcerned, bitsConcerned - 1);
+//      else hashMap.reset(bitsConcerned, 16);
+//    }
+//  }
+//
+//  @Override
+//  public void finishIteration() {
+//    //    System.out.println(
+//    //        "[statFilter DEBUG] finishIteration hashMapBits:"
+//    //            + hashMap.getRemainingBits()
+//    //            + "   bitsCounted:"
+//    //            + bitsCounted
+//    //            + "??"
+//    //            + usingStatistics);
+//    if (cnt == 0) {
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      //            System.out.println("[DEBUG]hasTwoDividedMedians");
+//      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (bitsCounted == 0) {
+//      K1 = (cnt + 1) >> 1;
+//      K2 = hasTwoMedians() ? (K1 + 1) : K1;
+//    }
+//    if (usingStatistics) {
+//      hashMap.reset(statHashMap.remainingBits, 0);
+//      long tmpTOT = 0;
+//      for (LongLongPair p : statHashMap.getKeyValuesView()) {
+//        hashMap.insert(p.getOne(), p.getTwo());
+//        tmpTOT += p.getTwo();
+//      }
+//      //      System.out.println("\t\t[DEBUG] BitsBucketStat finishIteration  stat size:"
+//      //          +hashMap.getHashMapSize()+"  remainingBits:"+hashMap.getRemainingBits()+"
+//      // TOT:"+tmpTOT);
+//    }
+//    List<Long> iterationResult = hashMap.findResultIndex(K1, K2);
+//    //    System.out.println("\t\t[DEBUG] BitsBucketStat finishIteration  result:"
+//    //    +iterationResult+"   K1,K2:"+K1+" "+K2);
+//    int bitsCountedInIteration = Math.min(bitsConcerned, hashMap.getRemainingBits());
+//    prefixOfMedian1 |= iterationResult.get(0) << (bitsConcerned - bitsCountedInIteration);
+//    K1 -= iterationResult.get(1);
+//    prefixOfMedian2 |= iterationResult.get(2) << (bitsConcerned - bitsCountedInIteration);
+//    K2 -= iterationResult.get(3);
+//    bitsCounted += bitsCountedInIteration;
+//    if (bitsCounted == bitsOfDataType) {
+//      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+//      else
+//        setDoubleValue(
+//            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+//      hasFinalResult = true;
+//      //      System.out.println("[opt_4 DEBUG] calc over  answer:" + getDoubleValue());
+//    }
+//    if (usingStatistics) {
+//      usingStatistics = false;
+//      statHashMap = null;
+//    }
+//    //        System.out.println("\t\t[MEDIAN4]"+this.hashCode()+"  finishIteration
+// "+bitsCounted+"
+//    // "+bitsOfDataType);
+//    //            System.out.println(
+//    //                "K1: "
+//    //                    + K1
+//    //                    + " K2: "
+//    //                    + K2
+//    //                    + "    cnt:"
+//    //                    + cnt
+//    //                    + "|| prefixOfMedian1:"
+//    //                    + prefixOfMedian1
+//    //                    + "  prefixOfMedian2:"
+//    //                    + prefixOfMedian2);
+//  }
+//
+//  @Override
+//  protected boolean hasCandidateResult() {
+//    return hasFinalResult && cnt > 0;
+//  }
+//
+//  @Override
+//  public Double getResult() {
+//    return hasCandidateResult() ? getDoubleValue() : null;
+//  }
+//
+//  @Override
+//  public void updateResultFromStatistics(Statistics statistics) {
+//    switch (statistics.getType()) {
+//      case INT32:
+//      case INT64:
+//      case FLOAT:
+//      case DOUBLE:
+//        break;
+//      case TEXT:
+//      case BOOLEAN:
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format(
+//                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+//    }
+//    if (bitsCounted == 0) {
+//      cnt += statistics.getCount();
+//      if (statistics.getType() == DOUBLE) {
+//        DoubleStatistics doubleStat = (DoubleStatistics) statistics;
+//        statHashMap.merge(doubleStat.getGSHashMap());
+//      }
+//      if (statistics.getType() == INT64) {
+//        LongStatistics longStat = (LongStatistics) statistics;
+//        statHashMap.merge(longStat.getGSHashMap());
+//      }
+//    } else {
+//      Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//      Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//      if (minVal.compareTo(maxVal) == 0) { // min == max
+//        updateStatusFromData(minVal, statistics.getCount());
+//      }
+//      // out of range
+//    }
+//
+//    /*else
+//      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+//    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(
+//      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+//    while (batchIterator.hasNext()) {
+//      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+//        break;
+//      }
+//      updateStatusFromData(batchIterator.currentValue(), 1);
+//      batchIterator.next();
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingTimestamps(
+//      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+//    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+//    for (int i = 0; i < length; i++) {
+//      if (values[i] != null) {
+//        updateStatusFromData(values[i], 1);
+//      }
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+//    //    List<Object> tmp = new ArrayList<>();
+//    while (valueIterator.hasNext()) {
+//      updateStatusFromData(valueIterator.next(), 1);
+//      //      Object tmpObj = valueIterator.next();
+//      //      updateStatusFromData(tmpObj, 1);
+//      //      tmp.add(tmpObj);
+//    }
+//    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+//  }
+//
+//  @Override
+//  public int maxIteration() {
+//    return bitsOfDataType / 16 + 1;
+//  }
+//
+//  @Override
+//  public boolean hasFinalResult() {
+//    return hasFinalResult;
+//  }
+//
+//  @Override
+//  public void merge(AggregateResult another) {
+//    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+//    // merge not supported
+//    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+//  }
+//
+//  @Override
+//  protected void deserializeSpecificFields(ByteBuffer buffer) {
+//    this.seriesDataType = TSDataType.deserialize(buffer.get());
+//    this.cnt = buffer.getLong();
+//    this.bitsCounted = buffer.getInt();
+//    this.prefixOfMedian1 = buffer.getLong();
+//    this.prefixOfMedian2 = buffer.getLong();
+//    // TODO
+//  }
+//
+//  @Override
+//  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+//    ReadWriteIOUtils.write(seriesDataType, outputStream);
+//    ReadWriteIOUtils.write(cnt, outputStream);
+//    ReadWriteIOUtils.write(bitsCounted, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+//    // TODO
+//  }
+//
+//  public long getCnt() {
+//    return cnt;
+//  }
+//
+//  @Override
+//  public void reset() {
+//    super.reset();
+//    cnt = 0;
+//    bitsCounted = 0;
+//    bitsOfDataType = getBitsOfDataType();
+//    prefixOfMedian1 = prefixOfMedian2 = 0;
+//    hasTwoDividedMedians = false;
+//    hasFinalResult = false;
+//    statHashMap = null;
+//    usingStatistics = false;
+//    deltaForUnsignedCompare = bitsOfDataType == 64 ? (1L << 63) : 0;
+//  }
+//
+//  @Override
+//  public boolean canUpdateFromStatistics(Statistics statistics) {
+//    if ((seriesDataType == DOUBLE) && bitsCounted == 0) {
+//      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+//      if (doubleStats.hasSerializeHashMap()) {
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    if ((seriesDataType == TSDataType.INT64) && bitsCounted == 0) {
+//      LongStatistics longStats = (LongStatistics) statistics;
+//      if (longStats.hasSerializeHashMap()) {
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    //    if (bitsCounted > 0) {
+//    //      long prefixMIN = dataToLongBits(statistics.getMinValue()) & maskOfPrefix();
+//    //      long prefixMAX = dataToLongBits(statistics.getMaxValue()) & maskOfPrefix();
+//    //      if ((prefixMIN ^ deltaForUnsignedCompare) > (prefixOfMedian2 ^ deltaForUnsignedCompare))
+//    //        return true;
+//    //      if ((prefixMAX ^ deltaForUnsignedCompare) < (prefixOfMedian1 ^ deltaForUnsignedCompare))
+//    //        return true; // out of range
+//    //    }
+//    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//    return (minVal.compareTo(maxVal) == 0); // min == max
+//  }
+//
+//  @Override
+//  public boolean groupByLevelBeforeAggregation() {
+//    return true;
+//  }
+// }
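The bitsCounted / bitsConcerned / prefixOfMedian machinery above is a multi-pass radix selection: each pass buckets the surviving values by their next group of high bits, finds the bucket containing the target rank, and fixes those bits of the answer before the next scan over the data. A stripped-down, in-memory sketch of the same idea, with fixed 16-bit buckets and a dense count array instead of the hash maps, statistics fast path, and serialization (names are illustrative):

public class RadixSelectSketch {
  // Returns the k-th smallest value (k is 1-based, 1 <= k <= data.length).
  static long kthSmallest(long[] data, long k) {
    long prefix = 0; // high bits of the answer fixed so far, as an unsigned key
    for (int bitsFixed = 0; bitsFixed < 64; bitsFixed += 16) {
      long prefixMask = bitsFixed == 0 ? 0 : -1L << (64 - bitsFixed);
      int shift = 64 - bitsFixed - 16;
      long[] bucketCount = new long[1 << 16];
      for (long v : data) {
        long u = v ^ Long.MIN_VALUE; // flip sign bit: signed order -> unsigned order,
        // as dataToLongBits does for INT64
        if ((u & prefixMask) != prefix) continue; // filtered out, like maskOfPrefix()
        bucketCount[(int) ((u >>> shift) & 0xFFFF)]++;
      }
      int bucket = 0;
      while (k > bucketCount[bucket]) k -= bucketCount[bucket++]; // like findResultIndex
      prefix |= (long) bucket << shift; // 16 more bits of the answer are now fixed
    }
    return prefix ^ Long.MIN_VALUE; // undo the unsigned mapping
  }

  public static void main(String[] args) {
    long[] data = {5, -7, 3, 9, -2, 11, 0};
    System.out.println(kthSmallest(data, 4)); // median of 7 values: prints 3
  }
}

The classes above additionally track two ranks (K1/K2) for even counts, vary the bucket width per pass (31 bits first for 32-bit types), and bucket through a size-capped hash map rather than a dense array, but the per-pass rank bookkeeping is the same.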
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatMedianAggrResult.java
new file mode 100644
index 0000000000..ba4acd285f
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/BitsBucketStatMedianAggrResult.java
@@ -0,0 +1,445 @@
+/// *
+// * Licensed to the Apache Software Foundation (ASF) under one
+// * or more contributor license agreements.  See the NOTICE file
+// * distributed with this work for additional information
+// * regarding copyright ownership.  The ASF licenses this file
+// * to you under the Apache License, Version 2.0 (the
+// * "License"); you may not use this file except in compliance
+// * with the License.  You may obtain a copy of the License at
+// *
+// *     http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * Unless required by applicable law or agreed to in writing,
+// * software distributed under the License is distributed on an
+// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// * KIND, either express or implied.  See the License for the
+// * specific language governing permissions and limitations
+// * under the License.
+// */
+//
+// package org.apache.iotdb.db.query.aggregation.impl;
+//
+// import org.apache.iotdb.db.query.aggregation.AggregateResult;
+// import org.apache.iotdb.db.query.aggregation.AggregationType;
+// import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+// import org.apache.iotdb.db.utils.ValueIterator;
+// import org.apache.iotdb.db.utils.quantiles.EclipseCollectionsHashMapForQuantile;
+// import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+// import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
+// import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+// import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+// import org.apache.iotdb.tsfile.utils.GSHashMapForStat;
+// import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+//
+// import org.eclipse.collections.api.tuple.primitive.LongLongPair;
+//
+// import java.io.IOException;
+// import java.io.OutputStream;
+// import java.nio.ByteBuffer;
+// import java.util.List;
+//
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+// import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.INT64;
+//
+// public class BitsBucketStatMedianAggrResult extends AggregateResult {
+//  private TSDataType seriesDataType;
+//  private long cnt; // = n after iteration.
+//  private int bitsOfDataType,
+//      bitsCounted,
+//      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+//  private long maskConcerned;
+//  private long K1, K2;
+//  private long prefixOfMedian1, prefixOfMedian2; // needs prefixOfMedian2 when n is even
+//  private long maxWithPrefix1, minWithPrefix2; // when two medians divided
+//  private EclipseCollectionsHashMapForQuantile hashMap;
+//  private GSHashMapForStat statHashMap;
+//  private boolean usingStatistics = false;
+//
+//  private boolean hasFinalResult;
+//  private boolean hasTwoDividedMedians;
+//
+//  private long maskOfPrefix() {
+//    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+//  }
+//
+//  private int getBitsOfDataType() {
+//    switch (seriesDataType) {
+//      case INT32:
+//      case FLOAT:
+//        return 32;
+//      case INT64:
+//      case DOUBLE:
+//        return 64;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private boolean hasTwoMedians() {
+//    return (cnt & 1) == 0;
+//  }
+//
+//  private boolean hasTwoDividedMedians() {
+//    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+//  }
+//
+//  public BitsBucketStatMedianAggrResult(TSDataType seriesDataType)
+//      throws UnSupportedDataTypeException {
+//    super(DOUBLE, AggregationType.EXACT_MEDIAN_BITS_BUCKET_STAT);
+//    this.seriesDataType = seriesDataType;
+//    reset();
+//  }
+//
+//  // turn FLOAT/INT32/INT64/DOUBLE into an unsigned long, keeping relative order
+//  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+//    long longBits;
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (int) data + (1L << 31);
+//      case FLOAT:
+//        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+//        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+//      case INT64:
+//        return (long) data + (1L << 63);
+//      case DOUBLE:
+//        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+//        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+//    switch (seriesDataType) {
+//      case INT32:
+//        return (double) (longBits - (1L << 31));
+//      case FLOAT:
+//        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+//        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+//      case INT64:
+//        return (double) (longBits - (1L << 63));
+//      case DOUBLE:
+//        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+//        return Double.longBitsToDouble(longBits - (1L << 63));
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+//    }
+//  }
+//
+//  private void updateStatusFromData(Object data, long times) {
+//    long longBits = dataToLongBits(data);
+//    if (bitsCounted > 0
+//        && (longBits & maskOfPrefix()) != prefixOfMedian1
+//        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+//    if (bitsCounted == 0) cnt += times;
+//    if (usingStatistics) {
+//      statHashMap.insertLongBits(64, longBits, times);
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+//        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+//      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+//      return;
+//    }
+//    //    if(hashMap.getHashMapSize()==0)
+//    //      System.out.println("???!!!!");
+//    //    if(bitsConcerned==2)
+//    //      System.out.println("\t\t\t"+(long)data+"    "+(longBits & maskConcerned));
+//    long dataConcerned = longBits & maskConcerned;
+//    hashMap.insert(dataConcerned, times);
+//  }
+//
+//  @Override
+//  public void startIteration() {
+//    bitsConcerned = bitsOfDataType - bitsCounted;
+//    maskConcerned = bitsConcerned == 64 ? -1L : ((1L << bitsConcerned) - 1);
+//
+//    hasTwoDividedMedians = hasTwoDividedMedians();
+//    //    System.out.println("[DEBUG]:startIteration value:"+getValue()
+//    //        + " bitsCounted:"+bitsCounted+" prefix1,2:"+prefixOfMedian1+" "+prefixOfMedian2
+//    //        + " divided:"+hasTwoDividedMedians
+//    //        + " K1,2:" + K1+" "+K2);
+//    //    System.out.println("[DEBUG]\t\t result1:"+longBitsToResult(prefixOfMedian1));
+//    if (hasTwoDividedMedians) {
+//      maxWithPrefix1 = Long.MIN_VALUE;
+//      minWithPrefix2 = Long.MAX_VALUE;
+//      return;
+//    }
+//    if (bitsCounted == 0) { // first iteration
+//      if (bitsOfDataType == 32)
+//        hashMap = new EclipseCollectionsHashMapForQuantile(bitsOfDataType, 31);
+//      else hashMap = new EclipseCollectionsHashMapForQuantile(bitsOfDataType, 16);
+//    } else {
+//      if (bitsConcerned <= 16) hashMap.reset(bitsConcerned, 16);
+//      else if (bitsConcerned <= 32) hashMap.reset(bitsConcerned, bitsConcerned - 1);
+//      else hashMap.reset(bitsConcerned, 16);
+//    }
+//  }
+//
+//  @Override
+//  public void finishIteration() {
+//    System.out.println(
+//        "[BitsBucketStat DEBUG] finishIteration hashMapBits:"
+//            + hashMap.getRemainingBits()
+//            + "   bitsCounted:"
+//            + bitsCounted
+//            + "??"
+//            + usingStatistics);
+//    if (cnt == 0) {
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (hasTwoDividedMedians) {
+//      //            System.out.println("[DEBUG]hasTwoDividedMedians");
+//      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+//      hasFinalResult = true;
+//      return;
+//    }
+//    if (bitsCounted == 0) {
+//      K1 = (cnt + 1) >> 1;
+//      K2 = hasTwoMedians() ? (K1 + 1) : K1;
+//    }
+//    if (usingStatistics) {
+//      hashMap.reset(statHashMap.remainingBits, 0);
+//      long tmpTOT = 0;
+//      for (LongLongPair p : statHashMap.getKeyValuesView()) {
+//        hashMap.insert(p.getOne(), p.getTwo());
+//        tmpTOT += p.getTwo();
+//      }
+//      //      System.out.println("\t\t[DEBUG] BitsBucketStat finishIteration  stat size:"
+//      //          +hashMap.getHashMapSize()+"  remainingBits:"+hashMap.getRemainingBits()+"
+//      // TOT:"+tmpTOT);
+//    }
+//    List<Long> iterationResult = hashMap.findResultIndex(K1, K2);
+//    int bitsCountedInIteration = Math.min(bitsConcerned, hashMap.getRemainingBits());
+//    prefixOfMedian1 |= iterationResult.get(0) << (bitsConcerned - bitsCountedInIteration);
+//    K1 -= iterationResult.get(1);
+//    prefixOfMedian2 |= iterationResult.get(2) << (bitsConcerned - bitsCountedInIteration);
+//    K2 -= iterationResult.get(3);
+//    bitsCounted += bitsCountedInIteration;
+//    if (bitsCounted == bitsOfDataType) {
+//      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+//      else
+//        setDoubleValue(
+//            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+//      hasFinalResult = true;
+//      //      System.out.println("[opt_4 DEBUG] calc over  answer:" + getDoubleValue());
+//    }
+//    if (usingStatistics) {
+//      usingStatistics = false;
+//      statHashMap = null;
+//    }
+//    //    System.out.println("\t\t[BitsBucketStat]" + this.hashCode()
+//    //        + "  finishIteration " + bitsCounted + " " + bitsOfDataType);
+//    //    System.out.println(
+//    //        "K1: "
+//    //            + K1
+//    //            + " K2: "
+//    //            + K2
+//    //            + "    cnt:"
+//    //            + cnt
+//    //            + "|| prefixOfMedian1:"
+//    //            + prefixOfMedian1
+//    //            + "  prefixOfMedian2:"
+//    //            + prefixOfMedian2);
+//  }
+//
+//  @Override
+//  protected boolean hasCandidateResult() {
+//    return hasFinalResult && cnt > 0;
+//  }
+//
+//  @Override
+//  public Double getResult() {
+//    return hasCandidateResult() ? getDoubleValue() : null;
+//  }
+//
+//  @Override
+//  public void updateResultFromStatistics(Statistics statistics) {
+//    switch (statistics.getType()) {
+//      case INT32:
+//      case INT64:
+//      case FLOAT:
+//      case DOUBLE:
+//        break;
+//      case TEXT:
+//      case BOOLEAN:
+//      default:
+//        throw new UnSupportedDataTypeException(
+//            String.format(
+//                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+//    }
+//    cnt += statistics.getCount();
+//    if (statistics.getType() == DOUBLE) {
+//      DoubleStatistics doubleStat = (DoubleStatistics) statistics;
+//      //    System.out.println("\t\t????"+doubleStat.getGSHashMap().getHashMapSize()+"
+//      // "+doubleStat.hashCode()
+//      //        +"  time:"+doubleStat.getStartTime()+"...."+doubleStat.getEndTime());
+//      //    System.out.println("\t\t????"+doubleStat.getGSHashMap().getRemainingBits()+"
+//      // self:"+statHashMap.getRemainingBits());
+//      statHashMap.merge(doubleStat.getGSHashMap());
+//    }
+//    if (statistics.getType() == INT64) {
+//      LongStatistics longStat = (LongStatistics) statistics;
+//      statHashMap.merge(longStat.getGSHashMap());
+//    }
+//    //    System.out.println("\t\t???!"+statHashMap.getHashMapSize());
+//    //    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//    //    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//    //    if (minVal.compareTo(maxVal) == 0) {
+//    //      updateStatusFromData(minVal, statistics.getCount());
+//    //    } /*else
+//    //      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+//    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+//  }
+//
+//  @Override
+//  public void updateResultFromPageData(
+//      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+//    while (batchIterator.hasNext()) {
+//      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+//        break;
+//      }
+//      updateStatusFromData(batchIterator.currentValue(), 1);
+//      batchIterator.next();
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingTimestamps(
+//      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+//    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+//    for (int i = 0; i < length; i++) {
+//      if (values[i] != null) {
+//        updateStatusFromData(values[i], 1);
+//      }
+//    }
+//  }
+//
+//  @Override
+//  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+//    //    List<Object> tmp = new ArrayList<>();
+//    while (valueIterator.hasNext()) {
+//      updateStatusFromData(valueIterator.next(), 1);
+//      //      Object tmpObj = valueIterator.next();
+//      //      updateStatusFromData(tmpObj, 1);
+//      //      tmp.add(tmpObj);
+//    }
+//    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+//  }
+//
+//  @Override
+//  public int maxIteration() {
+//    return bitsOfDataType / 16 + 1;
+//  }
+//
+//  @Override
+//  public boolean hasFinalResult() {
+//    return hasFinalResult;
+//  }
+//
+//  @Override
+//  public void merge(AggregateResult another) {
+//    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+//    // merge not supported
+//    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+//  }
+//
+//  @Override
+//  protected void deserializeSpecificFields(ByteBuffer buffer) {
+//    this.seriesDataType = TSDataType.deserialize(buffer.get());
+//    this.cnt = buffer.getLong();
+//    this.bitsCounted = buffer.getInt();
+//    this.prefixOfMedian1 = buffer.getLong();
+//    this.prefixOfMedian2 = buffer.getLong();
+//    // TODO
+//  }
+//
+//  @Override
+//  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+//    ReadWriteIOUtils.write(seriesDataType, outputStream);
+//    ReadWriteIOUtils.write(cnt, outputStream);
+//    ReadWriteIOUtils.write(bitsCounted, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+//    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+//    // TODO
+//  }
+//
+//  public long getCnt() {
+//    return cnt;
+//  }
+//
+//  @Override
+//  public void reset() {
+//    super.reset();
+//    cnt = 0;
+//    bitsCounted = 0;
+//    bitsOfDataType = getBitsOfDataType();
+//    prefixOfMedian1 = prefixOfMedian2 = 0;
+//    hasTwoDividedMedians = false;
+//    hasFinalResult = false;
+//    statHashMap = null;
+//    usingStatistics = false;
+//  }
+//
+//  @Override
+//  public boolean canUpdateFromStatistics(Statistics statistics) {
+//    if ((seriesDataType == DOUBLE) && bitsCounted == 0) {
+//      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+//      if (doubleStats.hasSerializeHashMap()) {
+//        //        System.out.println("\t\t[DEBUG] bitsBucketStat trying to use stat with count + "
+//        //            + doubleStats.getCount());
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    if ((seriesDataType == TSDataType.INT64) && bitsCounted == 0) {
+//      LongStatistics longStats = (LongStatistics) statistics;
+//      if (longStats.hasSerializeHashMap()) {
+//        if (!usingStatistics) {
+//          statHashMap = new GSHashMapForStat((byte) 64, 65535);
+//          usingStatistics = true;
+//          if (hashMap.getHashMapSize() > 0) {
+//            for (LongLongPair p : hashMap.getKeyValuesView()) {
+//              statHashMap.remainingBits = (byte) hashMap.getRemainingBits();
+//              statHashMap.insertLongBits(hashMap.getRemainingBits(), p.getOne(), p.getTwo());
+//            }
+//          }
+//        }
+//        return true;
+//      }
+//    }
+//    return false;
+//    //    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+//    //    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+//    //    return (minVal.compareTo(maxVal) == 0);
+//  }
+//
+//  @Override
+//  public boolean groupByLevelBeforeAggregation() {
+//    return true;
+//  }
+// }
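BitsBucketStatMedianAggrResult differs from the Filter variant above mainly in its first pass: whenever an overlapping chunk carries a serialized value-frequency summary (hasSerializeHashMap), canUpdateFromStatistics accepts the chunk and updateResultFromStatistics merges the summaries instead of reading raw points. A toy stand-in for that merge-and-rank path, with GSHashMapForStat reduced to a plain value-to-count TreeMap; judging from the remainingBits bookkeeping above, the real structure also trades precision for size by tracking keys at reduced bit width, which this sketch omits:

import java.util.Map;
import java.util.TreeMap;

public class FreqSummarySketch {
  final TreeMap<Long, Long> counts = new TreeMap<>(); // value -> occurrence count

  void insert(long value, long times) {
    counts.merge(value, times, Long::sum);
  }

  // like statHashMap.merge(doubleStat.getGSHashMap()) above
  void merge(FreqSummarySketch other) {
    for (Map.Entry<Long, Long> e : other.counts.entrySet()) insert(e.getKey(), e.getValue());
  }

  // 1-based rank query over the merged summary
  long kthSmallest(long k) {
    for (Map.Entry<Long, Long> e : counts.entrySet()) {
      if (k <= e.getValue()) return e.getKey();
      k -= e.getValue();
    }
    throw new IllegalArgumentException("rank out of range");
  }

  public static void main(String[] args) {
    FreqSummarySketch chunk1 = new FreqSummarySketch(), chunk2 = new FreqSummarySketch();
    chunk1.insert(10, 3);
    chunk1.insert(20, 1);
    chunk2.insert(10, 1);
    chunk2.insert(30, 2);
    chunk1.merge(chunk2); // 7 values in total: 10,10,10,10,20,30,30
    System.out.println(chunk1.kthSmallest(4)); // median: prints 10
  }
}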
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
index ec279729f9..f344d55a22 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/CountAggrResult.java
@@ -46,6 +46,7 @@ public class CountAggrResult extends AggregateResult {
 
   @Override
   public void updateResultFromStatistics(Statistics statistics) {
+    System.out.println("\t\t[DEBUG Count] update from statistics:" + statistics.getCount());
     setLongValue(getLongValue() + statistics.getCount());
   }
 
@@ -89,6 +90,7 @@ public class CountAggrResult extends AggregateResult {
       cnt++;
     }
     setLongValue(getLongValue() + cnt);
+    //    System.out.println("\t\t[COUNT][updateResultUsingValues]"+getResult());
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugFullReadingAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugFullReadingAggrResult.java
new file mode 100644
index 0000000000..1376701a8d
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugFullReadingAggrResult.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.db.utils.quantiles.KLLDoublesForMedian;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.STATISTICS_PAGE_MAXSIZE;
+
+public class KLLDebugFullReadingAggrResult extends AggregateResult {
+  private final long deltaForUnsignedCompare = 1L << 63;
+  private TSDataType seriesDataType;
+  private long n, K1, K2;
+  private int bitsOfDataType, iteration; // bitsOfDataType == bitsCounted + bitsConcerned
+  private double L, R, lastL;
+  private KLLDoublesForMedian worker;
+  long FULL_READ = 0, DEMAND_READ = 0;
+  boolean FULL_READING_PAGE = false;
+
+  private boolean hasFinalResult;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLDebugFullReadingAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_KLL_DEBUG_FULL_READING);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+// turn FLOAT/INT32/INT64/DOUBLE into an unsigned long, keeping relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double dataToDouble(Object data) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        return (float) data;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        return (double) data;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  //  private void updateStatusFromData(Object data, long times) {
+  //    long signedLongBits = dataToLongBits(data) ^ deltaForUnsignedCompare;
+  //    if (iteration == 0) n += times;
+  //    if (hasTwoDividedMedians) {
+  //      if (L1 <= signedLongBits && signedLongBits <= R1)
+  //        maxInInterval1 = Math.max(maxInInterval1, signedLongBits);
+  //
+  //      if (L2 <= signedLongBits && signedLongBits <= R2)
+  //        minInInterval2 = Math.min(minInInterval2, signedLongBits);
+  //      return;
+  //    }
+  //    if (lastL1 <= signedLongBits && signedLongBits < L1) K1 -= times;
+  //    else if (L1 <= signedLongBits && signedLongBits <= R1) worker.add(signedLongBits, times);
+  //  }
+
+  private void updateStatusFromData(Object data) {
+    FULL_READ++;
+    double dataF = dataToDouble(data);
+    if (iteration == 0) n++;
+    if (lastL <= dataF && dataF < L) K1--;
+    else if (L <= dataF && dataF <= R) worker.add(dataF);
+  }
+
+  @Override
+  public void startIteration() {
+    if (iteration == 0) {
+      worker = new KLLDoublesForMedian();
+    } else {
+      worker.reset();
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = L;
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    K2 = hasTwoMedians() ? (K1 + 1) : K1;
+    if (worker.isExactResult()) {
+      //      System.out.println("[kll floats DEBUG] "+worker.sketch.getN()+" ???
+      // "+worker.sketch.getK());
+      //      System.out.println("[kll floats DEBUG] "+"!!! "+n);
+      double v1 = worker.sketch.getQuantile(1.0 * (K1 - 1) / worker.sketch.getN());
+      double v2 = worker.sketch.getQuantile(1.0 * (K2 - 1) / worker.sketch.getN());
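+      // debug variant: v1/v2 above are the medians recovered from the (exact)
+      // sketch, but the value reported below is FULL_READ, the number of
+      // points this class had to read in full, which is what it measures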
+      setDoubleValue(FULL_READ);
+      hasFinalResult = true;
+      System.out.println("[kll floats DEBUG] calc over  answer:" + getDoubleValue());
+      return;
+    }
+    List<Double> iterationResult = worker.findResultRange(K1, K2);
+    L = iterationResult.get(0);
+    R = iterationResult.get(1);
+    System.out.println(
+        "[kll floats DEBUG] "
+            + L
+            + "~"
+            + R
+            + "  K1,K2:"
+            + K1
+            + ","
+            + K2
+            + " cntN:"
+            + worker.sketch.getN()
+            + " eps:"
+            + worker.sketch.getNormalizedRankError(false));
+    if (L == R) {
+      setDoubleValue(FULL_READ);
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    double minVal = dataToDouble(statistics.getMinValue());
+    double maxVal = dataToDouble(statistics.getMaxValue());
+    if (maxVal < L) {
+      K1 -= statistics.getCount();
+    } else if (minVal > R) return;
+    if (minVal == maxVal) {
+      for (long i = statistics.getCount(); i > 0; i--) updateStatusFromData(minVal);
+    } /*else
+      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj);
+      //      tmp.add(tmpObj);
+    }
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.n = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(n, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    n = 0;
+    bitsOfDataType = getBitsOfDataType();
+    hasFinalResult = false;
+    L = -Double.MAX_VALUE;
+    R = Double.MAX_VALUE;
+    iteration = 0;
+    FULL_READ = 0;
+    FULL_READING_PAGE = false;
+    DEMAND_READ = 0;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    FULL_READING_PAGE = false;
+    //    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    //    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    double minVal = dataToDouble(statistics.getMinValue());
+    double maxVal = dataToDouble(statistics.getMaxValue());
+    if (minVal == maxVal) return true;
+    if (maxVal < L) {
+      return true;
+    } else if (minVal > R) return true;
+
+    if (statistics.getCount() <= STATISTICS_PAGE_MAXSIZE) {
+      FULL_READING_PAGE = true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
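The class above follows the iterate-and-narrow pattern shared by the KLL aggregators: the first pass sketches all values and derives a candidate range [L, R] that must contain the median's rank; each later pass rescans the data, credits values falling in [lastL, L) against the running rank K1, and re-sketches only the survivors, until the range collapses to a point. A compact sketch of that bookkeeping in which the surviving pass is solved exactly; the [L, R] range is hard-coded here to stand in for KLLDoublesForMedian.findResultRange, whose implementation is not part of this diff:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class NarrowingSelectSketch {
  public static void main(String[] args) {
    double[] data = {8.5, -1.0, 3.25, 7.0, 2.0, 4.5, 6.0};
    long k1 = (data.length + 1) / 2; // rank of the lower median: 4
    double lastL = -Double.MAX_VALUE;
    double L = 3.0, R = 7.0; // pretend pass 1's sketch returned this range

    List<Double> kept = new ArrayList<>();
    for (double v : data) {
      if (lastL <= v && v < L) k1--;          // -1.0 and 2.0 precede the range
      else if (L <= v && v <= R) kept.add(v); // 3.25, 4.5, 6.0, 7.0 survive
    }
    Collections.sort(kept); // survivors fit in memory, so finish exactly
    System.out.println("median = " + kept.get((int) k1 - 1)); // k1 is now 2: prints 4.5
  }
}

With a real sketch the range typically collapses within a few passes at the cost of rescanning the data; the FULL_READ counter above appears to exist precisely to measure that cost.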
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugResult.java
new file mode 100644
index 0000000000..a5eb166a97
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLDebugResult.java
@@ -0,0 +1,484 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.SYNOPSIS_SIZE_IN_BYTE;
+
+public class KLLDebugResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL;
+  private boolean hasFinalResult;
+  private final int maxMemoryByte = 1 << 19; // half of the limit
+  private final int pageAvgError = 50, pageMaxError = 127;
+  private final int pageKLLMemoryByte = (68 + 15) * 8,
+      pageKLLNumMemoryByte = SYNOPSIS_SIZE_IN_BYTE * Long.BYTES;
+  //  private List<HeapLongKLLSketch> pageKLL;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private final int pageKLLMaxSize = (int) Math.floor((1.0 * maxMemoryByte / pageKLLMemoryByte));
+  long DEBUG = 0;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    long dataAvgError = (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+    return dataAvgError;
+  }
+
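+  // Rough error model for the page-summary path (illustrative numbers): with
+  // pageAvgError = 50 and pageKLLNum = 100 summaries still within the memory
+  // budget (rate < 1), the estimate below is ceil(50 * sqrt(100)) = 500; once
+  // rate >= 1 it grows linearly in rate, reflecting the extra compaction
+  // error of summaries that no longer fit in maxMemoryByte.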
+  private long approximateStatAvgError() {
+    double rate = 1.0 * pageKLLNumMemoryByte * pageKLLNum / maxMemoryByte;
+    long pageStatAvgError;
+    if (rate < 1.0) {
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum <= 10) pageStatAvgError += pageMaxError;
+    } else {
+      int memKLLNum = maxMemoryByte / pageKLLNumMemoryByte;
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError;
+  }
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLDebugResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_DEBUG);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapKLL.getApproxRank(val);
+  }
+
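+  // binary search over the whole long key space (~64 sketch probes) for the
+  // largest key whose approximate rank is still <= K; the GE variant below mirrors it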
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLL = new ArrayList<>(pageKLLMaxSize);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.add(null);
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT DEBUG]"
+            + "finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println(
+        "\t[KLL STAT DEBUG]" + " statNum:" + pageKLLNum + " heapN:" + heapKLL.getN());
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    long K2 = hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heapKLL
+      System.out.println("\t[KLL STAT DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+
+      double v1 = longToResult(heapKLL.findMinValueWithRank(K1 - 1));
+      //      System.out.println("\t[KLL STAT DEBUG]" + "v1:" + v1);
+      double v2 = longToResult(heapKLL.findMinValueWithRank(K2 - 1));
+      double ans = 0.5 * (v1 + v2);
+      setDoubleValue(ans);
+      hasFinalResult = true;
+      return;
+    }
+    // first iteration finished and page-level KLL statistics were collected
+    //    heapKLL.show();
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    mergePageKLL();
+    heapKLL.show();
+    System.out.println("\t[KLL STAT DEBUG] heapN:" + heapKLL.getN() + "\tn_true:" + n);
+    double v1 = longToResult(heapKLL.findMinValueWithRank(K1 - 1));
+    double v2 = longToResult(heapKLL.findMinValueWithRank(K2 - 1));
+    double ans = 0.5 * (v1 + v2);
+    setDoubleValue(ans);
+    hasFinalResult = true;
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
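+  // Buffers page-level sketches; once pageKLLMaxSize of them have accumulated,
+  // folds the whole buffer into heapKLL and starts refilling the buffer.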
+  private void addSketch(KLLSketchForQuantile sketch) {
+    if (pageKLLIndex < pageKLLMaxSize) pageKLL.set(pageKLLIndex++, sketch);
+    else {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+      pageKLL.set(pageKLLIndex++, sketch);
+      //      System.out.println(
+      //          "\t[KLL STAT DEBUG]\theapKLL merge pageKLLList. newN: "
+      //              + heapKLL.getN()
+      //              + "   n_true:"
+      //              + n);
+      //      heapKLL.show();
+    }
+  }
+
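+  // rebuilds heapKLL with a doubled memory budget and folds the buffered
+  // page-level sketches into it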
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte * 2);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      pageKLLNum++;
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getKllSketchNum() > 0) {
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          return;
+        } else System.out.println("\t\t\t\t!!!!!![ERROR!] no KLL in stat!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    System.out.println(
+        "\t[KLL STAT DEBUG] no KLL in stat. update from statistics:\t"
+            + "min,max:"
+            + minVal
+            + ","
+            + maxVal
+            + " n:"
+            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // min == max
+      for (int i = 0; i < statistics.getCount(); i++) updateStatusFromData(minVal);
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 1;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0); // min==max
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsMedianAggrResult.java
new file mode 100644
index 0000000000..16e265f2cc
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsMedianAggrResult.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+public class KLLFloatsMedianAggrResult extends AggregateResult {
+  private final long deltaForUnsignedCompare = 1L << 63;
+  private TSDataType seriesDataType;
+  private long n, K1, K2;
+  private int bitsOfDataType, iteration; // bitsOfDataType == bitsCounted + bitsConcerned
+  private long L, R, lastL;
+  private HeapLongKLLSketch worker;
+
+  private boolean hasFinalResult;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLFloatsMedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_KLL_FLOATS);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  //  private void updateStatusFromData(Object data, long times) {
+  //    long signedLongBits = dataToLongBits(data) ^ deltaForUnsignedCompare;
+  //    if (iteration == 0) n += times;
+  //    if (hasTwoDividedMedians) {
+  //      if (L1 <= signedLongBits && signedLongBits <= R1)
+  //        maxInInterval1 = Math.max(maxInInterval1, signedLongBits);
+  //
+  //      if (L2 <= signedLongBits && signedLongBits <= R2)
+  //        minInInterval2 = Math.min(minInInterval2, signedLongBits);
+  //      return;
+  //    }
+  //    if (lastL1 <= signedLongBits && signedLongBits < L1) K1 -= times;
+  //    else if (L1 <= signedLongBits && signedLongBits <= R1) worker.add(signedLongBits, times);
+  //  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (lastL <= dataL && dataL < L) K1--;
+    else if (L <= dataL && dataL <= R) worker.update(dataL);
+  }
+
+  @Override
+  public void startIteration() {
+    worker = new HeapLongKLLSketch(maxMemoryByte);
+  }
+
+  @Override
+  public void finishIteration() {
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = L;
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    K2 = hasTwoMedians() ? (K1 + 1) : K1;
+    if (worker.exactResult()) {
+      long v1 = worker.getExactResult((int) K1 - 1), v2 = worker.getExactResult((int) K2 - 1);
+      double ans = 0.5 * (longToResult(v1) + longToResult(v2));
+      setDoubleValue(ans);
+      hasFinalResult = true;
+      System.out.println("[kll POINT DEBUG] calc over  answer:" + getDoubleValue());
+      return;
+    }
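+    // Narrow the candidate interval: keep only keys whose rank, within three
+    // times the sketch's average error, could still cover positions K1/K2.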
+    long err = (long) (1.5 * worker.getN() / worker.getNumLen());
+    long minK = K1 - 1 - err * 3, maxK = K2 - 1 + err * 3;
+    L = worker.findMinValueWithRank(minK);
+    R = worker.findMaxValueWithRank(maxK);
+    if (L == R + 1) {
+      if (worker.getApproxRank(L) < K1) R = L;
+      else L = R;
+    }
+    //    System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+    System.out.println("\t[KLL POINT DEBUG] avg_err:" + err + "\tcntL:" + L + "\tcntR:" + R + "\n");
+
+    if (L == R) {
+      double ans = longToResult(L);
+      setDoubleValue(ans);
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    if (maxVal < L) {
+      // the whole chunk precedes the candidate interval: shift the target rank
+      // and return so a single-valued chunk is not counted a second time below
+      K1 -= statistics.getCount();
+      return;
+    } else if (minVal > R) return;
+    if (minVal == maxVal) {
+      for (long i = statistics.getCount(); i > 0; i--) updateStatusFromData(minVal);
+    } /*else
+      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.n = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(n, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    n = 0;
+    bitsOfDataType = getBitsOfDataType();
+    hasFinalResult = false;
+    L = Long.MIN_VALUE;
+    R = Long.MAX_VALUE;
+    iteration = 0;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    //    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    //    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    return minVal == maxVal || maxVal < L || minVal > R;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsSingleAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsSingleAggrResult.java
new file mode 100644
index 0000000000..fcf59c3583
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLFloatsSingleAggrResult.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+public class KLLFloatsSingleAggrResult extends AggregateResult {
+  private final long deltaForUnsignedCompare = 1L << 63;
+  private TSDataType seriesDataType;
+  private long n, K1, K2;
+  private int bitsOfDataType, iteration; // bitsOfDataType == bitsCounted + bitsConcerned
+  private double L, R, lastL;
+  private HeapLongKLLSketch worker;
+
+  private boolean hasFinalResult;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLFloatsSingleAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_KLL_FLOATS);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double dataToDouble(Object data) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        return (float) data;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        return (double) data;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  //  private void updateStatusFromData(Object data, long times) {
+  //    long signedLongBits = dataToLongBits(data) ^ deltaForUnsignedCompare;
+  //    if (iteration == 0) n += times;
+  //    if (hasTwoDividedMedians) {
+  //      if (L1 <= signedLongBits && signedLongBits <= R1)
+  //        maxInInterval1 = Math.max(maxInInterval1, signedLongBits);
+  //
+  //      if (L2 <= signedLongBits && signedLongBits <= R2)
+  //        minInInterval2 = Math.min(minInInterval2, signedLongBits);
+  //      return;
+  //    }
+  //    if (lastL1 <= signedLongBits && signedLongBits < L1) K1 -= times;
+  //    else if (L1 <= signedLongBits && signedLongBits <= R1) worker.add(signedLongBits, times);
+  //  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (lastL <= dataL && dataL < L) K1--;
+    else if (L <= dataL && dataL <= R) worker.update(dataL);
+  }
+
+  @Override
+  public void startIteration() {
+    worker = new HeapLongKLLSketch(maxMemoryByte);
+  }
+
+  @Override
+  public void finishIteration() {
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    setDoubleValue(longToResult(worker.findMinValueWithRank((n + 1) / 2)));
+    hasFinalResult = true;
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    // no-op
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 1;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.n = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(n, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    n = 0;
+    bitsOfDataType = getBitsOfDataType();
+    hasFinalResult = false;
+    L = -Double.MAX_VALUE;
+    R = Double.MAX_VALUE;
+    iteration = 0;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    return false;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugFullReadingAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugFullReadingAggrResult.java
new file mode 100644
index 0000000000..4be7500887
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugFullReadingAggrResult.java
@@ -0,0 +1,532 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.STATISTICS_PAGE_MAXSIZE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.SYNOPSIS_SIZE_IN_BYTE;
+
+public class KLLStatDebugFullReadingAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL;
+  private boolean hasFinalResult;
+  private final int maxMemoryByte = 1 << 19; // half of the limit
+  private final int pageAvgError = 50, pageMaxError = 127;
+  private final int pageKLLMemoryByte = (68 + 15) * 8, pageKLLNumMemoryByte = SYNOPSIS_SIZE_IN_BYTE;
+  //  private List<HeapLongKLLSketch> pageKLL;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private final int pageKLLMaxSize = (int) Math.floor((1.0 * maxMemoryByte / pageKLLMemoryByte));
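+  // instrumentation (debug only): points read in full vs. actually demanded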
+  long PAGE_FULL_READ = 0, FULL_READ = 0, DEMAND_READ = 0;
+  boolean FULL_READING_PAGE = false;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    long dataAvgError = (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+    return dataAvgError;
+  }
+
+  private long approximateStatAvgError() {
+    double rate = 1.0 * pageKLLNumMemoryByte * pageKLLNum / maxMemoryByte;
+    long pageStatAvgError;
+    if (rate < 1.0) {
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum == 1) return pageMaxError;
+      else pageStatAvgError += pageMaxError;
+    } else {
+      int memKLLNum = maxMemoryByte / pageKLLNumMemoryByte;
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError * 5 / 4;
+  }
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLStatDebugFullReadingAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT_DEBUG_FULL_READING);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
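+  // every streamed point counts toward FULL_READ; only points inside the
+  // candidate interval are DEMAND_READs and enter the sketch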
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    FULL_READ++;
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      DEMAND_READ++;
+      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapKLL.getApproxRank(val);
+  }
+
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLL = new ArrayList<>(pageKLLMaxSize);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.add(null);
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT DEBUG]"
+            + "finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1
+            + "\t FULL_READ:"
+            + FULL_READ);
+    System.out.println(
+        "\t[KLL STAT DEBUG]" + " statNum:" + pageKLLNum + " heapN:" + heapKLL.getN());
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    long K2 = hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heapKLL
+      System.out.println("\t[KLL STAT DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+      //      System.out.println("\t[KLL STAT DEBUG] DEBUG:" + DEBUG);
+      if (heapKLL.exactResult()) {
+        long v1 = heapKLL.getExactResult((int) K1 - 1), v2 = heapKLL.getExactResult((int) K2 - 1);
+        double ans = 0.5 * (longToResult(v1) + longToResult(v2));
+        setDoubleValue(FULL_READ); // DEBUG: report the full-read count, not the median
+        hasFinalResult = true;
+      } else {
+        long err = approximateDataAvgError();
+        long minK = K1 - 1 - err * 2, maxK = K2 - 1 + err * 2;
+        System.out.println(
+            "\t[KLL STAT DEBUG] finding cntLR...  err:" + err + "minK,maxK:" + minK + "," + maxK);
+        cntL = findMinValueWithRankGE(minK);
+        cntR = findMaxValueWithRankLE(maxK);
+        if (cntL == cntR + 1) {
+          if (getRankInKLL(cntL) < K1) cntR = cntL;
+          else cntL = cntR;
+        }
+        //        System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+        if (cntL == cntR) {
+          double ans = longToResult(cntL);
+          setDoubleValue(FULL_READ);
+          hasFinalResult = true;
+        }
+      }
+      return;
+    }
+    // first iteration finished and page-level KLL statistics were collected
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    long err = (approximateDataAvgError() + approximateStatAvgError());
+    long minK = K1 - 1 - err * 3, maxK = K2 - 1 + err * 3;
+    mergePageKLL();
+    System.out.println(
+        "\t[KLL STAT DEBUG] finding cntLR...  minK,maxK:"
+            + minK
+            + ","
+            + maxK
+            + "\theapN:"
+            + heapKLL.getN());
+    heapKLL.show();
+    cntL = findMinValueWithRankGE(minK);
+    cntR = findMaxValueWithRankLE(maxK);
+    if (cntL == cntR + 1) {
+      if (getRankInKLL(cntL) < K1) cntR = cntL;
+      else cntL = cntR;
+    }
+    //    System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+    System.out.println(
+        "\t[KLL STAT DEBUG] avg_err:" + err + "\tcntL:" + cntL + "\tcntR:" + cntR + "\n");
+    pageKLLNum = 0;
+
+    if (cntL == cntR) {
+      double ans = longToResult(cntL);
+      setDoubleValue(FULL_READ);
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
+  private void addSketch(KLLSketchForQuantile sketch) {
+    if (pageKLLIndex == pageKLLMaxSize) {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+    }
+    pageKLL.set(pageKLLIndex++, sketch);
+  }
+
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte * 2);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getKllSketchNum() > 0) {
+          //          System.out.println(
+          //              "\t[KLL STAT DEBUG] update from page stat:\t" + "n:" +
+          // stat.getKllSketch().getN());
+          //          addSketch(stat.getKllSketch(), pageKLL, pageKLLMemoryByte);
+          pageKLLNum += stat.getKllSketchNum();
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          //          heapKLL.mergeWithTempSpace(stat.getKllSketch());
+          return;
+        } else System.out.println("\t\t\t\t!!!!!![ERROR!] no KLL in stat!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // min == max
+      for (int i = 0; i < statistics.getCount(); i++) updateStatusFromData(minVal);
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 3;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+    PAGE_FULL_READ = FULL_READ = 0;
+    FULL_READING_PAGE = false;
+    DEMAND_READ = 0;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    FULL_READING_PAGE = false;
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) return true; // min==max
+
+    if (statistics.getCount() <= STATISTICS_PAGE_MAXSIZE) {
+      PAGE_FULL_READ++;
+      FULL_READING_PAGE = true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugPageDemandRateAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugPageDemandRateAggrResult.java
new file mode 100644
index 0000000000..974089874d
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatDebugPageDemandRateAggrResult.java
@@ -0,0 +1,554 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.STATISTICS_PAGE_MAXSIZE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.SYNOPSIS_SIZE_IN_BYTE;
+
+public class KLLStatDebugPageDemandRateAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL;
+  private boolean hasFinalResult;
+  private final int maxMemoryByte = 1 << 19; // half of the limit
+  private final int pageAvgError = 50, pageMaxError = 127;
+  private final int pageKLLMemoryByte = (68 + 15) * 8, pageKLLNumMemoryByte = SYNOPSIS_SIZE_IN_BYTE;
+  //  private List<HeapLongKLLSketch> pageKLL;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private final int pageKLLMaxSize = (int) Math.floor((1.0 * maxMemoryByte / pageKLLMemoryByte));
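+  // instrumentation (debug only): per-page demand-read rates, which this
+  // variant reports in place of the median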
+  long PAGE_FULL_READ = 0, FULL_READ = 0, DEMAND_READ = 0, CNT_PAGE_N, CNT_PAGE_READ, DEMAND_PAGE;
+  List<Double> PAGE_RATE;
+  boolean FULL_READING_PAGE = false;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    long dataAvgError = (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+    return dataAvgError;
+  }
+
+  private long approximateStatAvgError() {
+    double rate = 1.0 * pageKLLNumMemoryByte * pageKLLNum / maxMemoryByte;
+    long pageStatAvgError;
+    if (rate < 1.0) {
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum == 1) return pageMaxError;
+      else pageStatAvgError += pageMaxError;
+    } else {
+      int memKLLNum = maxMemoryByte / pageKLLNumMemoryByte;
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError * 5 / 4;
+  }
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLStatDebugPageDemandRateAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT_DEBUG_PAGE_DEMAND_RATE);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    FULL_READ++;
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      DEMAND_READ++;
+      heapKLL.update(dataL);
+      heapN++;
+      if (FULL_READING_PAGE) CNT_PAGE_READ++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
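+  // records the fraction of the just-scanned page that was actually demanded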
+  private void add_demand_page() {
+    DEMAND_PAGE++;
+    PAGE_RATE.add(1.0 * CNT_PAGE_READ / CNT_PAGE_N);
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapKLL.getApproxRank(val);
+  }
+
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
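+
+  // Both helpers binary-search the 64-bit encoded domain (at most ~64 rank probes) rather
+  // than the sketch contents: findMinValueWithRankGE(K) returns the smallest encoded value
+  // whose approximate rank is >= K, and findMaxValueWithRankLE(K) the largest one with
+  // rank <= K. Together they bracket the candidate interval [cntL, cntR] for the next pass.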
+
+  @Override
+  public void startIteration() {
+    heapN = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLL = new ArrayList<>(pageKLLMaxSize);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.add(null);
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
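+  // finishIteration narrows the search each pass: fix the target ranks K1/K2 of the two
+  // middle elements, pad them by a multiple of the estimated average rank error, query the
+  // sketch for the corresponding value interval [cntL, cntR], and either emit a result
+  // (exact sketch, or interval collapsed to a single value) or schedule another pass
+  // restricted to [cntL, cntR].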
+  @Override
+  public void finishIteration() {
+    if (CNT_PAGE_READ > 0) addDemandPage();
+    System.out.println(
+        "\t[KLL STAT DEBUG] finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println(
+        "\t[KLL STAT DEBUG]" + " pageKLLNum:" + pageKLLNum + " heapN:" + heapKLL.getN());
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    long K2 = hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heapKLL
+      System.out.println("\t[KLL STAT DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+      //      System.out.println("\t[KLL STAT DEBUG] DEBUG:" + DEBUG);
+      if (heapKLL.exactResult()) {
+        // this debug variant reports the median observed page demand rate, not the data median
+        Collections.sort(PAGE_RATE);
+        if (!PAGE_RATE.isEmpty()) setDoubleValue(PAGE_RATE.get(PAGE_RATE.size() / 2));
+        else setDoubleValue(0.0);
+        hasFinalResult = true;
+      } else {
+        long err = approximateDataAvgError();
+        long minK = K1 - 1 - err * 2, maxK = K2 - 1 + err * 2;
+        System.out.println(
+            "\t[KLL STAT DEBUG] finding cntLR...  err:" + err + "\tminK,maxK:" + minK + "," + maxK);
+        cntL = findMinValueWithRankGE(minK);
+        cntR = findMaxValueWithRankLE(maxK);
+        if (cntL == cntR + 1) {
+          if (getRankInKLL(cntL) < K1) cntR = cntL;
+          else cntL = cntR;
+        }
+        //        System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+        if (cntL == cntR) { // interval collapsed: report the median page demand rate
+          Collections.sort(PAGE_RATE);
+          if (!PAGE_RATE.isEmpty()) setDoubleValue(PAGE_RATE.get(PAGE_RATE.size() / 2));
+          else setDoubleValue(0.0);
+          hasFinalResult = true;
+        }
+      }
+      return;
+    }
+    // this was the first pass and page KLL statistics were collected
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    long err = (approximateDataAvgError() + approximateStatAvgError());
+    long minK = K1 - 1 - err * 3, maxK = K2 - 1 + err * 3;
+    mergePageKLL();
+    System.out.println(
+        "\t[KLL STAT DEBUG] finding cntLR...  minK,maxK:"
+            + minK
+            + ","
+            + maxK
+            + "\theapN:"
+            + heapKLL.getN());
+    heapKLL.show();
+    cntL = findMinValueWithRankGE(minK);
+    cntR = findMaxValueWithRankLE(maxK);
+    if (cntL == cntR + 1) {
+      if (getRankInKLL(cntL) < K1) cntR = cntL;
+      else cntL = cntR;
+    }
+    //    System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+    System.out.println(
+        "\t[KLL STAT DEBUG] avg_err:" + err + "\tcntL:" + cntL + "\tcntR:" + cntR + "\n");
+    pageKLLNum = 0;
+
+    if (cntL == cntR) { // interval collapsed: report the median page demand rate
+      Collections.sort(PAGE_RATE);
+      if (!PAGE_RATE.isEmpty()) setDoubleValue(PAGE_RATE.get(PAGE_RATE.size() / 2));
+      else setDoubleValue(0.0);
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
+  private void addSketch(KLLSketchForQuantile sketch) {
+    if (pageKLLIndex == pageKLLMaxSize) {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+    }
+    pageKLL.set(pageKLLIndex++, sketch);
+  }
+
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte * 2);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getKllSketchNum() > 0) {
+          //          System.out.println(
+          //              "\t[KLL STAT DEBUG] update from page stat:\t" + "n:" +
+          // stat.getKllSketch().getN());
+          //          addSketch(stat.getKllSketch(), pageKLL, pageKLLMemoryByte);
+          pageKLLNum += stat.getKllSketchNum();
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          //          heapKLL.mergeWithTempSpace(stat.getKllSketch());
+          return;
+        } else System.out.println("\t\t\t\t!!!!!![ERROR!] no KLL in stat!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // min == max: minVal is already long-encoded, so update directly
+      // (passing it back through updateStatusFromData would re-encode the value and, in the
+      // first pass, double-count n, which was already advanced by statistics.getCount())
+      for (int i = 0; i < statistics.getCount(); i++) {
+        if (cntL <= minVal && minVal <= cntR) {
+          heapKLL.update(minVal);
+          heapN++;
+        } else if (lastL <= minVal && minVal < cntL) K1--;
+      }
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 3;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+    PAGE_FULL_READ = FULL_READ = 0;
+    FULL_READING_PAGE = false;
+    DEMAND_READ = 0;
+    CNT_PAGE_N = 0;
+    CNT_PAGE_READ = 0;
+    DEMAND_PAGE = 0;
+    PAGE_RATE = new ArrayList<>(2333);
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if (CNT_PAGE_READ > 0) addDemandPage();
+    FULL_READING_PAGE = false;
+    CNT_PAGE_N = 0;
+    CNT_PAGE_READ = 0;
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) return true; // min==max
+
+    if (statistics.getCount() <= STATISTICS_PAGE_MAXSIZE) {
+      CNT_PAGE_N = statistics.getCount();
+      CNT_PAGE_READ = 0;
+      //      PAGE_FULL_READ++;
+      FULL_READING_PAGE = true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatMedianAggrResult.java
new file mode 100644
index 0000000000..8275defcef
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatMedianAggrResult.java
@@ -0,0 +1,540 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+
+public class KLLStatMedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum, statNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL;
+  private boolean hasFinalResult;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private long TOT_SKETCH_N = 0, TOT_SKETCH_SIZE = 0;
+  private int SKETCH_BYTE = -1;
+  private int pageKLLMaxIndex;
+  static final double memoryForMergeBuffer = 0.75;
+  long DEBUG = 0;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    // rough average rank error of the heap sketch: about 2 * heapN / capacity, plus one
+    return (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+  }
+
+  private long approximateStatAvgError() {
+    if (SKETCH_BYTE < 0) return 0;
+    double pageAvgError = 1.0 * TOT_SKETCH_N / TOT_SKETCH_SIZE / 3.0;
+    double rate = 1.0 * SKETCH_BYTE * pageKLLNum / (maxMemoryByte * (1 - memoryForMergeBuffer));
+    long pageStatAvgError;
+    if (rate < 1.0) { // similar to Random Sampling
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum <= 10) pageStatAvgError += pageAvgError * 3.0;
+    } else {
+      int memKLLNum = (int) Math.round((maxMemoryByte * (1 - memoryForMergeBuffer)) / SKETCH_BYTE);
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError;
+  }
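+
+  // Rough error model for the pre-aggregated page sketches: pageAvgError estimates how many
+  // points each stored sketch entry stands for (the /3.0 looks like an empirical damping
+  // factor). If all page sketches fit in the reserved buffer (rate < 1), their errors are
+  // assumed to accumulate like independent noise, i.e. with sqrt(pageKLLNum); otherwise
+  // intermediate merges are forced and the estimate grows linearly with the overflow rate.
+  // This is a heuristic used only to pad [cntL, cntR], not a proven bound.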
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLStatMedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapKLL.getApproxRank(val);
+  }
+
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = statNum = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte / 2);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
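+  // After the first pass fixes K1 = (n + 1) / 2, each finishIteration either answers
+  // directly (heapKLL.exactResult(), i.e. the sketch still holds every update) or derives
+  // the padded candidate interval [cntL, cntR]; the page-statistics error term is added on
+  // top whenever page sketches were merged into the heap during this pass.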
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT DEBUG] finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println(
+        "\t[KLL STAT DEBUG]"
+            + " statNum:"
+            + statNum
+            + " pageKllNum:"
+            + pageKLLNum
+            + " heapN:"
+            + heapN);
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    long K2 = hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heapKLL
+      System.out.println("\t[KLL STAT DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+      //      System.out.println("\t[KLL STAT DEBUG] DEBUG:" + DEBUG);
+      if (heapKLL.exactResult()) {
+        long v1 = heapKLL.getExactResult((int) K1 - 1), v2 = heapKLL.getExactResult((int) K2 - 1);
+        double ans = 0.5 * (longToResult(v1) + longToResult(v2));
+        setDoubleValue(ans);
+        hasFinalResult = true;
+      } else {
+        long err = approximateDataAvgError();
+        long minK = K1 - 1 - err * 3, maxK = K2 - 1 + err * 3;
+        System.out.println(
+            "\t[KLL STAT DEBUG] finding cntLR...  err:" + err + "\tminK,maxK:" + minK + "," + maxK);
+        cntL = findMinValueWithRankGE(minK);
+        cntR = findMaxValueWithRankLE(maxK);
+        if (cntL == cntR + 1) {
+          if (getRankInKLL(cntL) < K1) cntR = cntL;
+          else cntL = cntR;
+        }
+        System.out.println(
+            "\t[KLL STAT DEBUG] cntLR found."
+                + "\t"
+                + longToResult(cntL)
+                + "\t"
+                + longToResult(cntR));
+        if (cntL == cntR) {
+          double ans = longToResult(cntL);
+          setDoubleValue(ans);
+          hasFinalResult = true;
+        }
+      }
+      return;
+    }
+    // this was the first pass and page KLL statistics were collected
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    mergePageKLL();
+    long err = (approximateDataAvgError() + approximateStatAvgError());
+    long minK = K1 - 1 - err * 3, maxK = K2 - 1 + err * 3;
+    System.out.println(
+        "\t[KLL STAT DEBUG] finding cntLR...  minK,maxK:"
+            + minK
+            + ","
+            + maxK
+            + "\theapN:"
+            + heapKLL.getN());
+    heapKLL.show();
+    cntL = findMinValueWithRankGE(minK);
+    cntR = findMaxValueWithRankLE(maxK);
+    if (cntL == cntR + 1) {
+      if (getRankInKLL(cntL) < K1) cntR = cntL;
+      else cntL = cntR;
+    }
+    //    System.out.println("\t[KLL STAT DEBUG] cntLR found.");
+    System.out.println(
+        "\t[KLL STAT DEBUG] avg_err:" + err + "\tcntL:" + cntL + "\tcntR:" + cntR + "\n");
+    pageKLLNum = 0;
+
+    if (cntL == cntR) {
+      double ans = longToResult(cntL);
+      setDoubleValue(ans);
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
+  private void addSketch(KLLSketchForQuantile sketch) {
+    TOT_SKETCH_N += sketch.getN();
+    TOT_SKETCH_SIZE += sketch.getNumLen();
+    if (SKETCH_BYTE < 0) {
+      SKETCH_BYTE = sketch.getNumLen() * 8;
+      pageKLLMaxIndex = (int) Math.floor((1.0 * maxMemoryByte / 2 / SKETCH_BYTE));
+      pageKLL = new ArrayList<>(pageKLLMaxIndex);
+      for (int i = 0; i < pageKLLMaxIndex; i++) pageKLL.add(null);
+    }
+    if (pageKLLIndex == pageKLLMaxIndex) {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxIndex; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+    }
+    pageKLL.set(pageKLLIndex++, sketch);
+  }
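+
+  // addSketch sizes the page-sketch buffer lazily from the first sketch it sees (page
+  // sketches of one series are expected to share a serialized size) and batches incoming
+  // sketches, folding a full buffer into heapKLL with one mergeWithTempSpace call, so the
+  // heap sketch is rebuilt once per batch instead of once per page.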
+
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
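+
+  // The final merge rebuilds heapKLL at the full memory budget (startIteration reserved
+  // half of it for the heap sketch while page sketches were still being buffered) and then
+  // folds the old heap and the buffered page sketches in via temp space.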
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getKllSketchNum() > 0) {
+          //          System.out.println(
+          //              "\t[KLL STAT DEBUG] update from page stat:\t" + "n:" +
+          // stat.getKllSketch().getN());
+          //          addSketch(stat.getKllSketch(), pageKLL, pageKLLMemoryByte);
+          pageKLLNum += stat.getKllSketchNum();
+          statNum += 1;
+          //          if(stat.getBfNum()>1)
+          //            System.out.println("\t\tFK\tstat:" +
+          //                stat.getStartTime() + "..." + stat.getEndTime());
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          //          heapKLL.mergeWithTempSpace(stat.getKllSketch());
+          return;
+        } else System.out.println("\t\t\t\t!!!!!![ERROR!] no KLL in stat!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // min == max: minVal is already long-encoded, so update directly
+      // (passing it back through updateStatusFromData would re-encode the value and, in the
+      // first pass, double-count n, which was already advanced by statistics.getCount())
+      for (int i = 0; i < statistics.getCount(); i++) {
+        if (cntL <= minVal && minVal <= cntR) {
+          heapKLL.update(minVal);
+          heapN++;
+        } else if (lastL <= minVal && minVal < cntL) K1--;
+      }
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 3;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0); // min==max
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+
+  @Override
+  public boolean useStatisticsIfPossible() {
+    return iteration == 0;
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatOverlapSingleAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatOverlapSingleAggrResult.java
new file mode 100644
index 0000000000..c233815629
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatOverlapSingleAggrResult.java
@@ -0,0 +1,558 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IAggregateReader;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.query.reader.series.SeriesAggregateReader;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.read.reader.IPointReader;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import org.eclipse.collections.api.iterator.MutableLongIterator;
+import org.eclipse.collections.api.map.MutableMap;
+import org.eclipse.collections.impl.list.mutable.primitive.LongArrayList;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.SYNOPSIS_SIZE_IN_BYTE;
+
+public class KLLStatOverlapSingleAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL, heapDELKLL;
+  private boolean hasFinalResult;
+  private final int maxMemoryByte = 1 << 19; // half of the limit
+  private final int pageAvgError = 50, pageMaxError = 127;
+  private final int pageKLLMemoryByte = (68 + 15) * 8, pageKLLNumMemoryByte = SYNOPSIS_SIZE_IN_BYTE;
+  //  private List<HeapLongKLLSketch> pageKLL;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private final int pageKLLMaxSize = (int) Math.floor((1.0 * maxMemoryByte / pageKLLMemoryByte));
+  long DEBUG = 0;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    // rough average rank error of the heap sketch: about 2 * heapN / capacity, plus one
+    return (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+  }
+
+  private long approximateStatAvgError() {
+    double rate = 1.0 * pageKLLNumMemoryByte * pageKLLNum / maxMemoryByte;
+    long pageStatAvgError;
+    if (rate < 1.0) {
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum <= 10) pageStatAvgError += pageMaxError;
+    } else {
+      int memKLLNum = maxMemoryByte / pageKLLNumMemoryByte;
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError;
+  }
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLStatOverlapSingleAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT_OVERLAP_SINGLE);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromLong(long dataL) {
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  private void updateStatusFromData(Object data) {
+    // identical to updateStatusFromLong after encoding; delegate to avoid the duplicate
+    updateStatusFromLong(dataToLong(data));
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapDELKLL != null
+        ? (heapKLL.getApproxRank(val) - heapDELKLL.getApproxRank(val))
+        : heapKLL.getApproxRank(val);
+  }
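+
+  // In this overlap-aware variant, ranks are corrected for deletions: heapDELKLL collects
+  // points that an overlapped page invalidated after its statistics were already merged,
+  // so the effective rank of val is rank(heapKLL) - rank(heapDELKLL).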
+
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte * 4 / 5);
+      heapDELKLL = new HeapLongKLLSketch(maxMemoryByte / 5);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLL = new ArrayList<>(pageKLLMaxSize);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.add(null);
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT Single DEBUG] finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println(
+        "\t[KLL STAT Single DEBUG]" + " pageKLLNum:" + pageKLLNum + " heapN:" + heapKLL.getN());
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (n + 1) >> 1;
+    }
+    long K2 = hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT Single DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heapKLL
+      System.out.println("\t[KLL STAT Single DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+
+      double v1 = longToResult(findMinValueWithRankGE(K1 - 1));
+      //      System.out.println("\t[KLL STAT DEBUG]" + "v1:" + v1);
+      double v2 = longToResult(findMinValueWithRankGE(K2 - 1));
+      double ans = 0.5 * (v1 + v2);
+      setDoubleValue(ans);
+      hasFinalResult = true;
+      return;
+    }
+    // this was the first pass and page KLL statistics were collected
+    //    heapKLL.show();
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    mergePageKLL();
+    heapKLL.show();
+    System.out.println(
+        "\t[KLL STAT Single DEBUG] after merge. heapN:"
+            + heapKLL.getN()
+            + "\theapdelN:"
+            + heapDELKLL.getN()
+            + "\tn_true:"
+            + n);
+    double v1 = longToResult(findMinValueWithRankGE(K1 - 1));
+    double v2 = longToResult(findMinValueWithRankGE(K2 - 1));
+    double ans = 0.5 * (v1 + v2);
+    setDoubleValue(ans);
+    hasFinalResult = true;
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
+  private void addSketch(KLLSketchForQuantile sketch) {
+    if (pageKLLIndex < pageKLLMaxSize) pageKLL.set(pageKLLIndex++, sketch);
+    else {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxSize; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+      pageKLL.set(pageKLLIndex++, sketch);
+      //      System.out.println(
+      //          "\t[KLL STAT DEBUG]\theapKLL merge pageKLLList. newN: "
+      //              + heapKLL.getN()
+      //              + "   n_true:"
+      //              + n);
+      //      heapKLL.show();
+    }
+  }
+
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte * 2);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getKllSketchNum() > 0) {
+          pageKLLNum += stat.getKllSketchNum();
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          //          System.out.println(
+          //              "\t[KLL STAT Single DEBUG] updateResultFromStatistics. pageN:"
+          //                  + stat.getKllSketch().getN());
+          //          stat.getKllSketch().show();
+          //          return;
+        } else System.out.println("\t\t\t\t!!!!!![ERROR!] no KLL in stat!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] no KLL in stat. update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // min == max: minVal is already long-encoded, so update directly
+      // (re-encoding via updateStatusFromData would break, and in the first pass n was
+      // already advanced by statistics.getCount() above)
+      for (int i = 0; i < statistics.getCount(); i++) {
+        if (cntL <= minVal && minVal <= cntR) {
+          heapKLL.update(minVal);
+          heapN++;
+        } else if (lastL <= minVal && minVal < cntL) K1--;
+      }
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    //    System.out.print("\t[KLL STAT DEBUG]\tupdateResultFromPageData:");
+    //    int tmp_tot = 0;
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      //      System.out.print(
+      //          " (" + batchIterator.currentTime() + "," + batchIterator.currentValue() + ")");
+      //      tmp_tot++;
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+    //    System.out.println(" tot:" + tmp_tot);
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    //    System.out.print("\t[KLL STAT DEBUG]\tupdateResultUsingTimestamps:");
+    //    int tmp_tot = 0;
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+        //        tmp_tot++;
+      }
+    }
+    //    System.out.println(" tot:" + tmp_tot);
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 1;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    heapDELKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0); // min==max
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+
+  @Override
+  public boolean useOverlapStat() {
+    return iteration == 0;
+  }
+
+  @Override
+  public void updateResultFromOverlap(IAggregateReader reader) {
+    MutableMap<IPointReader, Statistics> pageStat = ((SeriesAggregateReader) reader).getPageStat();
+    MutableMap<IPointReader, LongArrayList[]> pageData =
+        ((SeriesAggregateReader) reader).getPageData();
+    //    System.out.println("\t[KLL STAT DEBUG]\tupdateResultFromOverlap\t");
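+    // Per overlapped page, choose between raw values and statistics: if the page sketch is
+    // unusable, some points were left unclassified, or more than half of the page was
+    // invalidated, replay the valid points one by one; otherwise merge the page statistics
+    // and record the invalidated points in heapDELKLL so getRankInKLL can subtract them.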
+    for (IPointReader pageReader : pageStat.keySet()) {
+      Statistics stat = pageStat.get(pageReader);
+      //      System.out.println(
+      //          "\t[KLL STAT DEBUG]\tupdateResultFromOverlap\t\treaderHashCode:"
+      //              + pageReader.hashCode()
+      //              + "pageN:"
+      //              + stat.getCount());
+      LongArrayList validData = pageData.get(pageReader)[0];
+      LongArrayList invalidData = pageData.get(pageReader)[1];
+      if (!canUpdateFromStatistics(stat)
+          || validData.size() + invalidData.size() < stat.getCount()
+          || invalidData.size() > 0.5 * stat.getCount()) {
+        for (MutableLongIterator itr = validData.longIterator(); itr.hasNext(); )
+          updateStatusFromLong(itr.next());
+      } else {
+        updateResultFromStatistics(stat);
+        for (MutableLongIterator itr = invalidData.longIterator(); itr.hasNext(); ) {
+          heapDELKLL.update(itr.next());
+          n--;
+        }
+      }
+    }
+    pageStat.clear();
+    pageData.clear();
+  }
+}
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleAggrResult.java
new file mode 100644
index 0000000000..98e7211912
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleAggrResult.java
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.HeapLongKLLSketch;
+import org.apache.iotdb.tsfile.utils.KLLSketchForQuantile;
+import org.apache.iotdb.tsfile.utils.LongKLLSketch;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+
+public class KLLStatSingleAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum, statNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private HeapLongKLLSketch heapKLL;
+  private boolean hasFinalResult;
+  private List<KLLSketchForQuantile> pageKLL;
+  private int pageKLLIndex;
+  private long TOT_SKETCH_N = 0, TOT_SKETCH_SIZE = 0;
+  private int SKETCH_SIZE = -1;
+  private int pageKLLMaxIndex;
+  long DEBUG = 0;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long approximateDataAvgError() {
+    // rough average rank error of the heap sketch: about 2 * heapN / capacity, plus one
+    return (long) Math.ceil(2.0 * heapN / heapKLL.getMaxMemoryNum()) + 1;
+  }
+
+  private long approximateStatAvgError() {
+    if (SKETCH_SIZE < 0) return 0;
+    double pageAvgError = 1.0 * TOT_SKETCH_N / TOT_SKETCH_SIZE / 3.0;
+    double rate = 1.0 * SKETCH_SIZE * pageKLLNum / (maxMemoryByte);
+    long pageStatAvgError;
+    if (rate < 1.0) {
+      pageStatAvgError = (long) Math.ceil(pageAvgError * Math.pow(pageKLLNum, 0.5));
+      if (pageKLLNum <= 10) pageStatAvgError += pageAvgError * 3.0;
+    } else {
+      int memKLLNum = (maxMemoryByte) / SKETCH_SIZE;
+      long memErr = (long) Math.ceil(pageAvgError * Math.pow(memKLLNum, 0.5));
+      pageStatAvgError = (long) Math.ceil(rate * 0.5 * memErr + 0.5 * memErr);
+    }
+    return pageStatAvgError;
+  }
+
+  private long approximateMaxError() {
+    return 0;
+  }
+
+  private boolean hasTwoMedians() {
+    return (n & 1) == 0;
+  }
+
+  public KLLStatSingleAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT_SINGLE);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  private long getRankInKLL(long val) {
+    //    long rank = 0;
+    //    if (pageKLL != null) {
+    //      for (HeapLongKLLSketch heapLongKLLSketch : pageKLL)
+    //        if (heapLongKLLSketch != null) rank += heapLongKLLSketch.getApproxRank(val);
+    //    }
+    //    rank += heapKLL.getApproxRank(val);
+    //    return rank;
+    return heapKLL.getApproxRank(val);
+  }
+
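+  // binary search the encoded-long domain for the largest value whose approximate rank in heapKLL is <= K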
+  public long findMaxValueWithRankLE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == L) mid++;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) <= K) L = mid;
+      else R = mid - 1;
+      //      System.out.println("\t mid:"+mid+"  mid_rank:"+getRankInKLL(mid));
+    }
+    return L;
+  }
+
+  public long findMinValueWithRankGE(long K) {
+    long L = Long.MIN_VALUE, R = Long.MAX_VALUE, mid;
+    while (L < R) {
+      mid = L + ((R - L) >>> 1);
+      if (mid == R) mid--;
+      //      System.out.println(
+      //          "\t\t\t" + L + "\t" + R + "\t\t\tmid:" + mid + "\trank:" + getRankInKLL(mid));
+      if (getRankInKLL(mid) >= K) R = mid;
+      else L = mid + 1;
+    }
+    return L;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = statNum = 0;
+    if (iteration == 0) { // first iteration
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+      pageKLLIndex = 0;
+    } else {
+      heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+      pageKLLNum = 0;
+      pageKLL = null;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT Single DEBUG]"
+            + "finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println(
+        "\t[KLL STAT Single DEBUG]"
+            + " statNum:"
+            + statNum
+            + " pageKllNum:"
+            + pageKLLNum
+            + " heapN:"
+            + heapN);
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    lastL = cntL;
+
+    if (iteration == 1) { // first iteration over
+      K1 = (long) Math.floor((n + 1) * QUANTILE);
+    }
+    long K2 = K1 + 1; // hasTwoMedians() ? (K1 + 1) : K1;
+
+    System.out.println("\t[KLL STAT Single DEBUG]" + " K1,K2:" + K1 + ", " + K2);
+    if (pageKLLNum == 0) { // all in heap
+      System.out.println("\t[KLL STAT Single DEBUG]" + " calc by heap only. N:" + heapKLL.getN());
+      heapKLL.show();
+
+      double v1 = longToResult(heapKLL.findMinValueWithRank(K1 - 1));
+      //      System.out.println("\t[KLL STAT DEBUG]" + "v1:" + v1);
+      double v2 = longToResult(heapKLL.findMinValueWithRank(K2 - 1));
+      double ans = 0.5 * (v1 + v2);
+      setDoubleValue(ans);
+      hasFinalResult = true;
+      return;
+    }
+    // iteration = 0 && there are page KLL statistics
+    //    heapKLL.show();
+    //    System.out.println("\t[KLL STAT DEBUG] remaining pageKLLSize:" + pageKLLIndex);
+    mergePageKLL();
+    heapKLL.show();
+    System.out.println(
+        "\t[KLL STAT Single DEBUG] after merge. heapN:" + heapKLL.getN() + "\tn_true:" + n);
+    double v1 = longToResult(heapKLL.findMinValueWithRank(K1 - 1));
+    double v2 = longToResult(heapKLL.findMinValueWithRank(K2 - 1));
+    double ans = 0.5 * (v1 + v2);
+    setDoubleValue(ans);
+    hasFinalResult = true;
+    System.out.println("\t[KLL STAT Single DEBUG]" + " est_stats_err:" + approximateStatAvgError());
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  //  private void addSketch(KLLSketchForQuantile sketch, List<HeapLongKLLSketch> a, int baseByte) {
+  //    int pos0 = 0;
+  //    while (pos0 < pageKLLMaxLen && a.get(pos0) != null) pos0++;
+  //    HeapLongKLLSketch bigger_sketch = new HeapLongKLLSketch(baseByte << pos0);
+  //    bigger_sketch.mergeWithTempSpace(sketch);
+  //    for (int i = 0; i < pos0; i++) {
+  //      bigger_sketch.mergeWithTempSpace(a.get(i));
+  //      a.set(i, null);
+  //    }
+  //    if (pos0 == pageKLLMaxLen) { // mem of pageKLL list is too large.
+  //      heapKLL.mergeWithTempSpace(bigger_sketch);
+  //    } else a.set(pos0, bigger_sketch);
+  //  }
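+  // buffer page-level sketches in pageKLL (up to half the memory budget); once full, fold them into heapKLL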
+  private void addSketch(KLLSketchForQuantile sketch) {
+    TOT_SKETCH_N += sketch.getN();
+    TOT_SKETCH_SIZE += sketch.getNumLen();
+    if (SKETCH_SIZE < 0) {
+      SKETCH_SIZE = sketch.getNumLen() * 8;
+      pageKLLMaxIndex = (int) Math.floor((0.5 * maxMemoryByte / SKETCH_SIZE));
+      pageKLL = new ArrayList<>(pageKLLMaxIndex);
+      for (int i = 0; i < pageKLLMaxIndex; i++) pageKLL.add(null);
+    }
+    if (pageKLLIndex < pageKLLMaxIndex) pageKLL.set(pageKLLIndex++, sketch);
+    else {
+      heapKLL.mergeWithTempSpace(pageKLL);
+      for (int i = 0; i < pageKLLMaxIndex; i++) pageKLL.set(i, null);
+      pageKLLIndex = 0;
+      pageKLL.set(pageKLLIndex++, sketch);
+      //      System.out.println(
+      //          "\t[KLL STAT DEBUG]\theapKLL merge pageKLLList. newN: "
+      //              + heapKLL.getN()
+      //              + "   n_true:"
+      //              + n);
+      //      heapKLL.show();
+    }
+  }
+
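+  // rebuild heapKLL within the memory budget, folding in all buffered page-level sketches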
+  private void mergePageKLL() {
+    HeapLongKLLSketch tmpSketch = heapKLL;
+    heapKLL = new HeapLongKLLSketch(maxMemoryByte);
+    heapKLL.mergeWithTempSpace(tmpSketch);
+    heapKLL.mergeWithTempSpace(pageKLL);
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      //      if (statistics.getType() == DOUBLE) {
+      //      }
+      if (statistics.getType() == DOUBLE) {
+        DoubleStatistics stat = (DoubleStatistics) statistics;
+        if (stat.getSummaryNum() > 0) {
+          pageKLLNum += stat.getSummaryNum();
+          statNum += 1;
+          for (LongKLLSketch sketch : stat.getKllSketchList()) addSketch(sketch);
+          //          System.out.println(
+          //              "\t[KLL STAT Single DEBUG] updateResultFromStatistics. pageN:"
+          //                  + stat.getKllSketch().getN());
+          //          stat.getKllSketch().show();
+          return;
+        } else System.out.println("\t\t\t\t[KLL STAT Single ERROR] DOUBLE statistics carry no KLL summary!");
+      }
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] no KLL in stat. update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // every value in this chunk is identical
+      for (int i = 0; i < statistics.getCount(); i++) updateStatusFromData(longToResult(minVal));
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    //    System.out.print("\t[KLL STAT DEBUG]\tupdateResultFromPageData:");
+    //    int tmp_tot = 0;
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      //      System.out.print(
+      //          " (" + batchIterator.currentTime() + "," + batchIterator.currentValue() + ")");
+      //      tmp_tot++;
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+    //    System.out.println(" tot:" + tmp_tot);
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    //    System.out.print("\t[KLL STAT DEBUG]\tupdateResultUsingTimestamps:");
+    //    int tmp_tot = 0;
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+        //        tmp_tot++;
+      }
+    }
+    //    System.out.println(" tot:" + tmp_tot);
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 1;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    heapKLL = null;
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+    TOT_SKETCH_N = TOT_SKETCH_SIZE = 0;
+    SKETCH_SIZE = -1;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      //      System.out.println(
+      //          "\t[DEBUG][KLL STAT SINGLE]\tcanUseStat? count:"
+      //              + doubleStats.getCount()
+      //              + " KLLNum:"
+      //              + doubleStats.getKllSketchNum());
+      if (doubleStats.getSummaryNum() > 0) return true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
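
The KLL classes above and the bucket-based classes below all funnel values through
the same order-preserving bit trick before comparing, sketching, or bucketing them.
A minimal standalone sketch (class name and test harness ours, not part of this
patch) mirroring the DOUBLE branch of dataToLong/longToResult above; -0.0 is left
out of the test data because the `v >= 0d` branch treats it as non-negative, so it
neither orders nor round-trips consistently:

    public class OrderPreservingEncodingDemo {
      // non-negative doubles keep their IEEE-754 bit pattern; negatives flip the
      // 63 value bits so that larger doubles always encode to larger signed longs
      static long dataToLong(double v) {
        long bits = Double.doubleToLongBits(v);
        return v >= 0d ? bits : bits ^ Long.MAX_VALUE;
      }

      static double longToResult(long r) {
        r = (r >>> 63) == 0 ? r : r ^ Long.MAX_VALUE;
        return Double.longBitsToDouble(r);
      }

      public static void main(String[] args) {
        double[] sorted = {-3.5, -1.0, 0.0, 0.5, 2.0};
        for (int i = 1; i < sorted.length; i++) {
          if (dataToLong(sorted[i - 1]) > dataToLong(sorted[i])) {
            throw new AssertionError("order not preserved at " + sorted[i]);
          }
        }
        for (double v : sorted) {
          if (longToResult(dataToLong(v)) != v) {
            throw new AssertionError("round trip failed for " + v);
          }
        }
        System.out.println("encoding preserves order and round-trips");
      }
    }
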
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleReadAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleReadAggrResult.java
new file mode 100644
index 0000000000..45c6dc5c93
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/KLLStatSingleReadAggrResult.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+import static org.apache.iotdb.tsfile.file.metadata.enums.TSDataType.DOUBLE;
+import static org.apache.iotdb.tsfile.file.metadata.statistics.Statistics.SYNOPSIS_SIZE_IN_BYTE;
+
+public class KLLStatSingleReadAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private int iteration;
+  private long pageKLLNum;
+  private long cntL, cntR, lastL;
+  private long n, K1, heapN;
+  private boolean hasFinalResult;
+  private final int pageAvgError = 50, pageMaxError = 127;
+  private final int pageKLLMemoryByte = (68 + 15) * 8, pageKLLNumMemoryByte = SYNOPSIS_SIZE_IN_BYTE;
+  //  private List<HeapLongKLLSketch> pageKLL;
+  long DEBUG = 0;
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  public KLLStatSingleReadAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(DOUBLE, AggregationType.EXACT_MEDIAN_KLL_STAT_SINGLE_READ);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  private long dataToLong(Object data) throws UnSupportedDataTypeException {
+    long result;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data;
+      case FLOAT:
+        result = Float.floatToIntBits((float) data);
+        return (float) data >= 0f ? result : result ^ Long.MAX_VALUE;
+      case INT64:
+        return (long) data;
+      case DOUBLE:
+        result = Double.doubleToLongBits((double) data);
+        return (double) data >= 0d ? result : result ^ Long.MAX_VALUE;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longToResult(long result) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (result);
+      case FLOAT:
+        result = (result >>> 31) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Float.intBitsToFloat((int) (result));
+      case INT64:
+        return (double) (result);
+      case DOUBLE:
+        result = (result >>> 63) == 0 ? result : result ^ Long.MAX_VALUE;
+        return Double.longBitsToDouble(result);
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data) {
+    long dataL = dataToLong(data);
+    if (iteration == 0) n++;
+    if (cntL <= dataL && dataL <= cntR) {
+      //      heapKLL.update(dataL);
+      heapN++;
+    } else if (lastL <= dataL && dataL < cntL) K1--;
+  }
+
+  @Override
+  public void startIteration() {
+    heapN = 0;
+    if (iteration == 0) { // first iteration
+      lastL = cntL = Long.MIN_VALUE;
+      cntR = Long.MAX_VALUE;
+      n = 0;
+      pageKLLNum = 0;
+    } else {
+      pageKLLNum = 0;
+      System.out.println(
+          "\t[KLL STAT DEBUG] start iteration "
+              + iteration
+              + " cntL,R:"
+              + "["
+              + cntL
+              + ","
+              + cntR
+              + "]"
+              + "\tlastL:"
+              + lastL
+              + "\tK1:"
+              + K1);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    System.out.println(
+        "\t[KLL STAT DEBUG]"
+            + "finish iteration "
+            + iteration
+            + " cntL,R:"
+            + "["
+            + cntL
+            + ","
+            + cntR
+            + "]"
+            + "\tlastL:"
+            + lastL
+            + "\tK1:"
+            + K1);
+    System.out.println("\t[KLL STAT DEBUG]" + " statNum:" + pageKLLNum);
+    iteration++;
+    if (n == 0) {
+      hasFinalResult = true;
+      return;
+    }
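+    // sentinel value: this read-path variant does not compute an actual quantile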
+    setDoubleValue(-233.0);
+    hasFinalResult = true;
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && n > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    if (iteration == 0) {
+      n += statistics.getCount();
+      if (statistics.getType() == DOUBLE && ((DoubleStatistics) statistics).getKllSketchNum() > 0)
+        pageKLLNum += ((DoubleStatistics) statistics).getKllSketchNum();
+    }
+    long minVal = dataToLong(statistics.getMinValue());
+    long maxVal = dataToLong(statistics.getMaxValue());
+    //    System.out.println(
+    //        "\t[KLL STAT DEBUG] update from statistics:\t"
+    //            + "min,max:"
+    //            + minVal
+    //            + ","
+    //            + maxVal
+    //            + " n:"
+    //            + statistics.getCount());
+    // out of range
+    if (minVal > cntR || maxVal < lastL) return;
+    if (lastL <= minVal && maxVal < cntL) {
+      K1 -= statistics.getCount();
+      return;
+    }
+    if (minVal == maxVal) { // every value in this chunk is identical
+      // decode back to the series type before replaying, as KLLStatSingleAggrResult does;
+      // replaying the raw encoded long would fail dataToLong's cast for DOUBLE series
+      for (int i = 0; i < statistics.getCount(); i++)
+        updateStatusFromData(longToResult(minVal));
+      return;
+    }
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue());
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i]);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next());
+      //      Object tmpObj = valueIterator.next();
+      //      updateStatusFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return 1;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    //    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    // TODO
+  }
+
+  public long getN() {
+    return n;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    lastL = cntL = Long.MIN_VALUE;
+    cntR = Long.MAX_VALUE;
+    n = 0;
+    iteration = 0;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    if ((seriesDataType == DOUBLE) && iteration == 0) {
+      DoubleStatistics doubleStats = (DoubleStatistics) statistics;
+      if (doubleStats.getKllSketchNum() > 0) return true;
+    }
+    if (iteration > 0) {
+      long minVal = dataToLong(statistics.getMinValue());
+      long maxVal = dataToLong(statistics.getMaxValue());
+      if (minVal > cntR || maxVal < lastL) return true;
+      if (lastL <= minVal && maxVal < cntL) return true;
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0); // min==max
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
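
The rank arithmetic both KLL variants apply in finishIteration is easiest to see
against a plain sorted array. A small sketch (ours, not from this patch) of the
same K1/K2 computation; because K2 = K1 + 1 unconditionally (the commented-out
hasTwoMedians() alternative is unused), odd-length inputs average two neighboring
ranks instead of returning the single middle value:

    import java.util.Arrays;

    public class MedianRanksDemo {
      static double medianByRanks(double[] data) {
        double[] sorted = data.clone();
        Arrays.sort(sorted);
        int n = sorted.length;
        int k1 = (n + 1) / 2;          // 1-based rank of the lower median
        int k2 = Math.min(k1 + 1, n);  // K2 = K1 + 1, clamped at n
        return 0.5 * (sorted[k1 - 1] + sorted[k2 - 1]);
      }

      public static void main(String[] args) {
        System.out.println(medianByRanks(new double[] {4, 2, 1, 3})); // 2.5, the textbook median
        System.out.println(medianByRanks(new double[] {3, 1, 2}));    // 2.5 rather than 2.0
      }
    }
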
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/MedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/MedianAggrResult.java
new file mode 100644
index 0000000000..e01a107ca1
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/MedianAggrResult.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class MedianAggrResult extends AggregateResult {
+
+  private TSDataType seriesDataType;
+  private long cnt, cntInIteration;
+  private int bitsCounted;
+  private long prefixOfMedian1, prefixOfMedian2; // prefixes of the (n/2)-th and (n/2+1)-th smallest values
+  private long K1, K2; // remaining 1-based ranks of those values among the numbers sharing each prefix
+  private long[] bucket1, bucket2;
+  private boolean hasFinalResult;
+
+  public int bitsOfBucket() {
+    return 16;
+  }
+
+  private int sizeOfBucket() {
+    return 1 << bitsOfBucket();
+  }
+
+  private int maskOfBucket() {
+    return (1 << bitsOfBucket()) - 1;
+  }
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType() - bitsCounted));
+  }
+
+  private int bitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean needsTwoBuckets() {
+    return K2 != -1;
+  }
+
+  public MedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // map numeric values of any supported type to unsigned-ordered longs, preserving relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        // for negatives flip all 31 value bits; masking only the exponent bits
+        // (0x7F800000) would not reverse mantissa order among negative floats
+        return (float) data >= 0f ? longBits : longBits ^ 0x7FFFFFFFL;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        // likewise flip all 63 value bits for negative doubles
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7FFFFFFFL;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
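+  // bucket index = the bitsOfBucket() bits immediately below the bitsCounted-bit prefix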
+  private int longBitsToIndex(long longBits) {
+    return (int)
+        ((longBits >>> (bitsOfDataType() - bitsOfBucket() - bitsCounted)) & maskOfBucket());
+  }
+
+  private void updateBucketFromData(Object data, long times) {
+    long longBits = dataToLongBits(data);
+    int index = longBitsToIndex(longBits);
+    //        String tmp = "";
+    //    System.out.println("[DEBUG] maskOfPrefix:"+maskOfPrefix()+ "  "+longBits+"
+    // prefix1:"+prefixOfMedian1);
+    if ((longBits & maskOfPrefix()) == prefixOfMedian1) {
+      bucket1[index] += times;
+      //            tmp += " A";
+    }
+    if (needsTwoBuckets() && (longBits & maskOfPrefix()) == prefixOfMedian2) {
+      bucket2[index] += times;
+      //            tmp += " B";
+    }
+    //        System.out.println(longBits + " " + index + " " + tmp + "   times:" + times);
+    cntInIteration += times;
+  }
+
+  private Map<String, Long> updateFromBucket(long[] bucket, long K, long prefixOfMedian) {
+    long tmpCnt = 0;
+    int p = 0;
+    for (int i = 0; i < sizeOfBucket(); i++) {
+      tmpCnt += bucket[i];
+      if (tmpCnt >= K) {
+        p = i;
+        break;
+      }
+    }
+    //    System.out.println("[updateFromBucket] p:" + p);
+    Map<String, Long> result = new HashMap<>();
+    result.put("K", K - (tmpCnt - bucket[p]));
+    result.put(
+        "prefixOfMedian",
+        prefixOfMedian | ((long) p << (bitsOfDataType() - bitsOfBucket() - bitsCounted)));
+    //    System.out.println(
+    //        "[updateFromBucket] prefixOfMedian:" + ((prefixOfMedian << bitsOfBucket()) | p));
+    return result;
+  }
+
+  @Override
+  public void startIteration() {
+    if (bucket1 != null) Arrays.fill(bucket1, 0);
+    else this.bucket1 = new long[sizeOfBucket()];
+    if (bucket2 != null) Arrays.fill(bucket2, 0);
+  }
+
+  @Override
+  public void finishIteration() {
+    if (bitsCounted == 0) {
+      cnt = cntInIteration;
+      K1 = (cnt + 1) >> 1;
+
+      // TODO : just sort when the amount of data is small enough
+
+      if ((cnt & 1) == 0) {
+        bucket2 = Arrays.copyOf(bucket1, sizeOfBucket());
+        K2 = (cnt >> 1) + 1;
+      } else K2 = -1;
+    }
+    if (cnt == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    Map<String, Long> result1 = updateFromBucket(bucket1, K1, prefixOfMedian1);
+    K1 = result1.get("K");
+    prefixOfMedian1 = result1.get("prefixOfMedian");
+    //    Arrays.fill(bucket1, 0);
+
+    // TODO: optimize. unnecessary when prefixOfMedian1==prefixOfMedian2
+    if (needsTwoBuckets()) {
+      Map<String, Long> result2 = updateFromBucket(bucket2, K2, prefixOfMedian2);
+      K2 = result2.get("K");
+      prefixOfMedian2 = result2.get("prefixOfMedian");
+      //      Arrays.fill(bucket2, 0);
+    }
+
+    bitsCounted += bitsOfBucket();
+    if (bitsCounted == bitsOfDataType()) {
+      hasFinalResult = true;
+      if (!needsTwoBuckets()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+      else
+        setDoubleValue(
+            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+      //            System.out.println(
+      //                "median1: "
+      //                    + longBitsToResult(prefixOfMedian1)
+      //                    + "   median2: "
+      //                    + longBitsToResult(prefixOfMedian2));
+    }
+
+    //        System.out.println("\t\t[MEDIAN]"+this.hashCode()+"  finishIteration "+bitsCounted+"
+    //     "+bitsOfDataType());
+    //    System.out.println(
+    //        "K1: "
+    //            + K1
+    //            + " K2: "
+    //            + K2
+    //            + "    cntInIteration:"
+    //            + cntInIteration
+    //            + "|| prefixOfMedian1:"
+    //            + prefixOfMedian1
+    //            + "  prefixOfMedian2:"
+    //            + prefixOfMedian2);
+
+    cntInIteration = 0;
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && cnt > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      updateBucketFromData(minVal, statistics.getCount());
+    } /*else
+      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateBucketFromData(batchIterator.currentValue(), 1);
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateBucketFromData(values[i], 1);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateBucketFromData(valueIterator.next(), 1);
+      //      Object tmpObj = valueIterator.next();
+      //      updateBucketFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType() / bitsOfBucket();
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.cnt = buffer.getLong();
+    this.cntInIteration = buffer.getLong();
+    this.bitsCounted = buffer.getInt();
+    this.K1 = buffer.getLong();
+    this.K2 = buffer.getLong();
+    this.prefixOfMedian1 = buffer.getLong();
+    this.prefixOfMedian2 = buffer.getLong();
+
+    this.bucket1 = new long[sizeOfBucket()];
+    for (int i = 0; i < sizeOfBucket(); i++) this.bucket1[i] = buffer.getLong();
+    if (needsTwoBuckets()) {
+      this.bucket2 = new long[sizeOfBucket()];
+      for (int i = 0; i < sizeOfBucket(); i++) this.bucket2[i] = buffer.getLong();
+    }
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(cnt, outputStream);
+    ReadWriteIOUtils.write(cntInIteration, outputStream);
+    ReadWriteIOUtils.write(bitsCounted, outputStream);
+    ReadWriteIOUtils.write(K1, outputStream);
+    ReadWriteIOUtils.write(K2, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+    for (int i = 0; i < sizeOfBucket(); i++) ReadWriteIOUtils.write(bucket1[i], outputStream);
+    if (needsTwoBuckets())
+      for (int i = 0; i < sizeOfBucket(); i++) ReadWriteIOUtils.write(bucket2[i], outputStream);
+  }
+
+  public long getCnt() {
+    return cnt;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    cnt = cntInIteration = 0;
+    bitsCounted = 0;
+    prefixOfMedian1 = prefixOfMedian2 = 0;
+    K1 = K2 = -1;
+    bucket1 = null;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    return false;
+    //    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    //    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    //    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
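
MedianAggrResult is a multi-pass radix selection: each iteration histograms the
next 16-bit digit of every value that matches the prefix fixed so far, then
updateFromBucket narrows both the prefix and the remaining rank. The same
selection collapsed into a single in-memory routine, as a sketch assuming
non-negative encoded longs (ours, not the class's actual iteration driver):

    public class BucketSelectDemo {
      // k-th smallest (1-based) of non-negative encoded longs, fixed 16 bits at a time
      static long kthSmallest(long[] encoded, long k) {
        long prefix = 0;
        for (int bitsCounted = 0; bitsCounted < 64; bitsCounted += 16) {
          int shift = 64 - 16 - bitsCounted;
          long prefixMask =
              bitsCounted == 0 ? 0 : ((1L << bitsCounted) - 1) << (64 - bitsCounted);
          long[] bucket = new long[1 << 16];
          for (long v : encoded) {
            if ((v & prefixMask) == prefix) {
              bucket[(int) ((v >>> shift) & 0xFFFF)]++;
            }
          }
          long seen = 0;
          for (int i = 0; i < bucket.length; i++) {
            if (seen + bucket[i] >= k) {
              prefix |= (long) i << shift; // this 16-bit digit of the answer is now fixed
              k -= seen;                   // remaining rank inside the chosen bucket
              break;
            }
            seen += bucket[i];
          }
        }
        return prefix;
      }

      public static void main(String[] args) {
        long[] xs = {5, 1, 9, 7, 3, 3, 8};
        System.out.println(kthSmallest(xs, 4)); // sorted: 1 3 3 5 7 8 9 -> prints 5
      }
    }
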
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/OptimizedMedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/OptimizedMedianAggrResult.java
new file mode 100644
index 0000000000..8d0cc69367
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/OptimizedMedianAggrResult.java
@@ -0,0 +1,429 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+public class OptimizedMedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private long cnt;
+  private int bitsCounted;
+  private long prefixOfMedian1, prefixOfMedian2; // prefixes of the (n/2)-th and (n/2+1)-th smallest values
+  private long K1, K2; // remaining 1-based ranks of those values among the numbers sharing each prefix
+  private long[] bucket1, bucket2;
+
+  private boolean isSmallAmount;
+  private HashSet<Long> valueSet;
+  private HashMap<Long, Long> valueCount;
+  private boolean hasFinalResult;
+  static int smallAmount = 65536; // compute the median directly while valueSet has at most this many distinct values
+
+  public int bitsOfBucket() {
+    return 16;
+  }
+
+  private int sizeOfBucket() {
+    return 1 << bitsOfBucket();
+  }
+
+  private int maskOfBucket() {
+    return (1 << bitsOfBucket()) - 1;
+  }
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType() - bitsCounted));
+  }
+
+  private int bitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean needsTwoBuckets() {
+    return K2 != -1;
+  }
+
+  public OptimizedMedianAggrResult(TSDataType seriesDataType) throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_OPT);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // map numeric values of any supported type to unsigned-ordered longs, preserving relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        return (float) data >= 0f ? longBits : longBits ^ 0x7FFFFFFFL;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7FFFFFFFL;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FFFFFFFFFFFFFFFL;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private int longBitsToIndex(long longBits) {
+    return (int)
+        ((longBits >>> (bitsOfDataType() - bitsOfBucket() - bitsCounted)) & maskOfBucket());
+  }
+
+  private void updateBucketFromData(Object data, long times) {
+    long longBits = dataToLongBits(data);
+    int index = longBitsToIndex(longBits);
+    if ((longBits & maskOfPrefix()) == prefixOfMedian1) bucket1[index] += times;
+    if (needsTwoBuckets() && (longBits & maskOfPrefix()) == prefixOfMedian2)
+      bucket2[index] += times;
+    if (bitsCounted == 0) {
+      longBits ^= 1L << 63; // flip the sign bit so signed Long ordering matches the unsigned encoding
+      cnt += times;
+      if (isSmallAmount) {
+        if (!valueSet.add(longBits)) {
+          valueCount.put(longBits, valueCount.get(longBits) + times);
+        } else {
+          if (valueSet.size() > smallAmount) {
+            isSmallAmount = false;
+          }
+          valueCount.put(longBits, times);
+        }
+      }
+    }
+  }
+
+  private Map<String, Long> updateFromBucket(long[] bucket, long K, long prefixOfMedian) {
+    long tmpCnt = 0;
+    int p = 0;
+    for (int i = 0; i < sizeOfBucket(); i++) {
+      tmpCnt += bucket[i];
+      if (tmpCnt >= K) {
+        p = i;
+        break;
+      }
+    }
+    //    System.out.println("[updateFromBucket] p:" + p);
+    Map<String, Long> result = new HashMap<>();
+    result.put("K", K - (tmpCnt - bucket[p]));
+    result.put(
+        "prefixOfMedian",
+        prefixOfMedian | ((long) p << (bitsOfDataType() - bitsOfBucket() - bitsCounted)));
+    //    System.out.println(
+    //        "[updateFromBucket] prefixOfMedian:" + ((prefixOfMedian << bitsOfBucket()) | p));
+    return result;
+  }
+
+  @Override
+  public void startIteration() {
+    System.out.println(
+        "[DEBUG 1]:startIteration value:"
+            + getValue()
+            + " bitsCounted:"
+            + bitsCounted
+            + " 1,2:"
+            + prefixOfMedian1
+            + " "
+            + prefixOfMedian2);
+    if (bucket1 != null) Arrays.fill(bucket1, 0);
+    else this.bucket1 = new long[sizeOfBucket()];
+    if (bucket2 != null) Arrays.fill(bucket2, 0);
+  }
+
+  @Override
+  public void finishIteration() {
+    if (cnt == 0) return;
+
+    if (bitsCounted == 0) {
+      K1 = (cnt + 1) >> 1;
+      if ((cnt & 1) == 0) {
+        K2 = (cnt >> 1) + 1;
+      } else K2 = -1;
+
+      // TODO : just sort when the amount of data is small enough
+      if (isSmallAmount) {
+        List<Long> valueList = new ArrayList<>(valueSet);
+        Collections.sort(valueList);
+        long value1 = 0, value2 = 0, valueSum = 0;
+        for (Long value : valueList) {
+          if (valueSum < K1 && valueSum + valueCount.get(value) >= K1) value1 = value;
+          if (valueSum < K2 && valueSum + valueCount.get(value) >= K2) value2 = value;
+          valueSum += valueCount.get(value);
+        }
+        value1 ^= 1L << 63;
+        value2 ^= 1L << 63; // undo the sign-bit flip applied in updateBucketFromData
+        //        System.out.println("[DEBUG] index1:"+value1+"  "+longBitsToResult(value1));
+        if (!needsTwoBuckets()) setDoubleValue(longBitsToResult(value1));
+        else setDoubleValue(0.5 * (longBitsToResult(value1) + longBitsToResult(value2)));
+        hasFinalResult = true;
+        //        System.out.println(
+        //            "[DEBUG] cnt:" + cnt + " valueSet_size:" + valueSet.size() + " ans:" +
+        //                getDoubleValue());
+        //        for (Long value : valueList){
+        //          System.out.println("[DEBUG]..."+value+"  "+longBitsToResult(value^(1L<<63)));
+        //        }
+      }
+
+      if (K2 != -1) bucket2 = Arrays.copyOf(bucket1, sizeOfBucket());
+    }
+    Map<String, Long> result1 = updateFromBucket(bucket1, K1, prefixOfMedian1);
+    K1 = result1.get("K");
+    prefixOfMedian1 = result1.get("prefixOfMedian");
+    //    Arrays.fill(bucket1, 0);
+
+    // TODO: optimize. unnecessary when prefixOfMedian1==prefixOfMedian2
+    if (needsTwoBuckets()) {
+      Map<String, Long> result2 = updateFromBucket(bucket2, K2, prefixOfMedian2);
+      K2 = result2.get("K");
+      prefixOfMedian2 = result2.get("prefixOfMedian");
+      //      Arrays.fill(bucket2, 0);
+    }
+
+    bitsCounted += bitsOfBucket();
+    if (bitsCounted == bitsOfDataType()) {
+      if (!needsTwoBuckets()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+      else
+        setDoubleValue(
+            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+      hasFinalResult = true;
+      //      System.out.println(
+      //          "median1: "
+      //              + longBitsToResult(prefixOfMedian1)
+      //              + "   median2: "
+      //              + longBitsToResult(prefixOfMedian2));
+    }
+
+    //    System.out.println("\t\t[MEDIAN]"+this.hashCode()+"  finishIteration "+bitsCounted+"
+    // "+bitsOfDataType());
+    //        System.out.println(
+    //            "K1: "
+    //                + K1
+    //                + " K2: "
+    //                + K2
+    //                + "    cnt:"
+    //                + cnt
+    //                + "|| prefixOfMedian1:"
+    //                + prefixOfMedian1
+    //                + "  prefixOfMedian2:"
+    //                + prefixOfMedian2);
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return bitsCounted == bitsOfDataType() || hasFinalResult;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      updateBucketFromData(minVal, statistics.getCount());
+    } /*else
+      throw new QueryProcessException("Failed to update median aggregation result from statistics.");*/
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateBucketFromData(batchIterator.currentValue(), 1);
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateBucketFromData(values[i], 1);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    //    List<Object> tmp = new ArrayList<>();
+    while (valueIterator.hasNext()) {
+      updateBucketFromData(valueIterator.next(), 1);
+      //      Object tmpObj = valueIterator.next();
+      //      updateBucketFromData(tmpObj, 1);
+      //      tmp.add(tmpObj);
+    }
+    //
+    // System.out.println("\t\t[MEDIAN]"+this.hashCode()+"[updateResultUsingValues]"+tmp.toString());
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType() / bitsOfBucket();
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    System.out.println("[DEBUG] [merge] " + this.getResult() + "  " + another.getResult());
+    // merge not supported
+    //        throw new QueryProcessException("Can't merge MedianAggregateResult");
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.cnt = buffer.getLong();
+    this.bitsCounted = buffer.getInt();
+    this.K1 = buffer.getLong();
+    this.K2 = buffer.getLong();
+    this.prefixOfMedian1 = buffer.getLong();
+    this.prefixOfMedian2 = buffer.getLong();
+
+    this.bucket1 = new long[sizeOfBucket()];
+    for (int i = 0; i < sizeOfBucket(); i++) this.bucket1[i] = buffer.getLong();
+    if (needsTwoBuckets()) {
+      this.bucket2 = new long[sizeOfBucket()];
+      for (int i = 0; i < sizeOfBucket(); i++) this.bucket2[i] = buffer.getLong();
+    }
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(cnt, outputStream);
+    ReadWriteIOUtils.write(bitsCounted, outputStream);
+    ReadWriteIOUtils.write(K1, outputStream);
+    ReadWriteIOUtils.write(K2, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+    for (int i = 0; i < sizeOfBucket(); i++) ReadWriteIOUtils.write(bucket1[i], outputStream);
+    if (needsTwoBuckets())
+      for (int i = 0; i < sizeOfBucket(); i++) ReadWriteIOUtils.write(bucket2[i], outputStream);
+  }
+
+  public long getCnt() {
+    return cnt;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    cnt = 0;
+    bitsCounted = 0;
+    prefixOfMedian1 = prefixOfMedian2 = 0;
+    K1 = K2 = -1;
+    bucket1 = bucket2 = null;
+    isSmallAmount = true;
+    valueSet = new HashSet<>();
+    valueCount = new HashMap<>();
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
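
The isSmallAmount shortcut above answers exactly from per-value counts while few
distinct values have been seen. A compact sketch of that path (ours; it uses one
TreeMap where the class keeps a HashSet plus HashMap and sorts at the end):

    import java.util.Map;
    import java.util.TreeMap;

    public class SmallDomainMedianDemo {
      static double medianOfCounts(TreeMap<Long, Long> counts, long n) {
        long k1 = (n + 1) >> 1;                // lower median rank
        long k2 = (n & 1) == 0 ? k1 + 1 : k1;  // upper median rank (== k1 when n is odd)
        long seen = 0;
        Long v1 = null, v2 = null;
        for (Map.Entry<Long, Long> e : counts.entrySet()) {
          long next = seen + e.getValue();
          if (v1 == null && next >= k1) v1 = e.getKey();
          if (v2 == null && next >= k2) v2 = e.getKey();
          seen = next;
        }
        return 0.5 * (v1 + v2);
      }

      public static void main(String[] args) {
        TreeMap<Long, Long> counts = new TreeMap<>();
        for (long x : new long[] {4, 1, 2, 2, 9, 4}) counts.merge(x, 1L, Long::sum);
        System.out.println(medianOfCounts(counts, 6)); // ranks 3 and 4 are 2 and 4 -> 3.0
      }
    }
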
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_2_MedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_2_MedianAggrResult.java
new file mode 100644
index 0000000000..4cabc611ab
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_2_MedianAggrResult.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+public class Optimized_2_MedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private long cnt; // equals the total point count n once the first iteration finishes
+  private int bitsCounted;
+  private int bitsOfBucket;
+  private long K1, K2;
+  private long prefixOfMedian1, prefixOfMedian2; // prefixOfMedian2 is only needed when n is even
+  private long maxWithPrefix1, minWithPrefix2; // extremes tracked once the two medians fall under different prefixes
+  private TreeMap<Long, Long> treeMap;
+  static int smallAmount = 1 << 16; // cap on treeMap entries; rebuildTreeMap() merges buckets when exceeded
+
+  private boolean hasFinalResult;
+
+  private long sizeOfBucket() {
+    return 1L << bitsOfBucket;
+  }
+
+  private long maskOfBucket() {
+    return bitsOfBucket < 64 ? ((1L << bitsOfBucket) - 1) : -1L;
+  }
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType() - bitsCounted));
+  }
+
+  private int bitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (cnt & 1) == 0;
+  }
+
+  private boolean hasTwoDividedMedians() {
+    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+  }
+
+  public Optimized_2_MedianAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_OPT_2);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // Map INT32/FLOAT/INT64/DOUBLE values to long bit codes intended to preserve the values' relative order
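+  // Worked example, INT32 case: Integer.MIN_VALUE -> 0, -1 -> 0x7FFFFFFF,
+  // 0 -> 0x80000000, Integer.MAX_VALUE -> 0xFFFFFFFF; comparing the returned
+  // longs therefore reproduces the numeric order of the original ints.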
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private long longBitsToIndex(long longBits) {
+    return ((longBits >>> (bitsOfDataType() - bitsOfBucket - bitsCounted)) & maskOfBucket());
+  }
+
+  private void updateStatusFromData(Object data, long times) {
+    if (bitsCounted == 0) cnt += times; // n is only counted on the first pass
+    long longBits = dataToLongBits(data);
+    if ((longBits & maskOfPrefix()) != prefixOfMedian1
+        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+    if (hasTwoDividedMedians()) {
+      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+      return;
+    }
+
+    while (treeMap.size() > smallAmount) rebuildTreeMap();
+    long dataIndex = longBitsToIndex(longBits);
+    if (bitsOfBucket == 64) dataIndex ^= 1L << 63; // flip the sign bit so signed TreeMap order matches unsigned order
+    treeMap.merge(dataIndex, times, Long::sum);
+  }
+
+  // Halve the bucket resolution: drop the lowest index bit and merge the
+  // counts of buckets that collide, shrinking the map back toward smallAmount.
+  private void rebuildTreeMap() {
+    bitsOfBucket -= 1;
+    TreeMap<Long, Long> newTreeMap = new TreeMap<>();
+    for (Map.Entry<Long, Long> entry : treeMap.entrySet()) {
+      long newIndex =
+          bitsOfBucket < 63 ? entry.getKey() >>> 1 : (entry.getKey() ^ (1L << 63)) >>> 1;
+      newTreeMap.merge(newIndex, entry.getValue(), Long::sum);
+    }
+    treeMap = newTreeMap;
+  }
+
+  @Override
+  public void startIteration() {
+    bitsOfBucket = bitsOfDataType() - bitsCounted;
+    treeMap = new TreeMap<>();
+    if (bitsCounted > 0 && hasTwoDividedMedians()) {
+      maxWithPrefix1 = Long.MIN_VALUE;
+      minWithPrefix2 = Long.MAX_VALUE;
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    if (cnt == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    if (hasTwoDividedMedians()) {
+      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+      hasFinalResult = true;
+      return;
+    }
+    if (bitsCounted == 0) { // first pass: fix the target ranks
+      K1 = (cnt + 1) >> 1; // rank of the lower median
+      K2 = hasTwoMedians() ? (K1 + 1) : K1; // rank of the upper median
+    }
+    Set<Map.Entry<Long, Long>> entrySet = treeMap.entrySet();
+    long sum = 0, index1 = -1, index2 = -1;
+    for (Map.Entry<Long, Long> entry : entrySet) {
+      sum += entry.getValue();
+      if (sum >= K1 && index1 == -1) { // first bucket whose prefix sum reaches K1
+        K1 -= sum - entry.getValue(); // K1 becomes the rank inside this bucket
+        index1 = entry.getKey();
+        if (bitsOfBucket == 64) index1 ^= 1L << 63; // undo the sign flip
+        prefixOfMedian1 |= (index1 << (bitsOfDataType() - bitsOfBucket - bitsCounted));
+      }
+      if (sum >= K2 && index2 == -1) {
+        K2 -= sum - entry.getValue();
+        index2 = entry.getKey();
+        if (bitsOfBucket == 64) index2 ^= 1L << 63; // undo the sign flip
+        prefixOfMedian2 |= (index2 << (bitsOfDataType() - bitsOfBucket - bitsCounted));
+      }
+    }
+
+    bitsCounted += bitsOfBucket;
+    if (bitsCounted == bitsOfDataType()) { // every bit is fixed: the median is determined
+      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+      else
+        setDoubleValue(
+            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && cnt > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      updateStatusFromData(minVal, statistics.getCount());
+    } // chunks with min != max never reach here; see canUpdateFromStatistics below
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue(), 1);
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i], 1);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next(), 1);
+    }
+  }
+
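+  // Each pass fixes at least 16 more bits of the prefix (the treeMap never
+  // keeps more than smallAmount = 2^16 buckets, so bitsOfBucket never drops
+  // below 16), hence at most bitsOfDataType() / 16 passes are needed.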
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType() / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    // TODO: merging partial results from another node is not yet supported
+    // for this multi-pass aggregation, so incoming state is ignored.
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.cnt = buffer.getLong();
+    this.bitsCounted = buffer.getInt();
+    this.prefixOfMedian1 = buffer.getLong();
+    this.prefixOfMedian2 = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(cnt, outputStream);
+    ReadWriteIOUtils.write(bitsCounted, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+    // TODO
+  }
+
+  public long getCnt() {
+    return cnt;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    cnt = 0;
+    bitsCounted = 0;
+    prefixOfMedian1 = prefixOfMedian2 = 0;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    // Statistics can replace a scan of a chunk only when all of its values are equal.
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
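
The Optimized_* classes in this commit all refine the median's bit prefix one
pass at a time: each pass re-scans the series, histograms the surviving values
by their next block of high bits, locates the bucket holding the k-th value,
and appends that bucket's index to the prefix. A minimal standalone sketch of
the idea (illustrative only, not the committed code), specialized to a
non-empty int[], the lower median, and a fixed 8 bits per pass, where the
classes above adapt the pass width and track both medians:

    import java.util.Map;
    import java.util.TreeMap;

    public class BucketMedianSketch {
      public static int lowerMedian(int[] data) {
        long k = (data.length + 1) / 2; // rank of the lower median
        long prefix = 0;                // high bits of the answer fixed so far
        for (int bitsFixed = 0; bitsFixed < 32; bitsFixed += 8) {
          long prefixMask =
              bitsFixed == 0 ? 0 : ((1L << bitsFixed) - 1) << (32 - bitsFixed);
          TreeMap<Long, Long> buckets = new TreeMap<>();
          for (int v : data) {
            long u = (long) v + (1L << 31); // order-preserving unsigned code
            if ((u & prefixMask) != prefix) continue; // outside candidate range
            long idx = (u >>> (32 - bitsFixed - 8)) & 0xFF; // next 8 bits
            buckets.merge(idx, 1L, Long::sum);
          }
          long sum = 0;
          for (Map.Entry<Long, Long> e : buckets.entrySet()) {
            sum += e.getValue();
            if (sum >= k) {            // the k-th value falls in this bucket
              k -= sum - e.getValue(); // k becomes the rank inside the bucket
              prefix |= e.getKey() << (32 - bitsFixed - 8);
              break;
            }
          }
        }
        return (int) (prefix - (1L << 31)); // decode the unsigned code
      }
    }

For example, lowerMedian(new int[] {5, -3, 9, 1}) returns 1, the value of
rank k = 2 among -3, 1, 5, 9.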
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_3_MedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_3_MedianAggrResult.java
new file mode 100644
index 0000000000..fce86279e8
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_3_MedianAggrResult.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.db.utils.datastructure.FixedTreap;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class Optimized_3_MedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private long cnt; // equals n, the total number of points, after the first pass
+  private int bitsOfDataType,
+      bitsCounted,
+      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+  private long maskConcerned;
+  private long K1, K2;
+  private long prefixOfMedian1, prefixOfMedian2; // prefixOfMedian2 is only needed when n is even
+  private long maxWithPrefix1, minWithPrefix2; // used when the two medians fall under different prefixes
+  private FixedTreap treap;
+
+  private boolean hasFinalResult;
+  private boolean hasTwoDividedMedians;
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+  }
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (cnt & 1) == 0;
+  }
+
+  private boolean hasTwoDividedMedians() {
+    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+  }
+
+  public Optimized_3_MedianAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_OPT_3);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // Map INT32/FLOAT/INT64/DOUBLE values to long bit codes intended to preserve the values' relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private double longBitsToResult(long longBits) throws UnSupportedDataTypeException {
+    switch (seriesDataType) {
+      case INT32:
+        return (double) (longBits - (1L << 31));
+      case FLOAT:
+        longBits = (longBits >>> 31) > 0 ? longBits : longBits ^ 0x7F800000L;
+        return Float.intBitsToFloat((int) (longBits - (1L << 31)));
+      case INT64:
+        return (double) (longBits - (1L << 63));
+      case DOUBLE:
+        longBits = (longBits >>> 63) > 0 ? longBits : longBits ^ 0x7FF0000000000000L;
+        return Double.longBitsToDouble(longBits - (1L << 63));
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private void updateStatusFromData(Object data, long times) {
+    long longBits = dataToLongBits(data);
+    if ((longBits & maskOfPrefix()) != prefixOfMedian1
+        && (longBits & maskOfPrefix()) != prefixOfMedian2) return;
+    if (bitsCounted == 0) cnt += times; // n is only counted on the first pass
+    if (hasTwoDividedMedians) {
+      if ((longBits & maskOfPrefix()) == prefixOfMedian1)
+        maxWithPrefix1 = Math.max(maxWithPrefix1, longBits);
+      else minWithPrefix2 = Math.min(minWithPrefix2, longBits);
+      return;
+    }
+
+    long dataConcerned = longBits & maskConcerned;
+    treap.insert(dataConcerned, times);
+  }
+
+  @Override
+  public void startIteration() {
+    bitsConcerned = bitsOfDataType - bitsCounted;
+    maskConcerned = bitsConcerned == 64 ? -1L : ((1L << bitsConcerned) - 1);
+
+    hasTwoDividedMedians = hasTwoDividedMedians();
+    if (hasTwoDividedMedians) {
+      maxWithPrefix1 = Long.MIN_VALUE;
+      minWithPrefix2 = Long.MAX_VALUE;
+      return;
+    }
+    // Arm the treap for the bits that remain; the second argument appears to
+    // bound how many of those bits a single pass can resolve.
+    if (bitsCounted == 0) { // first iteration
+      if (bitsOfDataType == 32) treap = new FixedTreap(bitsOfDataType, 31);
+      else treap = new FixedTreap(bitsOfDataType, 16);
+    } else {
+      if (bitsConcerned <= 16) treap.reset(bitsConcerned, 16);
+      else if (bitsConcerned <= 32) treap.reset(bitsConcerned, bitsConcerned - 1);
+      else treap.reset(bitsConcerned, 16);
+    }
+  }
+
+  @Override
+  public void finishIteration() {
+    if (cnt == 0) {
+      hasFinalResult = true;
+      return;
+    }
+    if (hasTwoDividedMedians) {
+      setDoubleValue(0.5 * (longBitsToResult(maxWithPrefix1) + longBitsToResult(minWithPrefix2)));
+      hasFinalResult = true;
+      return;
+    }
+    if (bitsCounted == 0) { // first pass: fix the target ranks
+      K1 = (cnt + 1) >> 1; // rank of the lower median
+      K2 = hasTwoMedians() ? (K1 + 1) : K1; // rank of the upper median
+    }
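+    // FixedTreap.findResultIndex is assumed (from the usage below) to return
+    // four values: the bucket index holding the K1-th value, the count of
+    // values ranked before that bucket, and the same pair for K2.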
+    List<Long> iterationResult = treap.findResultIndex(K1, K2);
+    prefixOfMedian1 |= iterationResult.get(0) << (bitsConcerned - treap.getRemainingBits());
+    K1 -= iterationResult.get(1);
+    prefixOfMedian2 |= iterationResult.get(2) << (bitsConcerned - treap.getRemainingBits());
+    K2 -= iterationResult.get(3);
+    bitsCounted += treap.getRemainingBits();
+    if (bitsCounted == bitsOfDataType) { // every bit is fixed: the median is determined
+      if (!hasTwoMedians()) setDoubleValue(longBitsToResult(prefixOfMedian1));
+      else
+        setDoubleValue(
+            0.5 * (longBitsToResult(prefixOfMedian1) + longBitsToResult(prefixOfMedian2)));
+      hasFinalResult = true;
+    }
+  }
+
+  @Override
+  protected boolean hasCandidateResult() {
+    return hasFinalResult && cnt > 0;
+  }
+
+  @Override
+  public Double getResult() {
+    return hasCandidateResult() ? getDoubleValue() : null;
+  }
+
+  @Override
+  public void updateResultFromStatistics(Statistics statistics) {
+    switch (statistics.getType()) {
+      case INT32:
+      case INT64:
+      case FLOAT:
+      case DOUBLE:
+        break;
+      case TEXT:
+      case BOOLEAN:
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format(
+                "Unsupported data type in aggregation MEDIAN : %s", statistics.getType()));
+    }
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    if (minVal.compareTo(maxVal) == 0) {
+      updateStatusFromData(minVal, statistics.getCount());
+    } // chunks with min != max never reach here; see canUpdateFromStatistics below
+  }
+
+  @Override
+  public void updateResultFromPageData(IBatchDataIterator batchIterator) {
+    updateResultFromPageData(batchIterator, Long.MIN_VALUE, Long.MAX_VALUE);
+  }
+
+  @Override
+  public void updateResultFromPageData(
+      IBatchDataIterator batchIterator, long minBound, long maxBound) {
+    while (batchIterator.hasNext()) {
+      if (batchIterator.currentTime() >= maxBound || batchIterator.currentTime() < minBound) {
+        break;
+      }
+      updateStatusFromData(batchIterator.currentValue(), 1);
+      batchIterator.next();
+    }
+  }
+
+  @Override
+  public void updateResultUsingTimestamps(
+      long[] timestamps, int length, IReaderByTimestamp dataReader) throws IOException {
+    Object[] values = dataReader.getValuesInTimestamps(timestamps, length);
+    for (int i = 0; i < length; i++) {
+      if (values[i] != null) {
+        updateStatusFromData(values[i], 1);
+      }
+    }
+  }
+
+  @Override
+  public void updateResultUsingValues(long[] timestamps, int length, ValueIterator valueIterator) {
+    while (valueIterator.hasNext()) {
+      updateStatusFromData(valueIterator.next(), 1);
+    }
+  }
+
+  @Override
+  public int maxIteration() {
+    return bitsOfDataType / 16;
+  }
+
+  @Override
+  public boolean hasFinalResult() {
+    return hasFinalResult;
+  }
+
+  @Override
+  public void merge(AggregateResult another) {
+    // TODO: merging partial results from another node is not yet supported
+    // for this multi-pass aggregation, so incoming state is ignored.
+  }
+
+  @Override
+  protected void deserializeSpecificFields(ByteBuffer buffer) {
+    this.seriesDataType = TSDataType.deserialize(buffer.get());
+    this.cnt = buffer.getLong();
+    this.bitsCounted = buffer.getInt();
+    this.prefixOfMedian1 = buffer.getLong();
+    this.prefixOfMedian2 = buffer.getLong();
+    // TODO
+  }
+
+  @Override
+  protected void serializeSpecificFields(OutputStream outputStream) throws IOException {
+    ReadWriteIOUtils.write(seriesDataType, outputStream);
+    ReadWriteIOUtils.write(cnt, outputStream);
+    ReadWriteIOUtils.write(bitsCounted, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian1, outputStream);
+    ReadWriteIOUtils.write(prefixOfMedian2, outputStream);
+    // TODO
+  }
+
+  public long getCnt() {
+    return cnt;
+  }
+
+  @Override
+  public void reset() {
+    super.reset();
+    cnt = 0;
+    bitsCounted = 0;
+    bitsOfDataType = getBitsOfDataType();
+    prefixOfMedian1 = prefixOfMedian2 = 0;
+    hasTwoDividedMedians = false;
+    hasFinalResult = false;
+  }
+
+  @Override
+  public boolean canUpdateFromStatistics(Statistics statistics) {
+    Comparable<Object> minVal = (Comparable<Object>) statistics.getMinValue();
+    Comparable<Object> maxVal = (Comparable<Object>) statistics.getMaxValue();
+    return (minVal.compareTo(maxVal) == 0);
+  }
+
+  @Override
+  public boolean groupByLevelBeforeAggregation() {
+    return true;
+  }
+}
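
For context on how a multi-pass result is consumed: an AggregateResult whose
maxIteration() exceeds one must be driven through repeated scans of the series
until hasFinalResult() is true. The sketch below shows the expected control
flow only; the real loop lives in the server's aggregation executors, and
chunkStatistics / batchIterator are hypothetical placeholders for the scan:

    Optimized_3_MedianAggrResult result =
        new Optimized_3_MedianAggrResult(TSDataType.DOUBLE);
    for (int pass = 0;
        pass < result.maxIteration() && !result.hasFinalResult();
        pass++) {
      result.startIteration();
      // For each chunk of the series:
      //   if (result.canUpdateFromStatistics(chunkStatistics)) // min == max
      //     result.updateResultFromStatistics(chunkStatistics);
      //   else
      //     result.updateResultFromPageData(batchIterator);    // raw values
      result.finishIteration();
    }
    Double median = result.getResult(); // null if the series was empty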
diff --git a/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_4_MedianAggrResult.java b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_4_MedianAggrResult.java
new file mode 100644
index 0000000000..44b9f188a6
--- /dev/null
+++ b/server/src/main/java/org/apache/iotdb/db/query/aggregation/impl/Optimized_4_MedianAggrResult.java
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.query.aggregation.impl;
+
+import org.apache.iotdb.db.query.aggregation.AggregateResult;
+import org.apache.iotdb.db.query.aggregation.AggregationType;
+import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
+import org.apache.iotdb.db.utils.ValueIterator;
+import org.apache.iotdb.db.utils.quantiles.EclipseCollectionsHashMapForQuantile;
+import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
+import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class Optimized_4_MedianAggrResult extends AggregateResult {
+  private TSDataType seriesDataType;
+  private long cnt; // equals n, the total number of points, after the first pass
+  private int bitsOfDataType,
+      bitsCounted,
+      bitsConcerned; // bitsOfDataType == bitsCounted + bitsConcerned
+  private long maskConcerned;
+  private long K1, K2;
+  private long prefixOfMedian1, prefixOfMedian2; // prefixOfMedian2 is only needed when n is even
+  private long maxWithPrefix1, minWithPrefix2; // used when the two medians fall under different prefixes
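+  // Bucket counts for the current pass; Optimized_4 swaps Optimized_3's treap
+  // for a primitive-specialized hash map (see the import above).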
+  private EclipseCollectionsHashMapForQuantile hashMap;
+
+  private boolean hasFinalResult;
+  private boolean hasTwoDividedMedians;
+
+  private long maskOfPrefix() {
+    return bitsCounted == 0 ? (0) : (((1L << bitsCounted) - 1) << (bitsOfDataType - bitsCounted));
+  }
+
+  private int getBitsOfDataType() {
+    switch (seriesDataType) {
+      case INT32:
+      case FLOAT:
+        return 32;
+      case INT64:
+      case DOUBLE:
+        return 64;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
+  private boolean hasTwoMedians() {
+    return (cnt & 1) == 0;
+  }
+
+  private boolean hasTwoDividedMedians() {
+    return hasTwoMedians() && prefixOfMedian1 != prefixOfMedian2;
+  }
+
+  public Optimized_4_MedianAggrResult(TSDataType seriesDataType)
+      throws UnSupportedDataTypeException {
+    super(TSDataType.DOUBLE, AggregationType.EXACT_MEDIAN_OPT_4);
+    this.seriesDataType = seriesDataType;
+    reset();
+  }
+
+  // Map INT32/FLOAT/INT64/DOUBLE values to long bit codes intended to preserve the values' relative order
+  private long dataToLongBits(Object data) throws UnSupportedDataTypeException {
+    long longBits;
+    switch (seriesDataType) {
+      case INT32:
+        return (int) data + (1L << 31);
+      case FLOAT:
+        longBits = Float.floatToIntBits((float) data) + (1L << 31);
+        return (float) data >= 0f ? longBits : longBits ^ 0x7F800000L;
+      case INT64:
+        return (long) data + (1L << 63);
+      case DOUBLE:
+        longBits = Double.doubleToLongBits((double) data) + (1L << 63);
+        return (double) data >= 0d ? longBits : longBits ^ 0x7FF0000000000000L;
+      default:
+        throw new UnSupportedDataTypeException(
+            String.format("Unsupported data type in aggregation MEDIAN : %s", seriesDataType));
+    }
+  }
+
... 15618 lines suppressed ...