Posted to notifications@skywalking.apache.org by ha...@apache.org on 2022/10/15 00:27:43 UTC

[skywalking-banyandb] branch main updated: Add streaming API and topN aggregator (#164)

This is an automated email from the ASF dual-hosted git repository.

hanahmily pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git


The following commit(s) were added to refs/heads/main by this push:
     new c8bb21a  Add streaming API and topN aggregator (#164)
c8bb21a is described below

commit c8bb21aebf2a5bc189214d9da56d7787b64a6985
Author: Jiajing LU <lu...@gmail.com>
AuthorDate: Sat Oct 15 08:27:38 2022 +0800

    Add streaming API and topN aggregator (#164)
    
    * add streaming API and TopN aggregator
    
    * implement TopNAggregation pre-calculation based on incoming measure data points
    
    * implement TopNAggregation querying.
    
    * add UTs and integration test cases.
---
 api/data/measure.go                                |   6 +
 api/proto/banyandb/database/v1/schema.pb.go        | 225 +++----
 .../banyandb/database/v1/schema.pb.validate.go     |  49 +-
 api/proto/banyandb/database/v1/schema.proto        | 254 ++++----
 api/proto/banyandb/measure/v1/query.pb.go          |   6 +-
 api/proto/banyandb/measure/v1/query.proto          |   6 +-
 api/proto/banyandb/measure/v1/topn.pb.go           |   2 +-
 api/proto/banyandb/measure/v1/topn.proto           |   2 +-
 api/proto/banyandb/model/v1/query.pb.go            |   2 +-
 api/proto/banyandb/model/v1/query.proto            |   2 +-
 .../openapi/banyandb/database/v1/rpc.swagger.json  |  16 +-
 .../openapi/banyandb/measure/v1/rpc.swagger.json   |   8 +-
 .../openapi/banyandb/stream/v1/rpc.swagger.json    |   2 +-
 banyand/liaison/grpc/measure.go                    |  24 +-
 banyand/liaison/http/server.go                     |  31 +-
 banyand/measure/encode.go                          |  31 +-
 banyand/measure/field_flag_test.go                 |   5 +-
 banyand/measure/measure.go                         |  35 +-
 banyand/measure/measure_query.go                   |  38 +-
 banyand/measure/measure_topn.go                    | 681 +++++++++++++++++++++
 banyand/measure/measure_write.go                   |  41 +-
 banyand/measure/metadata.go                        |  58 +-
 banyand/measure/service.go                         |  48 +-
 banyand/metadata/schema/measure.go                 |  17 +
 banyand/metadata/schema/schema.go                  |   8 +
 banyand/query/processor.go                         |  11 +-
 banyand/query/processor_topn.go                    | 439 +++++++++++++
 banyand/query/query.go                             |   7 +
 banyand/tsdb/block.go                              |   6 +-
 banyand/tsdb/scope.go                              |   3 +-
 banyand/tsdb/segment.go                            |   6 +-
 banyand/tsdb/series.go                             |   4 +-
 banyand/tsdb/series_seek_filter.go                 |   2 +-
 banyand/tsdb/series_write.go                       |   4 +-
 banyand/tsdb/seriesdb.go                           |  29 +-
 banyand/tsdb/seriesdb_test.go                      |   2 +-
 dist/LICENSE                                       |   6 +
 .../licenses/license-github.com-emirpasic-gods.txt |  41 ++
 docs/api-reference.md                              |  13 +-
 go.mod                                             |   1 +
 go.sum                                             |   2 +
 pkg/flow/dedup_priority_queue.go                   | 158 +++++
 api/data/measure.go => pkg/flow/op.go              |  32 +-
 pkg/flow/streaming/flow.go                         | 120 ++++
 pkg/flow/streaming/flow_sliding_window.go          | 328 ++++++++++
 pkg/flow/streaming/flow_sliding_window_test.go     | 137 +++++
 pkg/flow/streaming/flow_test.go                    | 240 ++++++++
 pkg/flow/streaming/flow_topn.go                    | 177 ++++++
 pkg/flow/streaming/flow_topn_test.go               | 102 +++
 pkg/flow/streaming/flow_unary.go                   | 109 ++++
 pkg/flow/streaming/sink/slice.go                   |  82 +++
 pkg/flow/streaming/sources/channel.go              |  93 +++
 pkg/flow/streaming/sources/channel_test.go         |  72 +++
 .../flow/streaming/streaming_suite_test.go         |  22 +-
 pkg/flow/types.go                                  | 174 ++++++
 api/data/measure.go => pkg/flow/utils.go           |  36 +-
 pkg/iter/iter.go                                   | 109 ++++
 pkg/pb/v1/write.go                                 |  36 ++
 pkg/query/aggregation/aggregation.go               |  11 +-
 pkg/query/aggregation/function.go                  |   1 +
 pkg/query/logical/expr.go                          |   4 +-
 pkg/query/logical/measure/measure_analyzer.go      |   2 +-
 .../logical/measure/measure_plan_aggregation.go    |   1 +
 pkg/query/logical/measure/measure_plan_top.go      |   1 +
 pkg/query/logical/measure/measure_top.go           |   1 +
 pkg/query/logical/measure/measure_top_test.go      |   1 +
 pkg/schema/metadata.go                             |  32 +-
 pkg/test/flow/slice.go                             |  85 +++
 pkg/test/flow/slice_test.go                        |  63 ++
 pkg/test/measure/etcd.go                           |  43 +-
 .../service_cpm_minute_nogroup_top100.json         |  14 +
 .../service_cpm_minute_top_bottom100.json          |  17 +
 pkg/timestamp/duration.go                          |   1 +
 pkg/timestamp/nano.go                              |   1 +
 pkg/timestamp/range.go                             |   1 +
 .../data/testdata/service_cpm_minute_data1.json    | 158 ++++-
 test/cases/measure/data/want/all.yaml              | 134 +++-
 test/cases/measure/data/want/group_max.yaml        |   2 +-
 test/cases/measure/data/want/limit.yaml            |  62 +-
 test/cases/measure/data/want/order_asc.yaml        | 276 ++++++---
 test/cases/measure/data/want/order_desc.yaml       | 276 ++++++---
 test/cases/measure/data/want/top.yaml              |   2 +-
 test/cases/measure/measure.go                      |   2 +-
 .../data/testdata/service_cpm_minute_data.json     | 182 ++++++
 test/cases/stream/stream.go                        |   8 +-
 test/cases/topn/data/data.go                       |  85 +++
 .../top.yaml => topn/data/input/aggr_desc.yaml}    |  31 +-
 .../want/top.yaml => topn/data/input/asc.yaml}     |  30 +-
 .../data/input/condition_aggr_desc.yaml}           |  37 +-
 .../want/top.yaml => topn/data/input/desc.yaml}    |  30 +-
 .../top.yaml => topn/data/want/aggr_desc.yaml}     |  40 +-
 .../data/want/top.yaml => topn/data/want/asc.yaml} |  62 +-
 .../data/want/condition_aggr_desc.yaml}            |  32 +-
 .../want/top.yaml => topn/data/want/desc.yaml}     |  62 +-
 test/cases/topn/topn.go                            |  46 ++
 test/integration/cold_query/query_suite_test.go    |  26 +-
 test/integration/query/query_suite_test.go         |   8 +
 97 files changed, 5137 insertions(+), 933 deletions(-)

diff --git a/api/data/measure.go b/api/data/measure.go
index a582924..08b3f1b 100644
--- a/api/data/measure.go
+++ b/api/data/measure.go
@@ -34,3 +34,9 @@ var MeasureQueryKindVersion = common.KindVersion{
 	Kind:    "measure-query",
 }
 var TopicMeasureQuery = bus.BiTopic(MeasureQueryKindVersion.String())
+
+var TopNQueryKindVersion = common.KindVersion{
+	Version: "v1",
+	Kind:    "topN-query",
+}
+var TopicTopNQuery = bus.BiTopic(TopNQueryKindVersion.String())
diff --git a/api/proto/banyandb/database/v1/schema.pb.go b/api/proto/banyandb/database/v1/schema.pb.go
index e4e5740..b86658a 100644
--- a/api/proto/banyandb/database/v1/schema.pb.go
+++ b/api/proto/banyandb/database/v1/schema.pb.go
@@ -837,11 +837,13 @@ type TopNAggregation struct {
 	// group_by_tag_names groups data points into statistical counters
 	GroupByTagNames []string `protobuf:"bytes,5,rep,name=group_by_tag_names,json=groupByTagNames,proto3" json:"group_by_tag_names,omitempty"`
 	// criteria select partial data points from measure
-	Criteria []*v11.Criteria `protobuf:"bytes,6,rep,name=criteria,proto3" json:"criteria,omitempty"`
+	Criteria *v11.Criteria `protobuf:"bytes,6,opt,name=criteria,proto3" json:"criteria,omitempty"`
 	// counters_number sets the number of counters to be tracked. The default value is 1000
 	CountersNumber int32 `protobuf:"varint,7,opt,name=counters_number,json=countersNumber,proto3" json:"counters_number,omitempty"`
+	// lru_size defines how many entries are allowed to be maintained in memory
+	LruSize int32 `protobuf:"varint,8,opt,name=lru_size,json=lruSize,proto3" json:"lru_size,omitempty"`
 	// updated_at indicates when the measure is updated
-	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
+	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
 }
 
 func (x *TopNAggregation) Reset() {
@@ -911,7 +913,7 @@ func (x *TopNAggregation) GetGroupByTagNames() []string {
 	return nil
 }
 
-func (x *TopNAggregation) GetCriteria() []*v11.Criteria {
+func (x *TopNAggregation) GetCriteria() *v11.Criteria {
 	if x != nil {
 		return x.Criteria
 	}
@@ -925,6 +927,13 @@ func (x *TopNAggregation) GetCountersNumber() int32 {
 	return 0
 }
 
+func (x *TopNAggregation) GetLruSize() int32 {
+	if x != nil {
+		return x.LruSize
+	}
+	return 0
+}
+
 func (x *TopNAggregation) GetUpdatedAt() *timestamppb.Timestamp {
 	if x != nil {
 		return x.UpdatedAt
@@ -1265,7 +1274,7 @@ var file_banyandb_database_v1_schema_proto_rawDesc = []byte{
 	0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01,
 	0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
-	0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0xbc, 0x03, 0x0a, 0x0f, 0x54, 0x6f,
+	0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0xd7, 0x03, 0x0a, 0x0f, 0x54, 0x6f,
 	0x70, 0x4e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a,
 	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
 	0x1c, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
@@ -1284,115 +1293,117 @@ var file_banyandb_database_v1_schema_proto_rawDesc = []byte{
 	0x0a, 0x12, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x6e,
 	0x61, 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x6f, 0x75,
 	0x70, 0x42, 0x79, 0x54, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x63,
-	0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+	0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
 	0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76,
 	0x31, 0x2e, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74,
 	0x65, 0x72, 0x69, 0x61, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73,
 	0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63,
-	0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x39, 0x0a,
-	0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75,
-	0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0xd2, 0x04, 0x0a, 0x09, 0x49, 0x6e, 0x64,
-	0x65, 0x78, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
-	0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61,
-	0x6e, 0x64, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65,
-	0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
-	0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04,
-	0x74, 0x61, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61,
-	0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52,
-	0x75, 0x6c, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44,
-	0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e,
-	0x32, 0x28, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61,
-	0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c,
-	0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f,
-	0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
-	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12,
-	0x44, 0x0a, 0x08, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x28, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74,
-	0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75,
-	0x6c, 0x65, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x08, 0x61, 0x6e, 0x61,
-	0x6c, 0x79, 0x7a, 0x65, 0x72, 0x22, 0x3e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a,
-	0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
-	0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x45, 0x45,
-	0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x45, 0x52,
-	0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x4e, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
-	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c,
-	0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, 0x01,
-	0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x47, 0x4c, 0x4f,
-	0x42, 0x41, 0x4c, 0x10, 0x02, 0x22, 0x66, 0x0a, 0x08, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65,
-	0x72, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f, 0x55, 0x4e,
-	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41,
-	0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f, 0x4b, 0x45, 0x59, 0x57, 0x4f, 0x52, 0x44, 0x10,
-	0x01, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f, 0x53, 0x54,
-	0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x4e, 0x41, 0x4c,
-	0x59, 0x5a, 0x45, 0x52, 0x5f, 0x53, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x22, 0x54, 0x0a,
-	0x07, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x07, 0x63, 0x61, 0x74, 0x61,
-	0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x62, 0x61, 0x6e, 0x79,
-	0x61, 0x6e, 0x64, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43,
-	0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x52, 0x07, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x22, 0xc6, 0x02, 0x0a, 0x10, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c,
-	0x65, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61,
-	0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x61, 0x6e,
-	0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
-	0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
-	0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
-	0x09, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a,
-	0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x61, 0x6e, 0x79,
-	0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
-	0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63,
-	0x74, 0x12, 0x35, 0x0a, 0x08, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20,
+	0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x19, 0x0a,
+	0x08, 0x6c, 0x72, 0x75, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52,
+	0x07, 0x6c, 0x72, 0x75, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61,
+	0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+	0x64, 0x41, 0x74, 0x22, 0xd2, 0x04, 0x0a, 0x09, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c,
+	0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+	0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x74,
+	0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+	0x38, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+	0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+	0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x54,
+	0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x62, 0x61,
+	0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+	0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x4c, 0x6f, 0x63,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+	0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20,
 	0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
-	0x07, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69,
-	0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
-	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41,
-	0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
-	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
-	0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xa8, 0x01, 0x0a,
-	0x07, 0x54, 0x61, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x41, 0x47, 0x5f,
-	0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
-	0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x41, 0x47, 0x5f, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x41, 0x47,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x41, 0x52, 0x52,
-	0x41, 0x59, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45,
-	0x5f, 0x49, 0x4e, 0x54, 0x5f, 0x41, 0x52, 0x52, 0x41, 0x59, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14,
-	0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x42, 0x49,
-	0x4e, 0x41, 0x52, 0x59, 0x10, 0x05, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x49, 0x44, 0x10, 0x06, 0x2a, 0x6e, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
-	0x12, 0x15, 0x0a, 0x11, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x49, 0x45, 0x4c, 0x44,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x46,
-	0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x42,
-	0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x03, 0x2a, 0x4e, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43,
-	0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53,
-	0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e,
-	0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x47, 0x4f,
-	0x52, 0x49, 0x4c, 0x4c, 0x41, 0x10, 0x01, 0x2a, 0x54, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x72,
-	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x22, 0x0a, 0x1e,
-	0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x48,
-	0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
-	0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
-	0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x5a, 0x53, 0x54, 0x44, 0x10, 0x01, 0x42, 0x72, 0x0a,
-	0x2a, 0x6f, 0x72, 0x67, 0x2e, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x73, 0x6b, 0x79, 0x77,
-	0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e,
-	0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x5a, 0x44, 0x67, 0x69, 0x74,
-	0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2f, 0x73,
-	0x6b, 0x79, 0x77, 0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x2d, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e,
-	0x64, 0x62, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x61, 0x6e,
-	0x79, 0x61, 0x6e, 0x64, 0x62, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76,
-	0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x61, 0x6e,
+	0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x62,
+	0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+	0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x6e,
+	0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x08, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72,
+	0x22, 0x3e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d,
+	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a,
+	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02,
+	0x22, 0x4e, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x14,
+	0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+	0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49,
+	0x4f, 0x4e, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x4c,
+	0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x02,
+	0x22, 0x66, 0x0a, 0x08, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x14,
+	0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+	0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a,
+	0x45, 0x52, 0x5f, 0x4b, 0x45, 0x59, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11,
+	0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x4e, 0x44, 0x41, 0x52,
+	0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x4e, 0x41, 0x4c, 0x59, 0x5a, 0x45, 0x52, 0x5f,
+	0x53, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x22, 0x54, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x6a,
+	0x65, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x07, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f,
+	0x67, 0x52, 0x07, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc6,
+	0x02, 0x0a, 0x10, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x64,
+	0x69, 0x6e, 0x67, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62,
+	0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64,
+	0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a,
+	0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75,
+	0x6c, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e,
+	0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a,
+	0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x08,
+	0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x62, 0x65, 0x67, 0x69,
+	0x6e, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a,
+	0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70,
+	0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xa8, 0x01, 0x0a, 0x07, 0x54, 0x61, 0x67, 0x54,
+	0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a,
+	0x0f, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
+	0x4e, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x41, 0x52, 0x52, 0x41, 0x59, 0x10, 0x03, 0x12,
+	0x16, 0x0a, 0x12, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x5f,
+	0x41, 0x52, 0x52, 0x41, 0x59, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x41, 0x47, 0x5f, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10,
+	0x05, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x41, 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x44,
+	0x10, 0x06, 0x2a, 0x6e, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12,
+	0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
+	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x46,
+	0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59,
+	0x10, 0x03, 0x2a, 0x4e, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65,
+	0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47,
+	0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
+	0x47, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x47, 0x4f, 0x52, 0x49, 0x4c, 0x4c, 0x41,
+	0x10, 0x01, 0x2a, 0x54, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f,
+	0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x43, 0x4f, 0x4d, 0x50, 0x52,
+	0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e,
+	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x43,
+	0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
+	0x44, 0x5f, 0x5a, 0x53, 0x54, 0x44, 0x10, 0x01, 0x42, 0x72, 0x0a, 0x2a, 0x6f, 0x72, 0x67, 0x2e,
+	0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x73, 0x6b, 0x79, 0x77, 0x61, 0x6c, 0x6b, 0x69, 0x6e,
+	0x67, 0x2e, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+	0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+	0x6f, 0x6d, 0x2f, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2f, 0x73, 0x6b, 0x79, 0x77, 0x61, 0x6c,
+	0x6b, 0x69, 0x6e, 0x67, 0x2d, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62, 0x2f, 0x61, 0x70,
+	0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x61, 0x6e, 0x79, 0x61, 0x6e, 0x64, 0x62,
+	0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/api/proto/banyandb/database/v1/schema.pb.validate.go b/api/proto/banyandb/database/v1/schema.pb.validate.go
index a65fb5a..bd42d8d 100644
--- a/api/proto/banyandb/database/v1/schema.pb.validate.go
+++ b/api/proto/banyandb/database/v1/schema.pb.validate.go
@@ -1038,42 +1038,39 @@ func (m *TopNAggregation) validate(all bool) error {
 
 	// no validation rules for FieldValueSort
 
-	for idx, item := range m.GetCriteria() {
-		_, _ = idx, item
-
-		if all {
-			switch v := interface{}(item).(type) {
-			case interface{ ValidateAll() error }:
-				if err := v.ValidateAll(); err != nil {
-					errors = append(errors, TopNAggregationValidationError{
-						field:  fmt.Sprintf("Criteria[%v]", idx),
-						reason: "embedded message failed validation",
-						cause:  err,
-					})
-				}
-			case interface{ Validate() error }:
-				if err := v.Validate(); err != nil {
-					errors = append(errors, TopNAggregationValidationError{
-						field:  fmt.Sprintf("Criteria[%v]", idx),
-						reason: "embedded message failed validation",
-						cause:  err,
-					})
-				}
+	if all {
+		switch v := interface{}(m.GetCriteria()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, TopNAggregationValidationError{
+					field:  "Criteria",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
 			}
-		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+		case interface{ Validate() error }:
 			if err := v.Validate(); err != nil {
-				return TopNAggregationValidationError{
-					field:  fmt.Sprintf("Criteria[%v]", idx),
+				errors = append(errors, TopNAggregationValidationError{
+					field:  "Criteria",
 					reason: "embedded message failed validation",
 					cause:  err,
-				}
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetCriteria()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return TopNAggregationValidationError{
+				field:  "Criteria",
+				reason: "embedded message failed validation",
+				cause:  err,
 			}
 		}
-
 	}
 
 	// no validation rules for CountersNumber
 
+	// no validation rules for LruSize
+
 	if all {
 		switch v := interface{}(m.GetUpdatedAt()).(type) {
 		case interface{ ValidateAll() error }:
diff --git a/api/proto/banyandb/database/v1/schema.proto b/api/proto/banyandb/database/v1/schema.proto
index 3fa94d3..b4f3c9f 100644
--- a/api/proto/banyandb/database/v1/schema.proto
+++ b/api/proto/banyandb/database/v1/schema.proto
@@ -27,179 +27,181 @@ import "banyandb/common/v1/common.proto";
 import "banyandb/model/v1/query.proto";
 
 enum TagType {
-    TAG_TYPE_UNSPECIFIED=0;
-    TAG_TYPE_STRING = 1;
-    TAG_TYPE_INT = 2;
-    TAG_TYPE_STRING_ARRAY = 3;
-    TAG_TYPE_INT_ARRAY = 4;
-    TAG_TYPE_DATA_BINARY = 5;
-    TAG_TYPE_ID = 6;
+  TAG_TYPE_UNSPECIFIED = 0;
+  TAG_TYPE_STRING = 1;
+  TAG_TYPE_INT = 2;
+  TAG_TYPE_STRING_ARRAY = 3;
+  TAG_TYPE_INT_ARRAY = 4;
+  TAG_TYPE_DATA_BINARY = 5;
+  TAG_TYPE_ID = 6;
 }
 
 message TagFamilySpec {
-    string name = 1;
-    // tags defines accepted tags
-    repeated TagSpec tags = 2;
+  string name = 1;
+  // tags defines accepted tags
+  repeated TagSpec tags = 2;
 }
 
 message TagSpec {
-    string name = 1;
-    TagType type = 2;
-    // indexed_only indicates whether the tag is stored
-    // True: It's indexed only, but not stored 
-    // False: it's stored and indexed
-    bool indexed_only = 3;
+  string name = 1;
+  TagType type = 2;
+  // indexed_only indicates whether the tag is stored
+  // True: It's indexed only, but not stored
+  // False: it's stored and indexed
+  bool indexed_only = 3;
 }
 
 // Stream intends to store streaming data, for example, traces or logs
 message Stream {
-    // metadata is the identity of a trace series
-    common.v1.Metadata metadata = 1;
-    // tag_families
-    repeated TagFamilySpec tag_families = 2;
-    // entity indicates how to generate a series and shard a stream
-    Entity entity = 3;
-    // updated_at indicates when the stream is updated
-    google.protobuf.Timestamp updated_at = 4;
+  // metadata is the identity of a trace series
+  common.v1.Metadata metadata = 1;
+  // tag_families
+  repeated TagFamilySpec tag_families = 2;
+  // entity indicates how to generate a series and shard a stream
+  Entity entity = 3;
+  // updated_at indicates when the stream is updated
+  google.protobuf.Timestamp updated_at = 4;
 }
 
 message Entity {
-    repeated string tag_names = 1;
+  repeated string tag_names = 1;
 }
 
 enum FieldType {
-    FIELD_TYPE_UNSPECIFIED = 0;
-    FIELD_TYPE_STRING = 1;
-    FIELD_TYPE_INT = 2;
-    FIELD_TYPE_DATA_BINARY = 3;
+  FIELD_TYPE_UNSPECIFIED = 0;
+  FIELD_TYPE_STRING = 1;
+  FIELD_TYPE_INT = 2;
+  FIELD_TYPE_DATA_BINARY = 3;
 }
 
 enum EncodingMethod {
-    ENCODING_METHOD_UNSPECIFIED = 0;
-    ENCODING_METHOD_GORILLA = 1;
+  ENCODING_METHOD_UNSPECIFIED = 0;
+  ENCODING_METHOD_GORILLA = 1;
 }
 
 enum CompressionMethod {
-    COMPRESSION_METHOD_UNSPECIFIED = 0;
-    COMPRESSION_METHOD_ZSTD = 1;
+  COMPRESSION_METHOD_UNSPECIFIED = 0;
+  COMPRESSION_METHOD_ZSTD = 1;
 }
 
 // FieldSpec is the specification of field
 message FieldSpec {
-    // name is the identity of a field
-    string name = 1;
-    // field_type denotes the type of field value
-    FieldType field_type = 2;
-    // encoding_method indicates how to encode data during writing
-    EncodingMethod encoding_method = 3;
-    // compression_method indicates how to compress data during writing
-    CompressionMethod compression_method = 4;
+  // name is the identity of a field
+  string name = 1;
+  // field_type denotes the type of field value
+  FieldType field_type = 2;
+  // encoding_method indicates how to encode data during writing
+  EncodingMethod encoding_method = 3;
+  // compression_method indicates how to compress data during writing
+  CompressionMethod compression_method = 4;
 }
 
 // Measure intends to store data point
 message Measure {
-    // metadata is the identity of a measure
-    common.v1.Metadata metadata = 1;
-    // tag_families are for filter measures
-    repeated TagFamilySpec tag_families = 2;
-    // fields denote measure values
-    repeated FieldSpec fields = 3;
-    // entity indicates which tags will be to generate a series and shard a measure
-    Entity entity = 4;
-    // interval indicates how frequently to send a data point
-    // valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d".
-    string interval = 5;
-    // updated_at indicates when the measure is updated
-    google.protobuf.Timestamp updated_at = 6;
+  // metadata is the identity of a measure
+  common.v1.Metadata metadata = 1;
+  // tag_families are for filtering measures
+  repeated TagFamilySpec tag_families = 2;
+  // fields denote measure values
+  repeated FieldSpec fields = 3;
+  // entity indicates which tags will be used to generate a series and shard a measure
+  Entity entity = 4;
+  // interval indicates how frequently to send a data point
+  // valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d".
+  string interval = 5;
+  // updated_at indicates when the measure is updated
+  google.protobuf.Timestamp updated_at = 6;
 }
 
 // TopNAggregation generates offline TopN statistics for a measure's TopN approximation
 message TopNAggregation {
-    // metadata is the identity of an aggregation
-    common.v1.Metadata metadata = 1;
-    // source_measure denotes the data source of this aggregation
-    common.v1.Metadata source_measure = 2;
-    // field_name is the name of field used for ranking
-    string field_name = 3;
-    // field_value_sort indicates how to sort fields
-    // ASC: bottomN
-    // DESC: topN
-    // UNSPECIFIED: topN + bottomN
-    model.v1.Sort field_value_sort = 4;
-    // group_by_tag_names groups data points into statistical counters
-    repeated string group_by_tag_names = 5;
-    // criteria select partial data points from measure
-    repeated model.v1.Criteria criteria = 6;
-    // counters_number sets the number of counters to be tracked. The default value is 1000
-    int32 counters_number = 7;
-    // updated_at indicates when the measure is updated
-    google.protobuf.Timestamp updated_at = 8;
+  // metadata is the identity of an aggregation
+  common.v1.Metadata metadata = 1;
+  // source_measure denotes the data source of this aggregation
+  common.v1.Metadata source_measure = 2;
+  // field_name is the name of field used for ranking
+  string field_name = 3;
+  // field_value_sort indicates how to sort fields
+  // ASC: bottomN
+  // DESC: topN
+  // UNSPECIFIED: topN + bottomN
+  model.v1.Sort field_value_sort = 4;
+  // group_by_tag_names groups data points into statistical counters
+  repeated string group_by_tag_names = 5;
+  // criteria select partial data points from measure
+  model.v1.Criteria criteria = 6;
+  // counters_number sets the number of counters to be tracked. The default value is 1000
+  int32 counters_number = 7;
+  // lru_size defines how many entries are allowed to be maintained in memory
+  int32 lru_size = 8;
+  // updated_at indicates when the measure is updated
+  google.protobuf.Timestamp updated_at = 9;
 }
 
 // IndexRule defines how to generate indices based on tags and the index type
 // IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.
 message IndexRule {
-    // metadata define the rule's identity
-    common.v1.Metadata metadata = 1;
-    // tags are the combination that refers to an indexed object
-    // If the elements in tags are more than 1, the object will generate a multi-tag index
-    // Caveat: All tags in a multi-tag MUST have an identical IndexType
-    repeated string tags = 2;
-    // Type determine the index structure under the hood
-    enum Type {
-        TYPE_UNSPECIFIED = 0;
-        TYPE_TREE = 1;
-        TYPE_INVERTED = 2;
-    }
-    // type is the IndexType of this IndexObject.
-    Type type = 3;
-    enum Location {
-        LOCATION_UNSPECIFIED = 0;
-        LOCATION_SERIES = 1;
-        LOCATION_GLOBAL = 2;
-    }
-    // location indicates where to store index.
-    Location location = 4;
-    // updated_at indicates when the IndexRule is updated
-    google.protobuf.Timestamp updated_at = 5;
-    enum Analyzer {
-        ANALYZER_UNSPECIFIED = 0;
-        // Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.
-        ANALYZER_KEYWORD = 1;
-        // Standard analyzer provides grammar based tokenization
-        ANALYZER_STANDARD = 2;
-        // Simple analyzer breaks text into tokens at any non-letter character, 
-        // such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, 
-        // and changes uppercase to lowercase.
-        ANALYZER_SIMPLE = 3;
-    }
-    // analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.
-    Analyzer analyzer = 6;
+  // metadata defines the rule's identity
+  common.v1.Metadata metadata = 1;
+  // tags are the combination that refers to an indexed object
+  // If the elements in tags are more than 1, the object will generate a multi-tag index
+  // Caveat: All tags in a multi-tag MUST have an identical IndexType
+  repeated string tags = 2;
+  // Type determines the index structure under the hood
+  enum Type {
+    TYPE_UNSPECIFIED = 0;
+    TYPE_TREE = 1;
+    TYPE_INVERTED = 2;
+  }
+  // type is the IndexType of this IndexObject.
+  Type type = 3;
+  enum Location {
+    LOCATION_UNSPECIFIED = 0;
+    LOCATION_SERIES = 1;
+    LOCATION_GLOBAL = 2;
+  }
+  // location indicates where to store index.
+  Location location = 4;
+  // updated_at indicates when the IndexRule is updated
+  google.protobuf.Timestamp updated_at = 5;
+  enum Analyzer {
+    ANALYZER_UNSPECIFIED = 0;
+    // Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.
+    ANALYZER_KEYWORD = 1;
+    // Standard analyzer provides grammar based tokenization
+    ANALYZER_STANDARD = 2;
+    // Simple analyzer breaks text into tokens at any non-letter character,
+    // such as numbers, spaces, hyphens and apostrophes, discards non-letter characters,
+    // and changes uppercase to lowercase.
+    ANALYZER_SIMPLE = 3;
+  }
+  // analyzer analyzes the tag value to support full-text searching for TYPE_INVERTED indices.
+  Analyzer analyzer = 6;
 }
 
 // Subject defines which stream or measure would generate indices
 message Subject {
-    // catalog is where the subject belongs to
-    common.v1.Catalog catalog = 1;
-    // name refers to a stream or measure in a particular catalog
-    string name = 2;
+  // catalog is where the subject belongs to
+  common.v1.Catalog catalog = 1;
+  // name refers to a stream or measure in a particular catalog
+  string name = 2;
 }
 
 // IndexRuleBinding is a bridge to connect several IndexRules to a subject
 // This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies
 // to control how to generate time series indices.
 message IndexRuleBinding {
-    // metadata is the identity of this binding
-    common.v1.Metadata metadata = 1;
-    // rules refers to the IndexRule
-    repeated string rules = 2;
-    // subject indicates the subject of binding action
-    Subject subject = 3;
-    // begin_at_nanoseconds is the timestamp, after which the binding will be active
-    google.protobuf.Timestamp begin_at = 4;
-    // expire_at_nanoseconds it the timestamp, after which the binding will be inactive
-    // expire_at_nanoseconds must be larger than begin_at_nanoseconds
-    google.protobuf.Timestamp expire_at = 5;
-    // updated_at indicates when the IndexRuleBinding is updated
-    google.protobuf.Timestamp updated_at = 6;
+  // metadata is the identity of this binding
+  common.v1.Metadata metadata = 1;
+  // rules refers to the IndexRule
+  repeated string rules = 2;
+  // subject indicates the subject of binding action
+  Subject subject = 3;
+  // begin_at_nanoseconds is the timestamp, after which the binding will be active
+  google.protobuf.Timestamp begin_at = 4;
+  // expire_at_nanoseconds is the timestamp, after which the binding will be inactive
+  // expire_at_nanoseconds must be larger than begin_at_nanoseconds
+  google.protobuf.Timestamp expire_at = 5;
+  // updated_at indicates when the IndexRuleBinding is updated
+  google.protobuf.Timestamp updated_at = 6;
 }
diff --git a/api/proto/banyandb/measure/v1/query.pb.go b/api/proto/banyandb/measure/v1/query.pb.go
index a016cf8..7a7fc7b 100644
--- a/api/proto/banyandb/measure/v1/query.pb.go
+++ b/api/proto/banyandb/measure/v1/query.pb.go
@@ -178,13 +178,13 @@ type QueryRequest struct {
 	// agg aggregates data points based on a field
 	Agg *QueryRequest_Aggregation `protobuf:"bytes,8,opt,name=agg,proto3" json:"agg,omitempty"`
 	// top limits the result based on a particular field.
-	// If order_by is specificed, top sorts the dataset based on order_by's output
+	// If order_by is specified, top sorts the dataset based on order_by's output
 	Top *QueryRequest_Top `protobuf:"bytes,9,opt,name=top,proto3" json:"top,omitempty"`
 	// offset is used to support pagination, together with the following limit.
-	// If top is sepcificed, offset processes the dataset based on top's output
+	// If top is specified, offset processes the dataset based on top's output
 	Offset uint32 `protobuf:"varint,10,opt,name=offset,proto3" json:"offset,omitempty"`
 	// limit is used to impose a boundary on the number of records being returned.
-	// If top is sepcificed, limit processes the dataset based on top's output
+	// If top is specified, limit processes the dataset based on top's output
 	Limit uint32 `protobuf:"varint,11,opt,name=limit,proto3" json:"limit,omitempty"`
 	// order_by is given to specify the sort for a tag.
 	OrderBy *v1.QueryOrder `protobuf:"bytes,12,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
diff --git a/api/proto/banyandb/measure/v1/query.proto b/api/proto/banyandb/measure/v1/query.proto
index 527b6ee..5d12ee2 100644
--- a/api/proto/banyandb/measure/v1/query.proto
+++ b/api/proto/banyandb/measure/v1/query.proto
@@ -90,13 +90,13 @@ message QueryRequest {
     model.v1.Sort field_value_sort = 3;
   } 
   // top limits the result based on a particular field.
-  // If order_by is specificed, top sorts the dataset based on order_by's output
+  // If order_by is specified, top sorts the dataset based on order_by's output
   Top top = 9;
   // offset is used to support pagination, together with the following limit.
-  // If top is sepcificed, offset processes the dataset based on top's output
+  // If top is specified, offset processes the dataset based on top's output
   uint32 offset = 10;
   // limit is used to impose a boundary on the number of records being returned.
-  // If top is sepcificed, limit processes the dataset based on top's output
+  // If top is specified, limit processes the dataset based on top's output
   uint32 limit = 11;
   // order_by is given to specify the sort for a tag.
   model.v1.QueryOrder order_by = 12;
diff --git a/api/proto/banyandb/measure/v1/topn.pb.go b/api/proto/banyandb/measure/v1/topn.pb.go
index 2f3cc61..7f2a278 100644
--- a/api/proto/banyandb/measure/v1/topn.pb.go
+++ b/api/proto/banyandb/measure/v1/topn.pb.go
@@ -164,7 +164,7 @@ type TopNRequest struct {
 	// agg aggregates lists grouped by field names in the time_range
 	// TODO validate enum defined_only
 	Agg v11.AggregationFunction `protobuf:"varint,4,opt,name=agg,proto3,enum=banyandb.model.v1.AggregationFunction" json:"agg,omitempty"`
-	// criteria select counters.
+	// conditions select counters. Only equality conditions are acceptable.
 	Conditions []*v11.Condition `protobuf:"bytes,5,rep,name=conditions,proto3" json:"conditions,omitempty"`
 	// field_value_sort indicates how to sort fields
 	FieldValueSort v11.Sort `protobuf:"varint,6,opt,name=field_value_sort,json=fieldValueSort,proto3,enum=banyandb.model.v1.Sort" json:"field_value_sort,omitempty"`
diff --git a/api/proto/banyandb/measure/v1/topn.proto b/api/proto/banyandb/measure/v1/topn.proto
index dc3a205..fe46c1a 100644
--- a/api/proto/banyandb/measure/v1/topn.proto
+++ b/api/proto/banyandb/measure/v1/topn.proto
@@ -58,7 +58,7 @@ message TopNRequest {
     // agg aggregates lists grouped by field names in the time_range
     // TODO validate enum defined_only
     model.v1.AggregationFunction agg = 4;
-    // criteria select counters.
+    // conditions select counters. Only equality conditions are acceptable.
     repeated model.v1.Condition conditions = 5;
     // field_value_sort indicates how to sort fields
     model.v1.Sort field_value_sort = 6;
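
To pair with the schema sketch above, here is the query side, using only the
TopNRequest fields visible in this patch (agg, conditions, field_value_sort,
and time_range, which the liaison validates further down). The measure/v1 and
model/v1 import aliases, the AggregationFunction constant, and the TimeRange
Go type are assumed protoc-gen-go names; the request's metadata and topN-count
fields are not shown in this hunk, so they are deliberately omitted here.

    import (
        measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
        modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
    )

    // buildTopNRequest sketches a request for the mean of each tracked counter
    // within the window, sorted descending (i.e. topN rather than bottomN).
    func buildTopNRequest(timeRange *modelv1.TimeRange, conditions []*modelv1.Condition) *measurev1.TopNRequest {
        return &measurev1.TopNRequest{
            TimeRange:      timeRange,  // checked by timestamp.CheckTimeRange in the liaison (see below)
            Agg:            modelv1.AggregationFunction_AGGREGATION_FUNCTION_MEAN,
            Conditions:     conditions, // equality ("=") matches only, per the comment above
            FieldValueSort: modelv1.Sort_SORT_DESC,
        }
    }
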
diff --git a/api/proto/banyandb/model/v1/query.pb.go b/api/proto/banyandb/model/v1/query.pb.go
index 453cce5..425bbec 100644
--- a/api/proto/banyandb/model/v1/query.pb.go
+++ b/api/proto/banyandb/model/v1/query.pb.go
@@ -488,7 +488,7 @@ type LogicalExpression struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// op is a logial operation
+	// op is a logical operation
 	Op    LogicalExpression_LogicalOp `protobuf:"varint,1,opt,name=op,proto3,enum=banyandb.model.v1.LogicalExpression_LogicalOp" json:"op,omitempty"`
 	Left  *Criteria                   `protobuf:"bytes,2,opt,name=left,proto3" json:"left,omitempty"`
 	Right *Criteria                   `protobuf:"bytes,3,opt,name=right,proto3" json:"right,omitempty"`
diff --git a/api/proto/banyandb/model/v1/query.proto b/api/proto/banyandb/model/v1/query.proto
index 68c7033..683fecd 100644
--- a/api/proto/banyandb/model/v1/query.proto
+++ b/api/proto/banyandb/model/v1/query.proto
@@ -85,7 +85,7 @@ message LogicalExpression {
         LOGICAL_OP_AND = 1;
         LOGICAL_OP_OR = 2; 
     }
-    // op is a logial operation
+    // op is a logical operation
     LogicalOp op = 1;
     Criteria left = 2;
     Criteria right = 3;
diff --git a/api/proto/openapi/banyandb/database/v1/rpc.swagger.json b/api/proto/openapi/banyandb/database/v1/rpc.swagger.json
index 357f24d..64182ca 100644
--- a/api/proto/openapi/banyandb/database/v1/rpc.swagger.json
+++ b/api/proto/openapi/banyandb/database/v1/rpc.swagger.json
@@ -1375,7 +1375,7 @@
         "ANALYZER_SIMPLE"
       ],
       "default": "ANALYZER_UNSPECIFIED",
-      "description": " - ANALYZER_KEYWORD: Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.\n - ANALYZER_STANDARD: Standard analyzer provides grammar based tokenization\n - ANALYZER_SIMPLE: Simple analyzer breaks text into tokens at any non-letter character, \nsuch as numbers, spaces, hyphens and apostrophes, discards non-letter characters, \nand changes uppercase to lowercase."
+      "description": " - ANALYZER_KEYWORD: Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.\n - ANALYZER_STANDARD: Standard analyzer provides grammar based tokenization\n - ANALYZER_SIMPLE: Simple analyzer breaks text into tokens at any non-letter character,\nsuch as numbers, spaces, hyphens and apostrophes, discards non-letter characters,\nand changes uppercase to lowercase."
     },
     "IntervalRuleUnit": {
       "type": "string",
@@ -1838,7 +1838,7 @@
       "properties": {
         "op": {
           "$ref": "#/definitions/LogicalExpressionLogicalOp",
-          "title": "op is a logial operation"
+          "title": "op is a logical operation"
         },
         "left": {
           "$ref": "#/definitions/v1Criteria"
@@ -2134,7 +2134,7 @@
         },
         "indexedOnly": {
           "type": "boolean",
-          "title": "indexed_only indicates whether the tag is stored\nTrue: It's indexed only, but not stored \nFalse: it's stored and indexed"
+          "title": "indexed_only indicates whether the tag is stored\nTrue: It's indexed only, but not stored\nFalse: it's stored and indexed"
         }
       }
     },
@@ -2205,10 +2205,7 @@
           "title": "group_by_tag_names groups data points into statistical counters"
         },
         "criteria": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/v1Criteria"
-          },
+          "$ref": "#/definitions/v1Criteria",
           "title": "criteria select partial data points from measure"
         },
         "countersNumber": {
@@ -2216,6 +2213,11 @@
           "format": "int32",
           "title": "counters_number sets the number of counters to be tracked. The default value is 1000"
         },
+        "lruSize": {
+          "type": "integer",
+          "format": "int32",
+          "title": "lru_size defines how many entries are allowed to be maintained in memory"
+        },
         "updatedAt": {
           "type": "string",
           "format": "date-time",
diff --git a/api/proto/openapi/banyandb/measure/v1/rpc.swagger.json b/api/proto/openapi/banyandb/measure/v1/rpc.swagger.json
index a59bf95..dd73d8b 100644
--- a/api/proto/openapi/banyandb/measure/v1/rpc.swagger.json
+++ b/api/proto/openapi/banyandb/measure/v1/rpc.swagger.json
@@ -357,7 +357,7 @@
       "properties": {
         "op": {
           "$ref": "#/definitions/LogicalExpressionLogicalOp",
-          "title": "op is a logial operation"
+          "title": "op is a logical operation"
         },
         "left": {
           "$ref": "#/definitions/v1Criteria"
@@ -441,17 +441,17 @@
         },
         "top": {
           "$ref": "#/definitions/QueryRequestTop",
-          "title": "top limits the result based on a particular field.\nIf order_by is specificed, top sorts the dataset based on order_by's output"
+          "title": "top limits the result based on a particular field.\nIf order_by is specified, top sorts the dataset based on order_by's output"
         },
         "offset": {
           "type": "integer",
           "format": "int64",
-          "title": "offset is used to support pagination, together with the following limit.\nIf top is sepcificed, offset processes the dataset based on top's output"
+          "title": "offset is used to support pagination, together with the following limit.\nIf top is specified, offset processes the dataset based on top's output"
         },
         "limit": {
           "type": "integer",
           "format": "int64",
-          "title": "limit is used to impose a boundary on the number of records being returned.\nIf top is sepcificed, limit processes the dataset based on top's output"
+          "title": "limit is used to impose a boundary on the number of records being returned.\nIf top is specified, limit processes the dataset based on top's output"
         },
         "orderBy": {
           "$ref": "#/definitions/v1QueryOrder",
diff --git a/api/proto/openapi/banyandb/stream/v1/rpc.swagger.json b/api/proto/openapi/banyandb/stream/v1/rpc.swagger.json
index c07f760..765a46f 100644
--- a/api/proto/openapi/banyandb/stream/v1/rpc.swagger.json
+++ b/api/proto/openapi/banyandb/stream/v1/rpc.swagger.json
@@ -244,7 +244,7 @@
       "properties": {
         "op": {
           "$ref": "#/definitions/LogicalExpressionLogicalOp",
-          "title": "op is a logial operation"
+          "title": "op is a logical operation"
         },
         "left": {
           "$ref": "#/definitions/v1Criteria"
diff --git a/banyand/liaison/grpc/measure.go b/banyand/liaison/grpc/measure.go
index 5cc53c9..ac6a476 100644
--- a/banyand/liaison/grpc/measure.go
+++ b/banyand/liaison/grpc/measure.go
@@ -111,4 +111,26 @@ func (ms *measureService) Query(_ context.Context, entityCriteria *measurev1.Que
 	return nil, ErrQueryMsg
 }
 
-// TODO: implement topN
+func (ms *measureService) TopN(_ context.Context, topNRequest *measurev1.TopNRequest) (*measurev1.TopNResponse, error) {
+	if err := timestamp.CheckTimeRange(topNRequest.GetTimeRange()); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "%v is invalid: %s", topNRequest.GetTimeRange(), err)
+	}
+
+	message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), topNRequest)
+	feat, errQuery := ms.pipeline.Publish(data.TopicTopNQuery, message)
+	if errQuery != nil {
+		return nil, errQuery
+	}
+	msg, errFeat := feat.Get()
+	if errFeat != nil {
+		return nil, errFeat
+	}
+	data := msg.Data()
+	switch d := data.(type) {
+	case []*measurev1.TopNList:
+		return &measurev1.TopNResponse{Lists: d}, nil
+	case common.Error:
+		return nil, errors.WithMessage(ErrQueryMsg, d.Msg())
+	}
+	return nil, ErrQueryMsg
+}
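
A hedged usage sketch of the new endpoint follows. It assumes the generated MeasureServiceClient stub, a modelv1.TimeRange message carrying begin/end timestamps, and the MEAN aggregation enum value; none of these names are confirmed by this diff beyond their getters:

    package example

    import (
        "context"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"

        commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
        measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
        modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
    )

    // queryTopN asks for the top 3 entities over the last 15 minutes.
    func queryTopN(ctx context.Context, client measurev1.MeasureServiceClient) ([]*measurev1.TopNList, error) {
        now := time.Now()
        resp, err := client.TopN(ctx, &measurev1.TopNRequest{
            Metadata:       &commonv1.Metadata{Group: "sw_metric", Name: "service_cpm_topn"}, // hypothetical
            TimeRange:      &modelv1.TimeRange{Begin: timestamppb.New(now.Add(-15 * time.Minute)), End: timestamppb.New(now)},
            TopN:           3,
            Agg:            modelv1.AggregationFunction_AGGREGATION_FUNCTION_MEAN, // assumed enum value
            FieldValueSort: modelv1.Sort_SORT_DESC,
        })
        if err != nil {
            return nil, err
        }
        return resp.GetLists(), nil
    }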
diff --git a/banyand/liaison/http/server.go b/banyand/liaison/http/server.go
index e416286..8b90c4d 100644
--- a/banyand/liaison/http/server.go
+++ b/banyand/liaison/http/server.go
@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"io/fs"
 	"net/http"
-	stdhttp "net/http"
 	"strings"
 
 	"github.com/go-chi/chi/v5"
@@ -61,7 +60,7 @@ type service struct {
 	clientCloser context.CancelFunc
 	l            *logger.Logger
 
-	srv *stdhttp.Server
+	srv *http.Server
 }
 
 func (p *service) FlagSet() *run.FlagSet {
@@ -87,11 +86,11 @@ func (p *service) PreRun() error {
 	if err != nil {
 		return err
 	}
-	httpFS := stdhttp.FS(fSys)
-	fileServer := stdhttp.FileServer(stdhttp.FS(fSys))
+	httpFS := http.FS(fSys)
+	fileServer := http.FileServer(http.FS(fSys))
 	serveIndex := serveFileContents("index.html", httpFS)
 	p.mux.Mount("/", intercept404(fileServer, serveIndex))
-	p.srv = &stdhttp.Server{
+	p.srv = &http.Server{
 		Addr:    p.listenAddr,
 		Handler: p.mux,
 	}
@@ -145,24 +144,24 @@ func (p *service) GracefulStop() {
 	p.clientCloser()
 }
 
-func intercept404(handler, on404 stdhttp.Handler) stdhttp.HandlerFunc {
-	return stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) {
+func intercept404(handler, on404 http.Handler) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
 		hookedWriter := &hookedResponseWriter{ResponseWriter: w}
 		handler.ServeHTTP(hookedWriter, r)
 
 		if hookedWriter.got404 {
 			on404.ServeHTTP(w, r)
 		}
-	})
+	}
 }
 
 type hookedResponseWriter struct {
-	stdhttp.ResponseWriter
+	http.ResponseWriter
 	got404 bool
 }
 
 func (hrw *hookedResponseWriter) WriteHeader(status int) {
-	if status == stdhttp.StatusNotFound {
+	if status == http.StatusNotFound {
 		hrw.got404 = true
 	} else {
 		hrw.ResponseWriter.WriteHeader(status)
@@ -177,29 +176,29 @@ func (hrw *hookedResponseWriter) Write(p []byte) (int, error) {
 	return hrw.ResponseWriter.Write(p)
 }
 
-func serveFileContents(file string, files stdhttp.FileSystem) stdhttp.HandlerFunc {
-	return func(w stdhttp.ResponseWriter, r *stdhttp.Request) {
+func serveFileContents(file string, files http.FileSystem) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
 		if !strings.Contains(r.Header.Get("Accept"), "text/html") {
-			w.WriteHeader(stdhttp.StatusNotFound)
+			w.WriteHeader(http.StatusNotFound)
 			fmt.Fprint(w, "404 not found")
 
 			return
 		}
 		index, err := files.Open(file)
 		if err != nil {
-			w.WriteHeader(stdhttp.StatusNotFound)
+			w.WriteHeader(http.StatusNotFound)
 			fmt.Fprintf(w, "%s not found", file)
 
 			return
 		}
 		fi, err := index.Stat()
 		if err != nil {
-			w.WriteHeader(stdhttp.StatusNotFound)
+			w.WriteHeader(http.StatusNotFound)
 			fmt.Fprintf(w, "%s not found", file)
 
 			return
 		}
 		w.Header().Set("Content-Type", "text/html; charset=utf-8")
-		stdhttp.ServeContent(w, r, fi.Name(), fi.ModTime(), index)
+		http.ServeContent(w, r, fi.Name(), fi.ModTime(), index)
 	}
 }
diff --git a/banyand/measure/encode.go b/banyand/measure/encode.go
index e1042e6..86a2bed 100644
--- a/banyand/measure/encode.go
+++ b/banyand/measure/encode.go
@@ -14,22 +14,23 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure
 
 import (
 	"time"
 
 	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
-	"github.com/apache/skywalking-banyandb/pkg/convert"
 	"github.com/apache/skywalking-banyandb/pkg/encoding"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
 )
 
 var (
 	_          encoding.SeriesEncoderPool = (*encoderPool)(nil)
 	_          encoding.SeriesDecoderPool = (*decoderPool)(nil)
 	intervalFn                            = func(key []byte) time.Duration {
-		_, interval, err := decodeFieldFlag(key)
+		_, interval, err := pbv1.DecodeFieldFlag(key)
 		if err != nil {
 			panic(err)
 		}
@@ -52,7 +53,7 @@ func newEncoderPool(name string, plainSize, intSize int, l *logger.Logger) encod
 }
 
 func (p *encoderPool) Get(metadata []byte) encoding.SeriesEncoder {
-	fieldSpec, _, err := decodeFieldFlag(metadata)
+	fieldSpec, _, err := pbv1.DecodeFieldFlag(metadata)
 	if err != nil {
 		p.l.Err(err).Msg("failed to decode field flag")
 		return p.defaultPool.Get(metadata)
@@ -83,7 +84,7 @@ func newDecoderPool(name string, plainSize, intSize int, l *logger.Logger) encod
 }
 
 func (p *decoderPool) Get(metadata []byte) encoding.SeriesDecoder {
-	fieldSpec, _, err := decodeFieldFlag(metadata)
+	fieldSpec, _, err := pbv1.DecodeFieldFlag(metadata)
 	if err != nil {
 		p.l.Err(err).Msg("failed to decode field flag")
 		return p.defaultPool.Get(metadata)
@@ -98,25 +99,3 @@ func (p *decoderPool) Put(decoder encoding.SeriesDecoder) {
 	p.intPool.Put(decoder)
 	p.defaultPool.Put(decoder)
 }
-
-const fieldFlagLength = 9
-
-func encoderFieldFlag(fieldSpec *databasev1.FieldSpec, interval time.Duration) []byte {
-	encodingMethod := byte(fieldSpec.GetEncodingMethod().Number())
-	compressionMethod := byte(fieldSpec.GetCompressionMethod().Number())
-	bb := make([]byte, fieldFlagLength)
-	bb[0] = encodingMethod<<4 | compressionMethod
-	copy(bb[1:], convert.Int64ToBytes(int64(interval)))
-	return bb
-}
-
-func decodeFieldFlag(key []byte) (*databasev1.FieldSpec, time.Duration, error) {
-	if len(key) < fieldFlagLength {
-		return nil, 0, ErrMalformedFieldFlag
-	}
-	b := key[len(key)-9:]
-	return &databasev1.FieldSpec{
-		EncodingMethod:    databasev1.EncodingMethod(int32(b[0]) >> 4),
-		CompressionMethod: databasev1.CompressionMethod((int32(b[0] & 0x0F))),
-	}, time.Duration(convert.BytesToInt64(b[1:])), nil
-}
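
The helpers removed here document the 9-byte field flag layout that moves into pkg/pb/v1: byte 0 packs the encoding method (high nibble) and the compression method (low nibble), and bytes 1..8 carry the interval as an int64. A self-contained round-trip sketch, with encoding/binary standing in for convert.Int64ToBytes (an assumption of this sketch):

    package main

    import (
        "encoding/binary"
        "fmt"
        "time"
    )

    const fieldFlagLength = 9

    // encodeFlag packs encoding/compression into byte 0 and the interval into bytes 1..8.
    func encodeFlag(encoding, compression byte, interval time.Duration) []byte {
        bb := make([]byte, fieldFlagLength)
        bb[0] = encoding<<4 | compression
        binary.BigEndian.PutUint64(bb[1:], uint64(interval))
        return bb
    }

    // decodeFlag reverses encodeFlag.
    func decodeFlag(bb []byte) (encoding, compression byte, interval time.Duration) {
        return bb[0] >> 4, bb[0] & 0x0F, time.Duration(int64(binary.BigEndian.Uint64(bb[1:])))
    }

    func main() {
        flag := encodeFlag(1, 2, time.Minute)
        e, c, iv := decodeFlag(flag)
        fmt.Println(e, c, iv) // 1 2 1m0s
    }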
diff --git a/banyand/measure/field_flag_test.go b/banyand/measure/field_flag_test.go
index fba88db..ae0845f 100644
--- a/banyand/measure/field_flag_test.go
+++ b/banyand/measure/field_flag_test.go
@@ -23,14 +23,15 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
 )
 
 func TestEncodeFieldFlag(t *testing.T) {
-	flag := encoderFieldFlag(&databasev1.FieldSpec{
+	flag := pbv1.EncoderFieldFlag(&databasev1.FieldSpec{
 		EncodingMethod:    databasev1.EncodingMethod_ENCODING_METHOD_GORILLA,
 		CompressionMethod: databasev1.CompressionMethod_COMPRESSION_METHOD_ZSTD,
 	}, time.Minute)
-	fieldSpec, interval, err := decodeFieldFlag(flag)
+	fieldSpec, interval, err := pbv1.DecodeFieldFlag(flag)
 	assert.NoError(t, err)
 	assert.Equal(t, databasev1.EncodingMethod_ENCODING_METHOD_GORILLA, fieldSpec.EncodingMethod)
 	assert.Equal(t, databasev1.CompressionMethod_COMPRESSION_METHOD_ZSTD, fieldSpec.CompressionMethod)
diff --git a/banyand/measure/measure.go b/banyand/measure/measure.go
index 0753b2b..971f08e 100644
--- a/banyand/measure/measure.go
+++ b/banyand/measure/measure.go
@@ -21,6 +21,8 @@ import (
 	"context"
 	"time"
 
+	"go.uber.org/multierr"
+
 	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
 	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
@@ -44,11 +46,12 @@ type measure struct {
 	schema   *databasev1.Measure
 	// maxObservedModRevision is the max observed revision of index rules in the spec
 	maxObservedModRevision int64
-	db                     tsdb.Supplier
+	databaseSupplier       tsdb.Supplier
 	entityLocator          partition.EntityLocator
 	indexRules             []*databasev1.IndexRule
 	indexWriter            *index.Writer
 	interval               time.Duration
+	processorManager       *topNProcessorManager
 }
 
 func (s *measure) GetSchema() *databasev1.Measure {
@@ -72,7 +75,7 @@ func (s *measure) EntityLocator() partition.EntityLocator {
 }
 
 func (s *measure) Close() error {
-	return s.indexWriter.Close()
+	return multierr.Combine(s.processorManager.Close(), s.indexWriter.Close())
 }
 
 func (s *measure) parseSpec() (err error) {
@@ -86,28 +89,42 @@ func (s *measure) parseSpec() (err error) {
 }
 
 type measureSpec struct {
-	schema     *databasev1.Measure
-	indexRules []*databasev1.IndexRule
+	schema           *databasev1.Measure
+	indexRules       []*databasev1.IndexRule
+	topNAggregations []*databasev1.TopNAggregation
 }
 
 func openMeasure(shardNum uint32, db tsdb.Supplier, spec measureSpec, l *logger.Logger) (*measure, error) {
-	sm := &measure{
+	m := &measure{
 		shardNum:   shardNum,
 		schema:     spec.schema,
 		indexRules: spec.indexRules,
 		l:          l,
 	}
-	if err := sm.parseSpec(); err != nil {
+	if err := m.parseSpec(); err != nil {
 		return nil, err
 	}
 	ctx := context.WithValue(context.Background(), logger.ContextKey, l)
 
-	sm.db = db
-	sm.indexWriter = index.NewWriter(ctx, index.WriterOptions{
+	m.databaseSupplier = db
+	m.indexWriter = index.NewWriter(ctx, index.WriterOptions{
 		DB:         db,
 		ShardNum:   shardNum,
 		Families:   spec.schema.TagFamilies,
 		IndexRules: spec.indexRules,
 	})
-	return sm, nil
+
+	m.processorManager = &topNProcessorManager{
+		l:            l,
+		m:            m,
+		topNSchemas:  spec.topNAggregations,
+		processorMap: make(map[*commonv1.Metadata][]*topNStreamingProcessor),
+	}
+
+	err := m.processorManager.start()
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
 }
diff --git a/banyand/measure/measure_query.go b/banyand/measure/measure_query.go
index d47f723..ceefa42 100644
--- a/banyand/measure/measure_query.go
+++ b/banyand/measure/measure_query.go
@@ -19,6 +19,7 @@ package measure
 
 import (
 	"io"
+	"time"
 
 	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
@@ -30,6 +31,7 @@ import (
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/partition"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
 	resourceSchema "github.com/apache/skywalking-banyandb/pkg/schema"
 )
 
@@ -44,15 +46,21 @@ type Measure interface {
 	io.Closer
 	Write(value *measurev1.DataPointValue) error
 	Shards(entity tsdb.Entity) ([]tsdb.Shard, error)
+	CompanionShards(metadata *commonv1.Metadata) ([]tsdb.Shard, error)
 	Shard(id common.ShardID) (tsdb.Shard, error)
 	ParseTagFamily(family string, item tsdb.Item) (*modelv1.TagFamily, error)
 	ParseField(name string, item tsdb.Item) (*measurev1.DataPoint_Field, error)
 	GetSchema() *databasev1.Measure
 	GetIndexRules() []*databasev1.IndexRule
+	GetInterval() time.Duration
 }
 
 var _ Measure = (*measure)(nil)
 
+func (s *measure) GetInterval() time.Duration {
+	return s.interval
+}
+
 func (s *measure) Shards(entity tsdb.Entity) ([]tsdb.Shard, error) {
 	wrap := func(shards []tsdb.Shard) []tsdb.Shard {
 		result := make([]tsdb.Shard, len(shards))
@@ -61,7 +69,7 @@ func (s *measure) Shards(entity tsdb.Entity) ([]tsdb.Shard, error) {
 		}
 		return result
 	}
-	db := s.db.SupplyTSDB()
+	db := s.databaseSupplier.SupplyTSDB()
 	if len(entity) < 1 {
 		return wrap(db.Shards()), nil
 	}
@@ -74,15 +82,31 @@ func (s *measure) Shards(entity tsdb.Entity) ([]tsdb.Shard, error) {
 	if err != nil {
 		return nil, err
 	}
-	shard, err := db.Shard(common.ShardID(shardID))
+	shard, err := s.Shard(common.ShardID(shardID))
 	if err != nil {
 		return nil, err
 	}
-	return []tsdb.Shard{tsdb.NewScopedShard(tsdb.Entry(s.name), shard)}, nil
+	return []tsdb.Shard{shard}, nil
+}
+
+func (s *measure) CompanionShards(metadata *commonv1.Metadata) ([]tsdb.Shard, error) {
+	wrap := func(shards []tsdb.Shard) []tsdb.Shard {
+		result := make([]tsdb.Shard, len(shards))
+		for i := 0; i < len(shards); i++ {
+			result[i] = tsdb.NewScopedShard(tsdb.Entry(formatMeasureCompanionPrefix(s.name, metadata.GetName())), shards[i])
+		}
+		return result
+	}
+	db := s.databaseSupplier.SupplyTSDB()
+	return wrap(db.Shards()), nil
+}
+
+func formatMeasureCompanionPrefix(measureName, name string) string {
+	return measureName + "." + name
 }
 
 func (s *measure) Shard(id common.ShardID) (tsdb.Shard, error) {
-	shard, err := s.db.SupplyTSDB().Shard(id)
+	shard, err := s.databaseSupplier.SupplyTSDB().Shard(id)
 	if err != nil {
 		return nil, err
 	}
@@ -90,7 +114,7 @@ func (s *measure) Shard(id common.ShardID) (tsdb.Shard, error) {
 }
 
 func (s *measure) ParseTagFamily(family string, item tsdb.Item) (*modelv1.TagFamily, error) {
-	familyRawBytes, err := item.Family(familyIdentity(family, TagFlag))
+	familyRawBytes, err := item.Family(familyIdentity(family, pbv1.TagFlag))
 	if err != nil {
 		return nil, err
 	}
@@ -131,11 +155,11 @@ func (s *measure) ParseField(name string, item tsdb.Item) (*measurev1.DataPoint_
 			break
 		}
 	}
-	bytes, err := item.Family(familyIdentity(name, encoderFieldFlag(fieldSpec, s.interval)))
+	bytes, err := item.Family(familyIdentity(name, pbv1.EncoderFieldFlag(fieldSpec, s.interval)))
 	if err != nil {
 		return nil, err
 	}
-	fieldValue := decodeFieldValue(bytes, fieldSpec)
+	fieldValue := pbv1.DecodeFieldValue(bytes, fieldSpec)
 	return &measurev1.DataPoint_Field{
 		Name:  name,
 		Value: fieldValue,
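
The companion prefix is easiest to see with a concrete value; the measure and TopN names below are made up:

    package main

    import "fmt"

    // formatMeasureCompanionPrefix is reproduced from measure_query.go above.
    func formatMeasureCompanionPrefix(measureName, name string) string {
        return measureName + "." + name
    }

    func main() {
        // Every shard returned by CompanionShards is scoped by this entry,
        // keeping TopN results apart from the source measure's own series.
        fmt.Println(formatMeasureCompanionPrefix("service_cpm", "service_cpm_topn"))
        // Output: service_cpm.service_cpm_topn
    }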
diff --git a/banyand/measure/measure_topn.go b/banyand/measure/measure_topn.go
new file mode 100644
index 0000000..d8cb66d
--- /dev/null
+++ b/banyand/measure/measure_topn.go
@@ -0,0 +1,681 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package measure
+
+import (
+	"context"
+	"encoding/base64"
+	"io"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+	"go.uber.org/multierr"
+	"golang.org/x/exp/slices"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/apache/skywalking-banyandb/api/common"
+	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
+	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
+	"github.com/apache/skywalking-banyandb/banyand/tsdb"
+	"github.com/apache/skywalking-banyandb/pkg/bus"
+	"github.com/apache/skywalking-banyandb/pkg/convert"
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming/sources"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
+	"github.com/apache/skywalking-banyandb/pkg/partition"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
+	"github.com/apache/skywalking-banyandb/pkg/timestamp"
+)
+
+const (
+	timeBucketFormat = "200601021504"
+	TopNTagFamily    = "__topN__"
+)
+
+var (
+	_ bus.MessageListener = (*topNProcessCallback)(nil)
+	_ io.Closer           = (*topNStreamingProcessor)(nil)
+	_ io.Closer           = (*topNProcessorManager)(nil)
+	_ flow.Sink           = (*topNStreamingProcessor)(nil)
+
+	errUnsupportedConditionValueType = errors.New("unsupported value type in the condition")
+
+	TopNValueFieldSpec = &databasev1.FieldSpec{
+		Name:              "value",
+		FieldType:         databasev1.FieldType_FIELD_TYPE_INT,
+		EncodingMethod:    databasev1.EncodingMethod_ENCODING_METHOD_GORILLA,
+		CompressionMethod: databasev1.CompressionMethod_COMPRESSION_METHOD_ZSTD,
+	}
+)
+
+type topNStreamingProcessor struct {
+	flow.ComponentState
+	l                *logger.Logger
+	shardNum         uint32
+	interval         time.Duration
+	topNSchema       *databasev1.TopNAggregation
+	sortDirection    modelv1.Sort
+	databaseSupplier tsdb.Supplier
+	src              chan interface{}
+	in               chan flow.StreamRecord
+	errCh            <-chan error
+	stopCh           chan struct{}
+	streamingFlow    flow.Flow
+}
+
+func (t *topNStreamingProcessor) In() chan<- flow.StreamRecord {
+	return t.in
+}
+
+func (t *topNStreamingProcessor) Setup(ctx context.Context) error {
+	t.Add(1)
+	go t.run(ctx)
+	return nil
+}
+
+func (t *topNStreamingProcessor) run(ctx context.Context) {
+	defer t.Done()
+	for {
+		select {
+		case record, ok := <-t.in:
+			if !ok {
+				return
+			}
+			if err := t.writeStreamRecord(record); err != nil {
+				t.l.Err(err).Msg("fail to write stream record")
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// Teardown is called by the Flow as a lifecycle hook.
+// So we must not block on the error channel within this method.
+func (t *topNStreamingProcessor) Teardown(ctx context.Context) error {
+	t.Wait()
+	return nil
+}
+
+func (t *topNStreamingProcessor) Close() error {
+	close(t.src)
+	// close streaming flow
+	err := t.streamingFlow.Close()
+	// and wait for error channel close
+	<-t.stopCh
+	t.stopCh = nil
+	return err
+}
+
+func (t *topNStreamingProcessor) writeStreamRecord(record flow.StreamRecord) error {
+	tuples, ok := record.Data().([]*streaming.Tuple2)
+	if !ok {
+		return errors.New("invalid data type")
+	}
+	// down-sample the start of the timeWindow to a time-bucket
+	eventTime := t.downSampleTimeBucket(record.TimestampMillis())
+	timeBucket := eventTime.Format(timeBucketFormat)
+	var err error
+	t.l.Warn().
+		Str("TopN", t.topNSchema.GetMetadata().GetName()).
+		Int("rankNums", len(tuples)).
+		Msg("Write a tuple")
+	for rankNum, tuple := range tuples {
+		fieldValue := tuple.V1.(int64)
+		data := tuple.V2.(flow.StreamRecord).Data().(flow.Data)
+		err = multierr.Append(err, t.writeData(eventTime, timeBucket, fieldValue, data, rankNum))
+	}
+	return err
+}
+
+func (t *topNStreamingProcessor) writeData(eventTime time.Time, timeBucket string, fieldValue int64, data flow.Data, rankNum int) error {
+	var tagValues []*modelv1.TagValue
+	if len(t.topNSchema.GetGroupByTagNames()) > 0 {
+		var ok bool
+		if tagValues, ok = data[2].([]*modelv1.TagValue); !ok {
+			return errors.New("fail to extract tag values from topN result")
+		}
+	}
+	entity, shardID, err := t.locate(tagValues, rankNum)
+	if err != nil {
+		return err
+	}
+	shard, err := t.databaseSupplier.SupplyTSDB().Shard(shardID)
+	if err != nil {
+		return err
+	}
+	series, err := shard.Series().GetByHashKey(tsdb.HashEntity(entity))
+	if err != nil {
+		return err
+	}
+	span, err := series.Span(timestamp.NewInclusiveTimeRangeDuration(eventTime, 0))
+	if err != nil {
+		if span != nil {
+			_ = span.Close()
+		}
+		return err
+	}
+	// measureID consists of three parts:
+	// 1. groupValues
+	// 2. rankNumber
+	// 3. timeBucket
+	measureID := data[0].(string) + "_" + strconv.Itoa(rankNum) + "_" + timeBucket
+	writeFn := func() (tsdb.Writer, error) {
+		builder := span.WriterBuilder().Time(eventTime)
+		virtualTagFamily := &modelv1.TagFamilyForWrite{
+			Tags: []*modelv1.TagValue{
+				// MeasureID
+				{
+					Value: &modelv1.TagValue_Id{
+						Id: &modelv1.ID{
+							Value: measureID,
+						},
+					},
+				},
+				// GroupValues for merge in post processor
+				{
+					Value: &modelv1.TagValue_Str{
+						Str: &modelv1.Str{
+							Value: data[0].(string),
+						},
+					},
+				},
+			},
+		}
+		payload, errMarshal := proto.Marshal(virtualTagFamily)
+		if errMarshal != nil {
+			return nil, errMarshal
+		}
+		builder.Family(familyIdentity(TopNTagFamily, pbv1.TagFlag), payload)
+		virtualFieldValue := &modelv1.FieldValue{
+			Value: &modelv1.FieldValue_Int{
+				Int: &modelv1.Int{
+					Value: fieldValue,
+				},
+			},
+		}
+		fieldData := encodeFieldValue(virtualFieldValue)
+		builder.Family(familyIdentity(TopNValueFieldSpec.GetName(), pbv1.EncoderFieldFlag(TopNValueFieldSpec, t.interval)), fieldData)
+		writer, errWrite := builder.Build()
+		if errWrite != nil {
+			return nil, errWrite
+		}
+		_, errWrite = writer.Write()
+		t.l.Debug().
+			Time("ts", eventTime).
+			Int("ts_nano", eventTime.Nanosecond()).
+			Uint64("series_id", uint64(series.ID())).
+			Uint64("item_id", uint64(writer.ItemID().ID)).
+			Int("shard_id", int(shardID)).
+			Msg("write measure")
+		return writer, errWrite
+	}
+	_, err = writeFn()
+	if err != nil {
+		_ = span.Close()
+		return err
+	}
+	return span.Close()
+}
+
+func (t *topNStreamingProcessor) downSampleTimeBucket(eventTimeMillis int64) time.Time {
+	return time.UnixMilli(eventTimeMillis - eventTimeMillis%t.interval.Milliseconds())
+}
+
+func (t *topNStreamingProcessor) locate(tagValues []*modelv1.TagValue, rankNum int) (tsdb.Entity, common.ShardID, error) {
+	if len(t.topNSchema.GetGroupByTagNames()) != len(tagValues) {
+		return nil, 0, errors.New("no enough tag values for the entity")
+	}
+	entity := make(tsdb.Entity, 1+1+len(t.topNSchema.GetGroupByTagNames()))
+	// entity prefix
+	entity[0] = []byte(formatMeasureCompanionPrefix(t.topNSchema.GetSourceMeasure().GetName(),
+		t.topNSchema.GetMetadata().GetName()))
+	entity[1] = convert.Int64ToBytes(int64(rankNum))
+	// measureID as sharding key
+	for idx, tagVal := range tagValues {
+		var innerErr error
+		entity[idx+2], innerErr = pbv1.MarshalIndexFieldValue(tagVal)
+		if innerErr != nil {
+			return nil, 0, innerErr
+		}
+	}
+	id, err := partition.ShardID(entity.Marshal(), t.shardNum)
+	if err != nil {
+		return nil, 0, err
+	}
+	return entity, common.ShardID(id), nil
+}
+
+func (t *topNStreamingProcessor) start() *topNStreamingProcessor {
+	t.errCh = t.streamingFlow.Window(streaming.NewTumblingTimeWindows(t.interval)).
+		AllowedMaxWindows(int(t.topNSchema.GetLruSize())).
+		TopN(int(t.topNSchema.GetCountersNumber()),
+			streaming.WithSortKeyExtractor(func(record flow.StreamRecord) int64 {
+				return record.Data().(flow.Data)[1].(int64)
+			}),
+			OrderBy(t.topNSchema.GetFieldValueSort()),
+		).To(t).Open()
+	go t.handleError()
+	return t
+}
+
+func OrderBy(sort modelv1.Sort) streaming.TopNOption {
+	if sort == modelv1.Sort_SORT_ASC {
+		return streaming.OrderBy(streaming.ASC)
+	}
+	return streaming.OrderBy(streaming.DESC)
+}
+
+func (t *topNStreamingProcessor) handleError() {
+	for err := range t.errCh {
+		t.l.Err(err).Str("topN", t.topNSchema.GetMetadata().GetName()).
+			Msg("error occurred during flow setup or process")
+	}
+	t.stopCh <- struct{}{}
+}
+
+// topNProcessorManager manages multiple topNStreamingProcessor(s) belonging to a single measure
+type topNProcessorManager struct {
+	// The RWMutex protects processorMap from a data race, i.e. a send on an
+	// underlying channel racing with the close of that channel.
+	// TODO: this can be optimized if the bus Listener can be finished synchronously.
+	sync.RWMutex
+	l            *logger.Logger
+	m            *measure
+	topNSchemas  []*databasev1.TopNAggregation
+	processorMap map[*commonv1.Metadata][]*topNStreamingProcessor
+}
+
+func (manager *topNProcessorManager) Close() error {
+	manager.Lock()
+	defer manager.Unlock()
+	var err error
+	for _, processorList := range manager.processorMap {
+		for _, processor := range processorList {
+			err = multierr.Append(err, processor.Close())
+		}
+	}
+	return err
+}
+
+func (manager *topNProcessorManager) onMeasureWrite(request *measurev1.WriteRequest) error {
+	manager.RLock()
+	defer manager.RUnlock()
+	for _, processorList := range manager.processorMap {
+		for _, processor := range processorList {
+			processor.src <- flow.NewStreamRecordWithTimestampPb(request.GetDataPoint(), request.GetDataPoint().GetTimestamp())
+		}
+	}
+
+	return nil
+}
+
+func (manager *topNProcessorManager) start() error {
+	interval := manager.m.interval
+	for _, topNSchema := range manager.topNSchemas {
+		sortDirections := make([]modelv1.Sort, 0, 2)
+		if topNSchema.GetFieldValueSort() == modelv1.Sort_SORT_UNSPECIFIED {
+			sortDirections = append(sortDirections, modelv1.Sort_SORT_ASC, modelv1.Sort_SORT_DESC)
+		} else {
+			sortDirections = append(sortDirections, topNSchema.GetFieldValueSort())
+		}
+
+		processorList := make([]*topNStreamingProcessor, len(sortDirections))
+		for i, sortDirection := range sortDirections {
+			srcCh := make(chan interface{})
+			src, _ := sources.NewChannel(srcCh)
+			streamingFlow := streaming.New(src)
+
+			filters, buildErr := manager.buildFilter(topNSchema.GetCriteria())
+			if buildErr != nil {
+				return buildErr
+			}
+			streamingFlow = streamingFlow.Filter(filters)
+
+			mapper, innerErr := manager.buildMapper(topNSchema.GetFieldName(), topNSchema.GetGroupByTagNames()...)
+			if innerErr != nil {
+				return innerErr
+			}
+			streamingFlow = streamingFlow.Map(mapper)
+
+			processor := &topNStreamingProcessor{
+				l:                manager.l,
+				shardNum:         manager.m.shardNum,
+				interval:         interval,
+				topNSchema:       topNSchema,
+				sortDirection:    sortDirection,
+				databaseSupplier: manager.m.databaseSupplier,
+				src:              srcCh,
+				in:               make(chan flow.StreamRecord),
+				stopCh:           make(chan struct{}),
+				streamingFlow:    streamingFlow,
+			}
+			processorList[i] = processor.start()
+		}
+
+		manager.processorMap[topNSchema.GetSourceMeasure()] = processorList
+	}
+
+	return nil
+}
+
+func (manager *topNProcessorManager) buildFilter(criteria *modelv1.Criteria) (flow.UnaryFunc[bool], error) {
+	// if criteria is nil, we handle all incoming elements
+	if criteria == nil {
+		return func(_ context.Context, dataPoint any) bool {
+			return true
+		}, nil
+	}
+
+	f, err := manager.buildFilterForCriteria(criteria)
+	if err != nil {
+		return nil, err
+	}
+
+	return func(_ context.Context, dataPoint any) bool {
+		tfs := dataPoint.(*measurev1.DataPointValue).GetTagFamilies()
+		return f.predicate(tfs)
+	}, nil
+}
+
+func (manager *topNProcessorManager) buildFilterForCriteria(criteria *modelv1.Criteria) (conditionFilter, error) {
+	switch v := criteria.GetExp().(type) {
+	case *modelv1.Criteria_Condition:
+		return manager.buildFilterForCondition(v.Condition)
+	case *modelv1.Criteria_Le:
+		return manager.buildFilterForLogicalExpr(v.Le)
+	default:
+		return nil, errors.New("should not reach here")
+	}
+}
+
+// buildFilterForLogicalExpr builds a logical, composable filter for a logical expression
+// whose children are underlying conditions or nested logical expressions.
+func (manager *topNProcessorManager) buildFilterForLogicalExpr(logicalExpr *modelv1.LogicalExpression) (conditionFilter, error) {
+	left, lErr := manager.buildFilterForCriteria(logicalExpr.Left)
+	if lErr != nil {
+		return nil, lErr
+	}
+	right, rErr := manager.buildFilterForCriteria(logicalExpr.Right)
+	if rErr != nil {
+		return nil, rErr
+	}
+	return composeWithOp(left, right, logicalExpr.Op), nil
+}
+
+func composeWithOp(left, right conditionFilter, op modelv1.LogicalExpression_LogicalOp) conditionFilter {
+	if op == modelv1.LogicalExpression_LOGICAL_OP_AND {
+		return &andFilter{left, right}
+	}
+	return &orFilter{left, right}
+}
+
+// buildFilterForCondition builds a single, composable filter for a single condition
+func (manager *topNProcessorManager) buildFilterForCondition(cond *modelv1.Condition) (conditionFilter, error) {
+	familyOffset, tagOffset, spec := pbv1.FindTagByName(manager.m.GetSchema().GetTagFamilies(), cond.GetName())
+	if spec == nil {
+		return nil, errors.New("fail to parse tag by name")
+	}
+	switch v := cond.GetValue().GetValue().(type) {
+	case *modelv1.TagValue_Int:
+		return &int64TagFilter{
+			TagLocator: partition.TagLocator{
+				FamilyOffset: familyOffset,
+				TagOffset:    tagOffset,
+			},
+			op:  cond.GetOp(),
+			val: v.Int.GetValue(),
+		}, nil
+	case *modelv1.TagValue_Str:
+		return &strTagFilter{
+			TagLocator: partition.TagLocator{
+				FamilyOffset: familyOffset,
+				TagOffset:    tagOffset,
+			},
+			op:  cond.GetOp(),
+			val: v.Str.GetValue(),
+		}, nil
+	case *modelv1.TagValue_Id:
+		return &idTagFilter{
+			TagLocator: partition.TagLocator{
+				FamilyOffset: familyOffset,
+				TagOffset:    tagOffset,
+			},
+			op:  cond.GetOp(),
+			val: v.Id.GetValue(),
+		}, nil
+	default:
+		return nil, errUnsupportedConditionValueType
+	}
+}
+
+func (manager *topNProcessorManager) buildMapper(fieldName string, groupByNames ...string) (flow.UnaryFunc[any], error) {
+	fieldIdx := slices.IndexFunc(manager.m.GetSchema().GetFields(), func(spec *databasev1.FieldSpec) bool {
+		return spec.GetName() == fieldName
+	})
+	if fieldIdx == -1 {
+		return nil, errors.New("invalid fieldName")
+	}
+	if len(groupByNames) == 0 {
+		return func(_ context.Context, request any) any {
+			dataPoint := request.(*measurev1.DataPointValue)
+			return flow.Data{
+				// save string representation of group values as the key, i.e. v1
+				"",
+				// field value as v2
+				// TODO: we only support int64
+				dataPoint.GetFields()[fieldIdx].GetInt().GetValue(),
+				// groupBy tag values as v3
+				nil,
+			}
+		}, nil
+	}
+	groupLocator, err := newGroupLocator(manager.m.GetSchema(), groupByNames)
+	if err != nil {
+		return nil, err
+	}
+	return func(_ context.Context, request any) any {
+		dataPoint := request.(*measurev1.DataPointValue)
+		return flow.Data{
+			// save string representation of group values as the key, i.e. v1
+			strings.Join(transform(groupLocator, func(locator partition.TagLocator) string {
+				return stringify(dataPoint.GetTagFamilies()[locator.FamilyOffset].GetTags()[locator.TagOffset])
+			}), "|"),
+			// field value as v2
+			// TODO: we only support int64
+			dataPoint.GetFields()[fieldIdx].GetInt().GetValue(),
+			// groupBy tag values as v3
+			transform(groupLocator, func(locator partition.TagLocator) *modelv1.TagValue {
+				return dataPoint.GetTagFamilies()[locator.FamilyOffset].GetTags()[locator.TagOffset]
+			}),
+		}
+	}, nil
+}
+
+// topNProcessCallback listens to the pipeline for write requests
+type topNProcessCallback struct {
+	l          *logger.Logger
+	schemaRepo *schemaRepo
+}
+
+func setUpStreamingProcessCallback(l *logger.Logger, schemaRepo *schemaRepo) bus.MessageListener {
+	return &topNProcessCallback{
+		l:          l,
+		schemaRepo: schemaRepo,
+	}
+}
+
+func (cb *topNProcessCallback) Rev(message bus.Message) (resp bus.Message) {
+	writeEvent, ok := message.Data().(*measurev1.InternalWriteRequest)
+	if !ok {
+		cb.l.Warn().Msg("invalid event data type")
+		return
+	}
+
+	// first, make sure the measure exists
+	m, ok := cb.schemaRepo.loadMeasure(writeEvent.GetRequest().GetMetadata())
+	if !ok {
+		cb.l.Warn().Msg("cannot find measure definition")
+		return
+	}
+
+	err := m.processorManager.onMeasureWrite(writeEvent.GetRequest())
+	if err != nil {
+		cb.l.Debug().Err(err).Msg("fail to send to the streaming processor")
+	}
+
+	return
+}
+
+var (
+	_ conditionFilter = (*strTagFilter)(nil)
+	_ conditionFilter = (*int64TagFilter)(nil)
+)
+
+type conditionFilter interface {
+	predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool
+}
+
+type strTagFilter struct {
+	partition.TagLocator
+	op  modelv1.Condition_BinaryOp
+	val string
+}
+
+func (f *strTagFilter) predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool {
+	strValue := tagFamilies[f.FamilyOffset].GetTags()[f.TagOffset].GetStr().GetValue()
+	switch f.op {
+	case modelv1.Condition_BINARY_OP_EQ:
+		return strValue == f.val
+	case modelv1.Condition_BINARY_OP_NE:
+		return strValue != f.val
+	}
+	return false
+}
+
+type idTagFilter struct {
+	partition.TagLocator
+	op  modelv1.Condition_BinaryOp
+	val string
+}
+
+func (f *idTagFilter) predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool {
+	val := tagFamilies[f.FamilyOffset].GetTags()[f.TagOffset].GetId().GetValue()
+	switch f.op {
+	case modelv1.Condition_BINARY_OP_EQ:
+		return val == f.val
+	case modelv1.Condition_BINARY_OP_NE:
+		return val != f.val
+	}
+	return false
+}
+
+type int64TagFilter struct {
+	partition.TagLocator
+	op  modelv1.Condition_BinaryOp
+	val int64
+}
+
+func (f *int64TagFilter) predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool {
+	val := tagFamilies[f.FamilyOffset].GetTags()[f.TagOffset].GetInt().GetValue()
+	switch f.op {
+	case modelv1.Condition_BINARY_OP_EQ:
+		return val == f.val
+	case modelv1.Condition_BINARY_OP_NE:
+		return val != f.val
+	case modelv1.Condition_BINARY_OP_GE:
+		return val >= f.val
+	case modelv1.Condition_BINARY_OP_GT:
+		return val > f.val
+	case modelv1.Condition_BINARY_OP_LE:
+		return val <= f.val
+	case modelv1.Condition_BINARY_OP_LT:
+		return val < f.val
+	}
+	return false
+}
+
+type andFilter struct {
+	l, r conditionFilter
+}
+
+func (f *andFilter) predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool {
+	return f.l.predicate(tagFamilies) && f.r.predicate(tagFamilies)
+}
+
+type orFilter struct {
+	l, r conditionFilter
+}
+
+func (f *orFilter) predicate(tagFamilies []*modelv1.TagFamilyForWrite) bool {
+	return f.l.predicate(tagFamilies) || f.r.predicate(tagFamilies)
+}
+
+// groupTagsLocator can be used to locate tags within families
+type groupTagsLocator []partition.TagLocator
+
+// newGroupLocator generates a groupTagsLocator which strictly preserves the order of groupByNames
+func newGroupLocator(m *databasev1.Measure, groupByNames []string) (groupTagsLocator, error) {
+	groupTags := make([]partition.TagLocator, 0, len(groupByNames))
+	for _, groupByName := range groupByNames {
+		fIdx, tIdx, spec := pbv1.FindTagByName(m.GetTagFamilies(), groupByName)
+		if spec == nil {
+			return nil, errors.New("tag is not found")
+		}
+		groupTags = append(groupTags, partition.TagLocator{
+			FamilyOffset: fIdx,
+			TagOffset:    tIdx,
+		})
+	}
+	return groupTags, nil
+}
+
+func stringify(tagValue *modelv1.TagValue) string {
+	switch v := tagValue.GetValue().(type) {
+	case *modelv1.TagValue_Str:
+		return v.Str.GetValue()
+	case *modelv1.TagValue_Id:
+		return v.Id.GetValue()
+	case *modelv1.TagValue_Int:
+		return strconv.FormatInt(v.Int.GetValue(), 10)
+	case *modelv1.TagValue_BinaryData:
+		return base64.StdEncoding.EncodeToString(v.BinaryData)
+	case *modelv1.TagValue_IntArray:
+		return strings.Join(transform(v.IntArray.GetValue(), func(num int64) string {
+			return strconv.FormatInt(num, 10)
+		}), ",")
+	case *modelv1.TagValue_StrArray:
+		return strings.Join(v.StrArray.GetValue(), ",")
+	default:
+		return "<nil>"
+	}
+}
+
+func transform[I, O any](input []I, mapper func(I) O) []O {
+	output := make([]O, len(input))
+	for i := range input {
+		output[i] = mapper(input[i])
+	}
+	return output
+}
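
To make the bucketing in writeStreamRecord concrete: downSampleTimeBucket floors an event timestamp to the start of its aggregation window, and timeBucketFormat renders it at minute granularity. A runnable sketch with hypothetical sample values:

    package main

    import (
        "fmt"
        "time"
    )

    // timeBucketFormat is copied from measure_topn.go above.
    const timeBucketFormat = "200601021504"

    // downSampleTimeBucket mirrors the processor's logic: floor an event
    // timestamp (milliseconds) to the start of its aggregation window.
    func downSampleTimeBucket(eventTimeMillis int64, interval time.Duration) time.Time {
        return time.UnixMilli(eventTimeMillis - eventTimeMillis%interval.Milliseconds())
    }

    func main() {
        ts := time.Date(2022, 10, 15, 8, 27, 38, 0, time.UTC).UnixMilli()
        bucket := downSampleTimeBucket(ts, time.Minute)
        fmt.Println(bucket.UTC().Format(timeBucketFormat)) // 202210150827
        // A measureID then concatenates groupValues, rank, and this bucket,
        // e.g. "svc-a_0_202210150827" (hypothetical group value).
    }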
diff --git a/banyand/measure/measure_write.go b/banyand/measure/measure_write.go
index 22001cd..5dae6e1 100644
--- a/banyand/measure/measure_write.go
+++ b/banyand/measure/measure_write.go
@@ -23,7 +23,6 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/apache/skywalking-banyandb/api/common"
-	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
 	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
@@ -35,13 +34,9 @@ import (
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
 )
 
-var (
-	ErrMalformedElement   = errors.New("element is malformed")
-	ErrMalformedFieldFlag = errors.New("field flag is malformed")
-
-	TagFlag []byte = make([]byte, fieldFlagLength)
-)
+var ErrMalformedElement = errors.New("element is malformed")
 
+// Write is for testing
 func (s *measure) Write(value *measurev1.DataPointValue) error {
 	entity, shardID, err := s.entityLocator.Locate(s.name, value.GetTagFamilies(), s.shardNum)
 	if err != nil {
@@ -55,6 +50,15 @@ func (s *measure) Write(value *measurev1.DataPointValue) error {
 		close(waitCh)
 		return err
 	}
+	// send to stream processor
+	err = s.processorManager.onMeasureWrite(&measurev1.WriteRequest{
+		Metadata:  s.GetMetadata(),
+		DataPoint: value,
+	})
+	if err != nil {
+		close(waitCh)
+		return err
+	}
 	<-waitCh
 	return nil
 }
@@ -72,7 +76,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 	if fLen > len(sm.TagFamilies) {
 		return errors.Wrap(ErrMalformedElement, "tag family number is more than expected")
 	}
-	shard, err := s.db.SupplyTSDB().Shard(shardID)
+	shard, err := s.databaseSupplier.SupplyTSDB().Shard(shardID)
 	if err != nil {
 		return err
 	}
@@ -95,7 +99,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 			if errMarshal != nil {
 				return nil, errMarshal
 			}
-			builder.Family(familyIdentity(spec.GetName(), TagFlag), bb)
+			builder.Family(familyIdentity(spec.GetName(), pbv1.TagFlag), bb)
 		}
 		if len(value.GetFields()) > len(sm.GetFields()) {
 			return nil, errors.Wrap(ErrMalformedElement, "fields number is more than expected")
@@ -113,7 +117,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 			if data == nil {
 				continue
 			}
-			builder.Family(familyIdentity(sm.GetFields()[fi].GetName(), encoderFieldFlag(fieldSpec, s.interval)), data)
+			builder.Family(familyIdentity(sm.GetFields()[fi].GetName(), pbv1.EncoderFieldFlag(fieldSpec, s.interval)), data)
 		}
 		writer, errWrite := builder.Build()
 		if errWrite != nil {
@@ -153,12 +157,11 @@ type writeCallback struct {
 	schemaRepo *schemaRepo
 }
 
-func setUpWriteCallback(l *logger.Logger, schemaRepo *schemaRepo) *writeCallback {
-	wcb := &writeCallback{
+func setUpWriteCallback(l *logger.Logger, schemaRepo *schemaRepo) bus.MessageListener {
+	return &writeCallback{
 		l:          l,
 		schemaRepo: schemaRepo,
 	}
-	return wcb
 }
 
 func (w *writeCallback) Rev(message bus.Message) (resp bus.Message) {
@@ -195,15 +198,3 @@ func encodeFieldValue(fieldValue *modelv1.FieldValue) []byte {
 	}
 	return nil
 }
-
-func decodeFieldValue(fieldValue []byte, fieldSpec *databasev1.FieldSpec) *modelv1.FieldValue {
-	switch fieldSpec.GetFieldType() {
-	case databasev1.FieldType_FIELD_TYPE_STRING:
-		return &modelv1.FieldValue{Value: &modelv1.FieldValue_Str{Str: &modelv1.Str{Value: string(fieldValue)}}}
-	case databasev1.FieldType_FIELD_TYPE_INT:
-		return &modelv1.FieldValue{Value: &modelv1.FieldValue_Int{Int: &modelv1.Int{Value: convert.BytesToInt64(fieldValue)}}}
-	case databasev1.FieldType_FIELD_TYPE_DATA_BINARY:
-		return &modelv1.FieldValue{Value: &modelv1.FieldValue_BinaryData{BinaryData: fieldValue}}
-	}
-	return &modelv1.FieldValue{Value: &modelv1.FieldValue_Null{}}
-}
diff --git a/banyand/measure/metadata.go b/banyand/measure/metadata.go
index 7cd0d46..4bddaa6 100644
--- a/banyand/measure/metadata.go
+++ b/banyand/measure/metadata.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure
 
 import (
@@ -57,10 +58,10 @@ func newSchemaRepo(path string, metadata metadata.Repo, repo discovery.ServiceRe
 	}
 }
 
-func (sr *schemaRepo) OnAddOrUpdate(m schema.Metadata) {
-	switch m.Kind {
+func (sr *schemaRepo) OnAddOrUpdate(metadata schema.Metadata) {
+	switch metadata.Kind {
 	case schema.KindGroup:
-		g := m.Spec.(*commonv1.Group)
+		g := metadata.Spec.(*commonv1.Group)
 		if g.Catalog != commonv1.Catalog_CATALOG_MEASURE {
 			return
 		}
@@ -73,10 +74,10 @@ func (sr *schemaRepo) OnAddOrUpdate(m schema.Metadata) {
 		sr.SendMetadataEvent(resourceSchema.MetadataEvent{
 			Typ:      resourceSchema.EventAddOrUpdate,
 			Kind:     resourceSchema.EventKindResource,
-			Metadata: m.Spec.(*databasev1.Measure).GetMetadata(),
+			Metadata: metadata.Spec.(*databasev1.Measure).GetMetadata(),
 		})
 	case schema.KindIndexRuleBinding:
-		irb, ok := m.Spec.(*databasev1.IndexRuleBinding)
+		irb, ok := metadata.Spec.(*databasev1.IndexRuleBinding)
 		if !ok {
 			sr.l.Warn().Msg("fail to convert message to IndexRuleBinding")
 			return
@@ -85,7 +86,7 @@ func (sr *schemaRepo) OnAddOrUpdate(m schema.Metadata) {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 			stm, err := sr.metadata.MeasureRegistry().GetMeasure(ctx, &commonv1.Metadata{
 				Name:  irb.GetSubject().GetName(),
-				Group: m.Group,
+				Group: metadata.Group,
 			})
 			cancel()
 			if err != nil {
@@ -100,8 +101,8 @@ func (sr *schemaRepo) OnAddOrUpdate(m schema.Metadata) {
 		}
 	case schema.KindIndexRule:
 		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-		subjects, err := sr.metadata.Subjects(ctx, m.Spec.(*databasev1.IndexRule), commonv1.Catalog_CATALOG_MEASURE)
-		cancel()
+		defer cancel()
+		subjects, err := sr.metadata.Subjects(ctx, metadata.Spec.(*databasev1.IndexRule), commonv1.Catalog_CATALOG_MEASURE)
 		if err != nil {
 			sr.l.Error().Err(err).Msg("fail to get subjects(measure)")
 			return
@@ -113,14 +114,20 @@ func (sr *schemaRepo) OnAddOrUpdate(m schema.Metadata) {
 				Metadata: sub.(*databasev1.Measure).GetMetadata(),
 			})
 		}
+	case schema.KindTopNAggregation:
+		sr.SendMetadataEvent(resourceSchema.MetadataEvent{
+			Typ:      resourceSchema.EventAddOrUpdate,
+			Kind:     resourceSchema.EventKindResource,
+			Metadata: metadata.Spec.(*databasev1.TopNAggregation).GetSourceMeasure(),
+		})
 	default:
 	}
 }
 
-func (sr *schemaRepo) OnDelete(m schema.Metadata) {
-	switch m.Kind {
+func (sr *schemaRepo) OnDelete(metadata schema.Metadata) {
+	switch metadata.Kind {
 	case schema.KindGroup:
-		g := m.Spec.(*commonv1.Group)
+		g := metadata.Spec.(*commonv1.Group)
 		if g.Catalog != commonv1.Catalog_CATALOG_MEASURE {
 			return
 		}
@@ -133,27 +140,35 @@ func (sr *schemaRepo) OnDelete(m schema.Metadata) {
 		sr.SendMetadataEvent(resourceSchema.MetadataEvent{
 			Typ:      resourceSchema.EventDelete,
 			Kind:     resourceSchema.EventKindResource,
-			Metadata: m.Spec.(*databasev1.Measure).GetMetadata(),
+			Metadata: metadata.Spec.(*databasev1.Measure).GetMetadata(),
 		})
 	case schema.KindIndexRuleBinding:
-		if m.Spec.(*databasev1.IndexRuleBinding).GetSubject().Catalog == commonv1.Catalog_CATALOG_MEASURE {
+		if metadata.Spec.(*databasev1.IndexRuleBinding).GetSubject().Catalog == commonv1.Catalog_CATALOG_MEASURE {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-			stm, err := sr.metadata.MeasureRegistry().GetMeasure(ctx, &commonv1.Metadata{
-				Name:  m.Name,
-				Group: m.Group,
+			defer cancel()
+			m, err := sr.metadata.MeasureRegistry().GetMeasure(ctx, &commonv1.Metadata{
+				Name:  metadata.Name,
+				Group: metadata.Group,
 			})
-			cancel()
 			if err != nil {
 				sr.l.Error().Err(err).Msg("fail to get subject")
 				return
 			}
+			// we should update instead of delete
 			sr.SendMetadataEvent(resourceSchema.MetadataEvent{
-				Typ:      resourceSchema.EventDelete,
+				Typ:      resourceSchema.EventAddOrUpdate,
 				Kind:     resourceSchema.EventKindResource,
-				Metadata: stm.GetMetadata(),
+				Metadata: m.GetMetadata(),
 			})
 		}
 	case schema.KindIndexRule:
+	case schema.KindTopNAggregation:
+		// we should update instead of delete
+		sr.SendMetadataEvent(resourceSchema.MetadataEvent{
+			Typ:      resourceSchema.EventAddOrUpdate,
+			Kind:     resourceSchema.EventKindResource,
+			Metadata: metadata.Spec.(*databasev1.TopNAggregation).GetSourceMeasure(),
+		})
 	default:
 	}
 }
@@ -188,8 +203,9 @@ func newSupplier(path string, metadata metadata.Repo, dbOpts tsdb.DatabaseOpts,
 func (s *supplier) OpenResource(shardNum uint32, db tsdb.Supplier, spec resourceSchema.ResourceSpec) (resourceSchema.Resource, error) {
 	measureSchema := spec.Schema.(*databasev1.Measure)
 	return openMeasure(shardNum, db, measureSpec{
-		schema:     measureSchema,
-		indexRules: spec.IndexRules,
+		schema:           measureSchema,
+		indexRules:       spec.IndexRules,
+		topNAggregations: spec.Aggregations,
 	}, s.l)
 }
 
diff --git a/banyand/measure/service.go b/banyand/measure/service.go
index c43a26c..fadcc38 100644
--- a/banyand/measure/service.go
+++ b/banyand/measure/service.go
@@ -31,6 +31,7 @@ import (
 	"github.com/apache/skywalking-banyandb/banyand/metadata/schema"
 	"github.com/apache/skywalking-banyandb/banyand/queue"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
+	"github.com/apache/skywalking-banyandb/pkg/bus"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/run"
 	resourceSchema "github.com/apache/skywalking-banyandb/pkg/schema"
@@ -54,12 +55,13 @@ type service struct {
 	root   string
 	dbOpts tsdb.DatabaseOpts
 
-	schemaRepo    schemaRepo
-	writeListener *writeCallback
-	l             *logger.Logger
-	metadata      metadata.Repo
-	pipeline      queue.Queue
-	repo          discovery.ServiceRepo
+	schemaRepo      schemaRepo
+	writeListener   bus.MessageListener
+	processListener bus.MessageListener
+	l               *logger.Logger
+	metadata        metadata.Repo
+	pipeline        queue.Queue
+	repo            discovery.ServiceRepo
 	// stop channel for the service
 	stopCh chan struct{}
 }
@@ -108,29 +110,36 @@ func (s *service) PreRun() error {
 		if g.Catalog != commonv1.Catalog_CATALOG_MEASURE {
 			continue
 		}
-		gp, err := s.schemaRepo.StoreGroup(g.Metadata)
-		if err != nil {
-			return err
+		gp, innerErr := s.schemaRepo.StoreGroup(g.Metadata)
+		if innerErr != nil {
+			return innerErr
 		}
 		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
-		schemas, err := s.metadata.MeasureRegistry().ListMeasure(ctx, schema.ListOpt{Group: gp.GetSchema().GetMetadata().Name})
+		allMeasureSchemas, innerErr := s.metadata.MeasureRegistry().
+			ListMeasure(ctx, schema.ListOpt{Group: gp.GetSchema().GetMetadata().GetName()})
 		cancel()
-		if err != nil {
-			return err
+		if innerErr != nil {
+			return innerErr
 		}
-		for _, sa := range schemas {
-			if _, innerErr := gp.StoreResource(sa); innerErr != nil {
+		for _, measureSchema := range allMeasureSchemas {
+			if _, innerErr := gp.StoreResource(measureSchema); innerErr != nil {
 				return innerErr
 			}
 		}
 	}
 
 	s.writeListener = setUpWriteCallback(s.l, &s.schemaRepo)
+	err = s.pipeline.Subscribe(data.TopicMeasureWrite, s.writeListener)
+	if err != nil {
+		return err
+	}
 
-	errWrite := s.pipeline.Subscribe(data.TopicMeasureWrite, s.writeListener)
-	if errWrite != nil {
-		return errWrite
+	s.processListener = setUpStreamingProcessCallback(s.l, &s.schemaRepo)
+	err = s.pipeline.Subscribe(data.TopicMeasureWrite, s.processListener)
+	if err != nil {
+		return err
 	}
+
 	return nil
 }
 
@@ -139,8 +148,9 @@ func (s *service) Serve() run.StopNotify {
 	// run a serial watcher
 	go s.schemaRepo.Watcher()
 
-	s.metadata.MeasureRegistry().RegisterHandler(schema.KindGroup|schema.KindMeasure|schema.KindIndexRuleBinding|schema.KindIndexRule,
-		&s.schemaRepo)
+	s.metadata.MeasureRegistry().
+		RegisterHandler(schema.KindGroup|schema.KindMeasure|schema.KindIndexRuleBinding|schema.KindIndexRule|schema.KindTopNAggregation,
+			&s.schemaRepo)
 
 	return s.stopCh
 }
diff --git a/banyand/metadata/schema/measure.go b/banyand/metadata/schema/measure.go
index 2bc1fae..7bb71c5 100644
--- a/banyand/metadata/schema/measure.go
+++ b/banyand/metadata/schema/measure.go
@@ -196,6 +196,23 @@ func (e *etcdSchemaRegistry) DeleteMeasure(ctx context.Context, metadata *common
 	})
 }
 
+func (e *etcdSchemaRegistry) TopNAggregations(ctx context.Context, metadata *commonv1.Metadata) ([]*databasev1.TopNAggregation, error) {
+	aggregations, err := e.ListTopNAggregation(ctx, ListOpt{Group: metadata.GetGroup()})
+	if err != nil {
+		return nil, err
+	}
+
+	var result []*databasev1.TopNAggregation
+	for _, aggrDef := range aggregations {
+		// keep only aggregations whose sourceMeasure matches
+		if aggrDef.GetSourceMeasure().GetName() == metadata.GetName() {
+			result = append(result, aggrDef)
+		}
+	}
+
+	return result, nil
+}
+
 func formatMeasureKey(metadata *commonv1.Metadata) string {
 	return formatKey(MeasureKeyPrefix, metadata)
 }
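
A minimal sketch of consuming the new lookup, assuming a schema.Measure registry is at hand; the group and measure names are hypothetical:

    package example

    import (
        "context"
        "fmt"

        commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
        "github.com/apache/skywalking-banyandb/banyand/metadata/schema"
    )

    // listSourceAggregations resolves every TopNAggregation declared against a
    // source measure, mirroring how measureSpec.topNAggregations gets populated.
    func listSourceAggregations(ctx context.Context, registry schema.Measure) error {
        aggrs, err := registry.TopNAggregations(ctx, &commonv1.Metadata{Group: "sw_metric", Name: "service_cpm"})
        if err != nil {
            return err
        }
        for _, aggr := range aggrs {
            fmt.Println(aggr.GetMetadata().GetName())
        }
        return nil
    }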
diff --git a/banyand/metadata/schema/schema.go b/banyand/metadata/schema/schema.go
index ec52e4d..62de89c 100644
--- a/banyand/metadata/schema/schema.go
+++ b/banyand/metadata/schema/schema.go
@@ -98,6 +98,8 @@ func (tm TypeMeta) Unmarshal(data []byte) (m proto.Message, err error) {
 		m = &databasev1.IndexRule{}
 	case KindProperty:
 		m = &propertyv1.Property{}
+	case KindTopNAggregation:
+		m = &databasev1.TopNAggregation{}
 	default:
 		return nil, ErrUnsupportedEntityType
 	}
@@ -134,6 +136,11 @@ func (m Metadata) Key() (string, error) {
 			Group: m.Group,
 			Name:  m.Name,
 		}), nil
+	case KindTopNAggregation:
+		return formatTopNAggregationKey(&commonv1.Metadata{
+			Group: m.Group,
+			Name:  m.Name,
+		}), nil
 	default:
 		return "", ErrUnsupportedEntityType
 	}
@@ -183,6 +190,7 @@ type Measure interface {
 	UpdateMeasure(ctx context.Context, measure *databasev1.Measure) error
 	DeleteMeasure(ctx context.Context, metadata *commonv1.Metadata) (bool, error)
 	RegisterHandler(Kind, EventHandler)
+	TopNAggregations(ctx context.Context, metadata *commonv1.Metadata) ([]*databasev1.TopNAggregation, error)
 }
 
 type Group interface {
diff --git a/banyand/query/processor.go b/banyand/query/processor.go
index 64d6146..cf9ac2e 100644
--- a/banyand/query/processor.go
+++ b/banyand/query/processor.go
@@ -47,6 +47,7 @@ var (
 	_ Executor            = (*queryService)(nil)
 	_ bus.MessageListener = (*streamQueryProcessor)(nil)
 	_ bus.MessageListener = (*measureQueryProcessor)(nil)
+	_ bus.MessageListener = (*topNQueryProcessor)(nil)
 )
 
 type queryService struct {
@@ -56,6 +57,7 @@ type queryService struct {
 	pipeline    queue.Queue
 	sqp         *streamQueryProcessor
 	mqp         *measureQueryProcessor
+	tqp         *topNQueryProcessor
 }
 
 type streamQueryProcessor struct {
@@ -178,8 +180,9 @@ func (q *queryService) Name() string {
 
 func (q *queryService) PreRun() error {
 	q.log = logger.GetLogger(moduleName)
-	var err error
-	err = multierr.Append(err, q.pipeline.Subscribe(data.TopicStreamQuery, q.sqp))
-	err = multierr.Append(err, q.pipeline.Subscribe(data.TopicMeasureQuery, q.mqp))
-	return err
+	return multierr.Combine(
+		q.pipeline.Subscribe(data.TopicStreamQuery, q.sqp),
+		q.pipeline.Subscribe(data.TopicMeasureQuery, q.mqp),
+		q.pipeline.Subscribe(data.TopicTopNQuery, q.tqp),
+	)
 }
diff --git a/banyand/query/processor_topn.go b/banyand/query/processor_topn.go
new file mode 100644
index 0000000..e8aa712
--- /dev/null
+++ b/banyand/query/processor_topn.go
@@ -0,0 +1,439 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package query
+
+import (
+	"bytes"
+	"container/heap"
+	"context"
+	"math"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
+	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
+	"github.com/apache/skywalking-banyandb/banyand/measure"
+	"github.com/apache/skywalking-banyandb/banyand/tsdb"
+	"github.com/apache/skywalking-banyandb/pkg/bus"
+	"github.com/apache/skywalking-banyandb/pkg/convert"
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
+	"github.com/apache/skywalking-banyandb/pkg/query/aggregation"
+	"github.com/apache/skywalking-banyandb/pkg/timestamp"
+)
+
+type topNQueryProcessor struct {
+	measureService measure.Service
+	*queryService
+}
+
+func (t *topNQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
+	request, ok := message.Data().(*measurev1.TopNRequest)
+	if !ok {
+		t.log.Warn().Msg("invalid event data type")
+		return
+	}
+	if request.GetFieldValueSort() == modelv1.Sort_SORT_UNSPECIFIED {
+		t.log.Warn().Msg("invalid requested sort direction")
+		return
+	}
+	t.log.Info().Msg("received a topN query event")
+	topNMetadata := request.GetMetadata()
+	topNSchema, err := t.metaService.TopNAggregationRegistry().GetTopNAggregation(context.TODO(), topNMetadata)
+	if err != nil {
+		t.log.Error().Err(err).
+			Str("topN", topNMetadata.GetName()).
+			Msg("fail to get execution context")
+		return
+	}
+	if topNSchema.GetFieldValueSort() != modelv1.Sort_SORT_UNSPECIFIED &&
+		topNSchema.GetFieldValueSort() != request.GetFieldValueSort() {
+		t.log.Warn().Msg("unmatched sort direction")
+		return
+	}
+	sourceMeasure, err := t.measureService.Measure(topNSchema.GetSourceMeasure())
+	if err != nil {
+		t.log.Error().Err(err).
+			Str("topN", topNMetadata.GetName()).
+			Msg("fail to find source measure")
+		return
+	}
+	shards, err := sourceMeasure.CompanionShards(topNMetadata)
+	if err != nil {
+		t.log.Error().Err(err).
+			Str("topN", topNMetadata.GetName()).
+			Msg("fail to list shards")
+		return
+	}
+	aggregator := createTopNPostAggregator(request.GetTopN(),
+		request.GetAgg(), request.GetFieldValueSort())
+	entity, err := locateEntity(topNSchema, request.GetConditions())
+	if err != nil {
+		t.log.Error().Err(err).
+			Str("topN", topNMetadata.GetName()).
+			Msg("fail to parse entity")
+		return
+	}
+	for _, shard := range shards {
+		// TODO: support condition
+		sl, innerErr := shard.Series().List(tsdb.NewPath(entity))
+		if innerErr != nil {
+			t.log.Error().Err(innerErr).
+				Str("topN", topNMetadata.GetName()).
+				Msg("fail to list series")
+			return
+		}
+		for _, series := range sl {
+			iters, scanErr := t.scanSeries(series, request)
+			if scanErr != nil {
+				t.log.Error().Err(scanErr).
+					Str("topN", topNMetadata.GetName()).
+					Msg("fail to scan series")
+				return
+			}
+			for _, iter := range iters {
+				for iter.Next() {
+					tuple, parseErr := parseTopNFamily(iter.Val(), sourceMeasure.GetInterval())
+					if parseErr != nil {
+						t.log.Error().Err(parseErr).
+							Str("topN", topNMetadata.GetName()).
+							Msg("fail to parse topN family")
+						return
+					}
+					_ = aggregator.put(tuple.V1.(string), tuple.V2.(int64), iter.Val().Time())
+				}
+				_ = iter.Close()
+			}
+		}
+	}
+
+	now := time.Now().UnixNano()
+	resp = bus.NewMessage(bus.MessageID(now), aggregator.val())
+
+	return
+}
+
+func locateEntity(topNSchema *databasev1.TopNAggregation, conditions []*modelv1.Condition) (tsdb.Entity, error) {
+	entityMap := make(map[string]int)
+	entity := make([]tsdb.Entry, 1+len(topNSchema.GetGroupByTagNames()))
+	entity[0] = tsdb.AnyEntry
+	for idx, tagName := range topNSchema.GetGroupByTagNames() {
+		entityMap[tagName] = idx + 1
+		// fill AnyEntry by default
+		entity[idx+1] = tsdb.AnyEntry
+	}
+	for _, pairQuery := range conditions {
+		if pairQuery.GetOp() != modelv1.Condition_BINARY_OP_EQ {
+			return nil, errors.New("op other than EQ is not supported")
+		}
+		if entityIdx, ok := entityMap[pairQuery.GetName()]; ok {
+			switch v := pairQuery.GetValue().GetValue().(type) {
+			case *modelv1.TagValue_Str:
+				entity[entityIdx] = []byte(v.Str.GetValue())
+			case *modelv1.TagValue_Id:
+				entity[entityIdx] = []byte(v.Id.GetValue())
+			case *modelv1.TagValue_Int:
+				entity[entityIdx] = convert.Int64ToBytes(v.Int.GetValue())
+			default:
+				return nil, errors.New("unsupported condition tag type for entity")
+			}
+			continue
+		}
+		return nil, errors.New("only groupBy tag name is supported")
+	}
+	return entity, nil
+}
+
+func parseTopNFamily(item tsdb.Item, interval time.Duration) (*streaming.Tuple2, error) {
+	familyRawBytes, err := item.Family(familyIdentity(measure.TopNTagFamily, pbv1.TagFlag))
+	if err != nil {
+		return nil, err
+	}
+	tagFamily := &modelv1.TagFamilyForWrite{}
+	err = proto.Unmarshal(familyRawBytes, tagFamily)
+	if err != nil {
+		return nil, err
+	}
+	fieldBytes, err := item.Family(familyIdentity(measure.TopNValueFieldSpec.GetName(),
+		pbv1.EncoderFieldFlag(measure.TopNValueFieldSpec, interval)))
+	if err != nil {
+		return nil, err
+	}
+	fieldValue := pbv1.DecodeFieldValue(fieldBytes, measure.TopNValueFieldSpec)
+	return &streaming.Tuple2{
+		// GroupValues
+		V1: tagFamily.GetTags()[1].GetStr().GetValue(),
+		// FieldValue
+		V2: fieldValue.GetInt().GetValue(),
+	}, nil
+}
+
+func familyIdentity(name string, flag []byte) []byte {
+	return bytes.Join([][]byte{tsdb.Hash([]byte(name)), flag}, nil)
+}
+
+func (t *topNQueryProcessor) scanSeries(series tsdb.Series, request *measurev1.TopNRequest) ([]tsdb.Iterator, error) {
+	seriesSpan, err := series.Span(timestamp.NewInclusiveTimeRange(
+		request.GetTimeRange().GetBegin().AsTime(),
+		request.GetTimeRange().GetEnd().AsTime()),
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer func(seriesSpan tsdb.SeriesSpan) {
+		_ = seriesSpan.Close()
+	}(seriesSpan)
+	seeker, err := seriesSpan.SeekerBuilder().OrderByTime(modelv1.Sort_SORT_ASC).Build()
+	if err != nil {
+		return nil, err
+	}
+	return seeker.Seek()
+}
+
+var _ heap.Interface = (*postAggregationProcessor)(nil)
+
+type aggregatorItem struct {
+	key       string
+	int64Func aggregation.Int64Func
+	index     int
+}
+
+// postProcessor defines the methods required by a Top-N post-processor, with or without aggregation
+type postProcessor interface {
+	put(key string, val int64, timestampMillis uint64) error
+	val() []*measurev1.TopNList
+}
+
+func createTopNPostAggregator(topN int32, aggrFunc modelv1.AggregationFunction, sort modelv1.Sort) postProcessor {
+	if aggrFunc == modelv1.AggregationFunction_AGGREGATION_FUNCTION_UNSPECIFIED {
+		// if aggregation is not specified, we have to keep all timelines
+		return &postNonAggregationProcessor{
+			topN:      topN,
+			sort:      sort,
+			timelines: make(map[uint64]*flow.DedupPriorityQueue),
+		}
+	}
+	aggregator := &postAggregationProcessor{
+		topN:     topN,
+		sort:     sort,
+		aggrFunc: aggrFunc,
+		cache:    make(map[string]*aggregatorItem),
+	}
+	heap.Init(aggregator)
+	return aggregator
+}
+
+// postAggregationProcessor is an implementation of postProcessor with aggregation
+type postAggregationProcessor struct {
+	topN            int32
+	sort            modelv1.Sort
+	aggrFunc        modelv1.AggregationFunction
+	items           []*aggregatorItem
+	cache           map[string]*aggregatorItem
+	latestTimestamp uint64
+}
+
+func (aggr postAggregationProcessor) Len() int {
+	return len(aggr.items)
+}
+
+func (aggr postAggregationProcessor) Less(i, j int) bool {
+	if aggr.sort == modelv1.Sort_SORT_DESC {
+		return aggr.items[i].int64Func.Val() > aggr.items[j].int64Func.Val()
+	}
+	return aggr.items[i].int64Func.Val() < aggr.items[j].int64Func.Val()
+}
+
+func (aggr *postAggregationProcessor) Swap(i, j int) {
+	aggr.items[i], aggr.items[j] = aggr.items[j], aggr.items[i]
+	aggr.items[i].index = i
+	aggr.items[j].index = j
+}
+
+func (aggr *postAggregationProcessor) Push(x any) {
+	n := len(aggr.items)
+	item := x.(*aggregatorItem)
+	item.index = n
+	aggr.items = append(aggr.items, item)
+}
+
+func (aggr *postAggregationProcessor) Pop() any {
+	old := aggr.items
+	n := len(old)
+	item := old[n-1]
+	old[n-1] = nil
+	item.index = -1
+	aggr.items = old[0 : n-1]
+	return item
+}
+
+func (aggr *postAggregationProcessor) put(key string, val int64, timestampMillis uint64) error {
+	// update latest ts
+	if aggr.latestTimestamp < timestampMillis {
+		aggr.latestTimestamp = timestampMillis
+	}
+	if item, found := aggr.cache[key]; found {
+		item.int64Func.In(val)
+		heap.Fix(aggr, item.index)
+		return nil
+	}
+	aggrFunc, err := aggregation.NewInt64Func(aggr.aggrFunc)
+	if err != nil {
+		return err
+	}
+	item := &aggregatorItem{
+		key:       key,
+		int64Func: aggrFunc,
+	}
+	item.int64Func.In(val)
+	aggr.cache[key] = item
+	heap.Push(aggr, item)
+	return nil
+}
+
+func (aggr *postAggregationProcessor) val() []*measurev1.TopNList {
+	itemLen := int(math.Min(float64(aggr.topN), float64(aggr.Len())))
+	topNItems := make([]*measurev1.TopNList_Item, 0, itemLen)
+
+	for _, item := range aggr.items[0:itemLen] {
+		topNItems = append(topNItems, &measurev1.TopNList_Item{
+			Name: item.key,
+			Value: &modelv1.FieldValue{
+				Value: &modelv1.FieldValue_Int{
+					Int: &modelv1.Int{Value: item.int64Func.Val()},
+				},
+			},
+		})
+	}
+	return []*measurev1.TopNList{
+		{
+			Timestamp: timestamppb.New(time.Unix(0, int64(aggr.latestTimestamp))),
+			Items:     topNItems,
+		},
+	}
+}
+
+var _ flow.Element = (*nonAggregatorItem)(nil)
+
+type nonAggregatorItem struct {
+	key   string
+	val   int64
+	index int
+}
+
+func (n *nonAggregatorItem) GetIndex() int {
+	return n.index
+}
+
+func (n *nonAggregatorItem) SetIndex(i int) {
+	n.index = i
+}
+
+type postNonAggregationProcessor struct {
+	topN      int32
+	sort      modelv1.Sort
+	timelines map[uint64]*flow.DedupPriorityQueue
+}
+
+func (naggr *postNonAggregationProcessor) val() []*measurev1.TopNList {
+	topNLists := make([]*measurev1.TopNList, 0, len(naggr.timelines))
+	for ts, timeline := range naggr.timelines {
+		items := make([]*measurev1.TopNList_Item, timeline.Len())
+		for _, elem := range timeline.Values() {
+			items[elem.GetIndex()] = &measurev1.TopNList_Item{
+				Name: elem.(*nonAggregatorItem).key,
+				Value: &modelv1.FieldValue{
+					Value: &modelv1.FieldValue_Int{
+						Int: &modelv1.Int{Value: elem.(*nonAggregatorItem).val},
+					},
+				},
+			}
+		}
+		topNLists = append(topNLists, &measurev1.TopNList{
+			Timestamp: timestamppb.New(time.Unix(0, int64(ts))),
+			Items:     items,
+		})
+	}
+
+	slices.SortStableFunc(topNLists, func(a, b *measurev1.TopNList) bool {
+		if a.GetTimestamp().GetSeconds() < b.GetTimestamp().GetSeconds() {
+			return true
+		} else if a.GetTimestamp().GetSeconds() == b.GetTimestamp().GetSeconds() {
+			return a.GetTimestamp().GetNanos() < b.GetTimestamp().GetNanos()
+		}
+		return false
+	})
+
+	return topNLists
+}
+
+func (naggr *postNonAggregationProcessor) put(key string, val int64, timestampMillis uint64) error {
+	if timeline, ok := naggr.timelines[timestampMillis]; ok {
+		if timeline.Len() < int(naggr.topN) {
+			heap.Push(timeline, &nonAggregatorItem{val: val, key: key})
+		} else {
+			if right := timeline.Right(); right != nil {
+				if naggr.sort == modelv1.Sort_SORT_DESC && right.(*nonAggregatorItem).val < val {
+					heap.Push(timeline, &nonAggregatorItem{val: val, key: key})
+					newTimeline, err := timeline.WithNewItems(timeline.Slice(0, int(naggr.topN)))
+					if err != nil {
+						return err
+					}
+					naggr.timelines[timestampMillis] = newTimeline
+				} else if naggr.sort != modelv1.Sort_SORT_DESC && right.(*nonAggregatorItem).val > val {
+					heap.Push(timeline, &nonAggregatorItem{val: val, key: key})
+					newTimeline, err := timeline.WithNewItems(timeline.Slice(0, int(naggr.topN)))
+					if err != nil {
+						return err
+					}
+					naggr.timelines[timestampMillis] = newTimeline
+				}
+			}
+		}
+		return nil
+	}
+
+	timeline := flow.NewPriorityQueue(func(a, b interface{}) int {
+		if naggr.sort == modelv1.Sort_SORT_DESC {
+			if a.(*nonAggregatorItem).val < b.(*nonAggregatorItem).val {
+				return 1
+			} else if a.(*nonAggregatorItem).val == b.(*nonAggregatorItem).val {
+				return 0
+			} else {
+				return -1
+			}
+		}
+		if a.(*nonAggregatorItem).val < b.(*nonAggregatorItem).val {
+			return -1
+		} else if a.(*nonAggregatorItem).val == b.(*nonAggregatorItem).val {
+			return 0
+		} else {
+			return 1
+		}
+	}, false)
+	naggr.timelines[timestampMillis] = timeline
+	heap.Push(timeline, &nonAggregatorItem{val: val, key: key})
+
+	return nil
+}
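
To make the post-processor contract above concrete, here is a minimal usage
sketch. It assumes it runs inside the banyand/query package (both
createTopNPostAggregator and postProcessor are unexported), and the keys,
values, and timestamps are hypothetical:

    agg := createTopNPostAggregator(2, // keep at most 2 entries per list
        modelv1.AggregationFunction_AGGREGATION_FUNCTION_MEAN,
        modelv1.Sort_SORT_DESC)
    _ = agg.put("svc-a", 100, 1000) // key, field value, timestamp (ms)
    _ = agg.put("svc-b", 300, 1000)
    _ = agg.put("svc-a", 200, 2000) // folded into svc-a's MEAN, i.e. 150
    lists := agg.val() // one TopNList at the latest timestamp (2000), at most 2 items
    _ = lists
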
diff --git a/banyand/query/query.go b/banyand/query/query.go
index 1ef4d5f..267a922 100644
--- a/banyand/query/query.go
+++ b/banyand/query/query.go
@@ -40,13 +40,20 @@ func NewExecutor(_ context.Context, streamService stream.Service, measureService
 		serviceRepo: serviceRepo,
 		pipeline:    pipeline,
 	}
+	// measure query processor
 	svc.mqp = &measureQueryProcessor{
 		measureService: measureService,
 		queryService:   svc,
 	}
+	// stream query processor
 	svc.sqp = &streamQueryProcessor{
 		streamService: streamService,
 		queryService:  svc,
 	}
+	// topN query processor
+	svc.tqp = &topNQueryProcessor{
+		measureService: measureService,
+		queryService:   svc,
+	}
 	return svc, nil
 }
diff --git a/banyand/tsdb/block.go b/banyand/tsdb/block.go
index 2d930de..0043528 100644
--- a/banyand/tsdb/block.go
+++ b/banyand/tsdb/block.go
@@ -188,7 +188,7 @@ func (b *block) open() (err error) {
 	return nil
 }
 
-func (b *block) delegate() (blockDelegate, error) {
+func (b *block) delegate() (BlockDelegate, error) {
 	if b.deleted.Load() {
 		return nil, errors.WithMessagef(ErrBlockAbsent, "block %d is deleted", b.blockID)
 	}
@@ -313,7 +313,7 @@ func (b *block) stats() (names []string, stats []observability.Statistics) {
 	return names, stats
 }
 
-type blockDelegate interface {
+type BlockDelegate interface {
 	io.Closer
 	contains(ts time.Time) bool
 	write(key []byte, val []byte, ts time.Time) error
@@ -329,7 +329,7 @@ type blockDelegate interface {
 	String() string
 }
 
-var _ blockDelegate = (*bDelegate)(nil)
+var _ BlockDelegate = (*bDelegate)(nil)
 
 type bDelegate struct {
 	delegate *block
diff --git a/banyand/tsdb/scope.go b/banyand/tsdb/scope.go
index 8233d9a..a320520 100644
--- a/banyand/tsdb/scope.go
+++ b/banyand/tsdb/scope.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package tsdb
 
 import (
@@ -87,5 +88,5 @@ func (sdd *scopedSeriesDatabase) Get(entity Entity) (Series, error) {
 }
 
 func (sdd *scopedSeriesDatabase) List(path Path) (SeriesList, error) {
-	return sdd.delegated.List(path.Prepand(sdd.scope))
+	return sdd.delegated.List(path.Prepend(sdd.scope))
 }
diff --git a/banyand/tsdb/segment.go b/banyand/tsdb/segment.go
index ed1bd3c..b516bd2 100644
--- a/banyand/tsdb/segment.go
+++ b/banyand/tsdb/segment.go
@@ -239,14 +239,14 @@ func (bc *blockController) Parse(value string) (time.Time, error) {
 	panic("invalid interval unit")
 }
 
-func (bc *blockController) span(timeRange timestamp.TimeRange) ([]blockDelegate, error) {
+func (bc *blockController) span(timeRange timestamp.TimeRange) ([]BlockDelegate, error) {
 	bb := bc.search(func(b *block) bool {
 		return b.Overlapping(timeRange)
 	})
 	if bb == nil {
 		return nil, nil
 	}
-	dd := make([]blockDelegate, len(bb))
+	dd := make([]BlockDelegate, len(bb))
 	for i, b := range bb {
 		d, err := b.delegate()
 		if err != nil {
@@ -257,7 +257,7 @@ func (bc *blockController) span(timeRange timestamp.TimeRange) ([]blockDelegate,
 	return dd, nil
 }
 
-func (bc *blockController) get(blockID uint16) (blockDelegate, error) {
+func (bc *blockController) get(blockID uint16) (BlockDelegate, error) {
 	b := bc.getBlock(blockID)
 	if b != nil {
 		return b.delegate()
diff --git a/banyand/tsdb/series.go b/banyand/tsdb/series.go
index 8aa98d0..2b9c355 100644
--- a/banyand/tsdb/series.go
+++ b/banyand/tsdb/series.go
@@ -169,7 +169,7 @@ func newSeries(ctx context.Context, id common.SeriesID, blockDB blockDatabase) *
 var _ SeriesSpan = (*seriesSpan)(nil)
 
 type seriesSpan struct {
-	blocks    []blockDelegate
+	blocks    []BlockDelegate
 	seriesID  common.SeriesID
 	shardID   common.ShardID
 	timeRange timestamp.TimeRange
@@ -191,7 +191,7 @@ func (s *seriesSpan) SeekerBuilder() SeekerBuilder {
 	return newSeekerBuilder(s)
 }
 
-func newSeriesSpan(ctx context.Context, timeRange timestamp.TimeRange, blocks []blockDelegate, id common.SeriesID, shardID common.ShardID) *seriesSpan {
+func newSeriesSpan(ctx context.Context, timeRange timestamp.TimeRange, blocks []BlockDelegate, id common.SeriesID, shardID common.ShardID) *seriesSpan {
 	s := &seriesSpan{
 		blocks:    blocks,
 		seriesID:  id,
diff --git a/banyand/tsdb/series_seek_filter.go b/banyand/tsdb/series_seek_filter.go
index 52bd9c2..7e8cd4a 100644
--- a/banyand/tsdb/series_seek_filter.go
+++ b/banyand/tsdb/series_seek_filter.go
@@ -31,7 +31,7 @@ func (s *seekerBuilder) Filter(predicator index.Filter) SeekerBuilder {
 	return s
 }
 
-func (s *seekerBuilder) buildIndexFilter(block blockDelegate) (filterFn, error) {
+func (s *seekerBuilder) buildIndexFilter(block BlockDelegate) (filterFn, error) {
 	if s.predicator == nil {
 		return nil, nil
 	}
diff --git a/banyand/tsdb/series_write.go b/banyand/tsdb/series_write.go
index e3e4a22..794165f 100644
--- a/banyand/tsdb/series_write.go
+++ b/banyand/tsdb/series_write.go
@@ -47,7 +47,7 @@ var _ WriterBuilder = (*writerBuilder)(nil)
 
 type writerBuilder struct {
 	series *seriesSpan
-	block  blockDelegate
+	block  BlockDelegate
 	values []struct {
 		family []byte
 		val    []byte
@@ -132,7 +132,7 @@ func newWriterBuilder(seriesSpan *seriesSpan) WriterBuilder {
 var _ Writer = (*writer)(nil)
 
 type writer struct {
-	block   blockDelegate
+	block   BlockDelegate
 	ts      time.Time
 	columns []struct {
 		family []byte
diff --git a/banyand/tsdb/seriesdb.go b/banyand/tsdb/seriesdb.go
index bd61e44..61332f7 100644
--- a/banyand/tsdb/seriesdb.go
+++ b/banyand/tsdb/seriesdb.go
@@ -121,18 +121,18 @@ func (p *Path) extractPrefix() {
 	}
 }
 
-func (p Path) Prepand(entry Entry) Path {
+func (p Path) Prepend(entry Entry) Path {
 	e := Hash(entry)
-	prepand := func(src []byte, entry []byte) []byte {
+	prependFunc := func(src []byte, entry []byte) []byte {
 		dst := make([]byte, len(src)+len(entry))
 		copy(dst, entry)
 		copy(dst[len(entry):], src)
 		return dst
 	}
-	p.template = prepand(p.template, e)
+	p.template = prependFunc(p.template, e)
 	p.offset += len(e)
 	p.extractPrefix()
-	p.mask = prepand(p.mask, maxIntBytes)
+	p.mask = prependFunc(p.mask, maxIntBytes)
 	return p
 }
 
@@ -147,9 +147,9 @@ type SeriesDatabase interface {
 
 type blockDatabase interface {
 	shardID() common.ShardID
-	span(timeRange timestamp.TimeRange) ([]blockDelegate, error)
-	create(timeRange timestamp.TimeRange) (blockDelegate, error)
-	block(id GlobalItemID) (blockDelegate, error)
+	span(timeRange timestamp.TimeRange) ([]BlockDelegate, error)
+	create(timeRange timestamp.TimeRange) (BlockDelegate, error)
+	block(id GlobalItemID) (BlockDelegate, error)
 }
 
 var (
@@ -188,7 +188,7 @@ func (s *seriesDB) GetByID(id common.SeriesID) (Series, error) {
 	return newSeries(s.context(), id, s), nil
 }
 
-func (s *seriesDB) block(id GlobalItemID) (blockDelegate, error) {
+func (s *seriesDB) block(id GlobalItemID) (BlockDelegate, error) {
 	seg := s.segCtrl.get(id.segID)
 	if seg == nil {
 		return nil, nil
@@ -250,9 +250,9 @@ func (s *seriesDB) List(path Path) (SeriesList, error) {
 	return result, err
 }
 
-func (s *seriesDB) span(timeRange timestamp.TimeRange) ([]blockDelegate, error) {
+func (s *seriesDB) span(timeRange timestamp.TimeRange) ([]BlockDelegate, error) {
 	// TODO: return correct blocks
-	result := make([]blockDelegate, 0)
+	result := make([]BlockDelegate, 0)
 	for _, s := range s.segCtrl.span(timeRange) {
 		dd, err := s.blockController.span(timeRange)
 		if err != nil {
@@ -266,7 +266,7 @@ func (s *seriesDB) span(timeRange timestamp.TimeRange) ([]blockDelegate, error)
 	return result, nil
 }
 
-func (s *seriesDB) create(timeRange timestamp.TimeRange) (blockDelegate, error) {
+func (s *seriesDB) create(timeRange timestamp.TimeRange) (BlockDelegate, error) {
 	s.Lock()
 	defer s.Unlock()
 	ss := s.segCtrl.span(timeRange)
@@ -337,8 +337,11 @@ func newSeriesDataBase(ctx context.Context, shardID common.ShardID, path string,
 	return sdb, nil
 }
 
+// HashEntity runs a hash function (e.g. xxhash) on each entry of the Entity
+// and concatenates the resulting uint64 values into a byte array. The length of
+// the returned byte array is therefore 8 (the size of a uint64 in bytes) times
+// the number of entries in the input entity.
 func HashEntity(entity Entity) []byte {
-	result := make(Entry, 0, len(entity)*8)
+	result := make([]byte, 0, len(entity)*8)
 	for _, entry := range entity {
 		result = append(result, Hash(entry)...)
 	}
@@ -346,7 +349,7 @@ func HashEntity(entity Entity) []byte {
 }
 
 func SeriesID(entity Entity) common.SeriesID {
-	return common.SeriesID(convert.Hash((HashEntity(entity))))
+	return common.SeriesID(convert.Hash(HashEntity(entity)))
 }
 
 func Hash(entry []byte) []byte {
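
A worked example of the HashEntity contract documented above: every entry
hashes to a uint64 (8 bytes), so a three-entry entity yields a 24-byte key.
A hypothetical snippet, assuming Entity and Entry are the byte-slice-based
types used throughout this patch:

    entity := tsdb.Entity{
        tsdb.Entry("service"),
        tsdb.Entry("instance"),
        convert.Int64ToBytes(42),
    }
    key := tsdb.HashEntity(entity)
    fmt.Println(len(key)) // 24
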
diff --git a/banyand/tsdb/seriesdb_test.go b/banyand/tsdb/seriesdb_test.go
index f7163dc..f5b1875 100644
--- a/banyand/tsdb/seriesdb_test.go
+++ b/banyand/tsdb/seriesdb_test.go
@@ -316,7 +316,7 @@ func TestNewPath(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			got := NewPath(tt.entity)
 			if tt.scope != nil {
-				got = got.Prepand(tt.scope)
+				got = got.Prepend(tt.scope)
 			}
 			tester.Equal(tt.want, got)
 		})
diff --git a/dist/LICENSE b/dist/LICENSE
index e41d8fb..19ffc3a 100644
--- a/dist/LICENSE
+++ b/dist/LICENSE
@@ -247,6 +247,12 @@ BSD-2-Clause licenses
     github.com/magiconair/properties v1.8.6 BSD-2-Clause
     github.com/pkg/errors v0.9.1 BSD-2-Clause
 
+========================================================================
+BSD-2-Clause and ISC licenses
+========================================================================
+
+    github.com/emirpasic/gods v1.18.1 BSD-2-Clause and ISC
+
 ========================================================================
 BSD-3-Clause licenses
 ========================================================================
diff --git a/dist/licenses/license-github.com-emirpasic-gods.txt b/dist/licenses/license-github.com-emirpasic-gods.txt
new file mode 100644
index 0000000..e5e449b
--- /dev/null
+++ b/dist/licenses/license-github.com-emirpasic-gods.txt
@@ -0,0 +1,41 @@
+Copyright (c) 2015, Emir Pasic
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+
+AVL Tree:
+
+Copyright (c) 2017 Benjamin Scher Purcell <be...@gmail.com>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/docs/api-reference.md b/docs/api-reference.md
index c7de7c6..7e0e96b 100644
--- a/docs/api-reference.md
+++ b/docs/api-reference.md
@@ -671,7 +671,7 @@ LogicalExpression supports logical operation
 
 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
-| op | [LogicalExpression.LogicalOp](#banyandb-model-v1-LogicalExpression-LogicalOp) |  | op is a logial operation |
+| op | [LogicalExpression.LogicalOp](#banyandb-model-v1-LogicalExpression-LogicalOp) |  | op is a logical operation |
 | left | [Criteria](#banyandb-model-v1-Criteria) |  |  |
 | right | [Criteria](#banyandb-model-v1-Criteria) |  |  |
 
@@ -1026,8 +1026,9 @@ TopNAggregation generates offline TopN statistics for a measure&#39;s TopN appro
 | field_name | [string](#string) |  | field_name is the name of field used for ranking |
 | field_value_sort | [banyandb.model.v1.Sort](#banyandb-model-v1-Sort) |  | field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN &#43; bottomN |
 | group_by_tag_names | [string](#string) | repeated | group_by_tag_names groups data points into statistical counters |
-| criteria | [banyandb.model.v1.Criteria](#banyandb-model-v1-Criteria) | repeated | criteria select partial data points from measure |
+| criteria | [banyandb.model.v1.Criteria](#banyandb-model-v1-Criteria) |  | criteria selects a subset of data points from the measure |
 | counters_number | [int32](#int32) |  | counters_number sets the number of counters to be tracked. The default value is 1000 |
+| lru_size | [int32](#int32) |  | lru_size defines how many entries are allowed to be maintained in memory |
 | updated_at | [google.protobuf.Timestamp](#google-protobuf-Timestamp) |  | updated_at indicates when the measure is updated |
 
 
@@ -2320,9 +2321,9 @@ QueryRequest is the request contract for query.
 | field_projection | [QueryRequest.FieldProjection](#banyandb-measure-v1-QueryRequest-FieldProjection) |  | field_projection can be used to select fields of the data points in the response |
 | group_by | [QueryRequest.GroupBy](#banyandb-measure-v1-QueryRequest-GroupBy) |  | group_by groups data points based on their field value for a specific tag and use field_name as the projection name |
 | agg | [QueryRequest.Aggregation](#banyandb-measure-v1-QueryRequest-Aggregation) |  | agg aggregates data points based on a field |
-| top | [QueryRequest.Top](#banyandb-measure-v1-QueryRequest-Top) |  | top limits the result based on a particular field. If order_by is specificed, top sorts the dataset based on order_by&#39;s output |
-| offset | [uint32](#uint32) |  | offset is used to support pagination, together with the following limit. If top is sepcificed, offset processes the dataset based on top&#39;s output |
-| limit | [uint32](#uint32) |  | limit is used to impose a boundary on the number of records being returned. If top is sepcificed, limit processes the dataset based on top&#39;s output |
+| top | [QueryRequest.Top](#banyandb-measure-v1-QueryRequest-Top) |  | top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by&#39;s output |
+| offset | [uint32](#uint32) |  | offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top&#39;s output |
+| limit | [uint32](#uint32) |  | limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top&#39;s output |
 | order_by | [banyandb.model.v1.QueryOrder](#banyandb-model-v1-QueryOrder) |  | order_by is given to specify the sort for a tag. |
 
 
@@ -2545,7 +2546,7 @@ TopNRequest is the request contract for query.
 | time_range | [banyandb.model.v1.TimeRange](#banyandb-model-v1-TimeRange) |  | time_range is a range query with begin/end time of entities in the timeunit of milliseconds. |
 | top_n | [int32](#int32) |  | top_n sets how many items should be returned in each list. |
 | agg | [banyandb.model.v1.AggregationFunction](#banyandb-model-v1-AggregationFunction) |  | agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only |
-| conditions | [banyandb.model.v1.Condition](#banyandb-model-v1-Condition) | repeated | criteria select counters. |
+| conditions | [banyandb.model.v1.Condition](#banyandb-model-v1-Condition) | repeated | conditions select counters. Only the EQ (equality) operation is acceptable. |
 | field_value_sort | [banyandb.model.v1.Sort](#banyandb-model-v1-Sort) |  | field_value_sort indicates how to sort fields |
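
As an illustration of the TopNRequest contract documented above, a client
could build a request like the following sketch. The Go field and enum names
follow the generated banyandb packages used elsewhere in this commit; the tag
name "service_id" and the literal values are hypothetical:

    req := &measurev1.TopNRequest{
        Metadata:  topNMetadata, // metadata of the TopNAggregation schema
        TimeRange: timeRange,    // begin/end timestamps of the query window
        TopN:      3,
        Agg:       modelv1.AggregationFunction_AGGREGATION_FUNCTION_MEAN,
        Conditions: []*modelv1.Condition{{
            Name: "service_id",
            Op:   modelv1.Condition_BINARY_OP_EQ, // only EQ is acceptable
            Value: &modelv1.TagValue{Value: &modelv1.TagValue_Str{
                Str: &modelv1.Str{Value: "svc-a"},
            }},
        }},
        FieldValueSort: modelv1.Sort_SORT_DESC, // rank from highest to lowest
    }
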
 
 
diff --git a/go.mod b/go.mod
index 173cd7c..0f1e30c 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ require (
 	github.com/cespare/xxhash v1.1.0
 	github.com/dgraph-io/badger/v3 v3.2011.1
 	github.com/dgraph-io/ristretto v0.1.0
+	github.com/emirpasic/gods v1.18.1
 	github.com/envoyproxy/protoc-gen-validate v0.1.0
 	github.com/go-chi/chi/v5 v5.0.7
 	github.com/go-resty/resty/v2 v2.7.0
diff --git a/go.sum b/go.sum
index df29d43..d60ef94 100644
--- a/go.sum
+++ b/go.sum
@@ -153,6 +153,8 @@ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fp
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
diff --git a/pkg/flow/dedup_priority_queue.go b/pkg/flow/dedup_priority_queue.go
new file mode 100644
index 0000000..fbd1b2b
--- /dev/null
+++ b/pkg/flow/dedup_priority_queue.go
@@ -0,0 +1,158 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package flow
+
+import (
+	"container/heap"
+
+	"github.com/emirpasic/gods/utils"
+	"github.com/pkg/errors"
+)
+
+var _ heap.Interface = (*DedupPriorityQueue)(nil)
+
+// Element represents an item in the DedupPriorityQueue.
+type Element interface {
+	GetIndex() int
+	SetIndex(int)
+}
+
+// DedupPriorityQueue implements heap.Interface, optionally rejecting duplicated items.
+// DedupPriorityQueue is not thread-safe.
+type DedupPriorityQueue struct {
+	comparator      utils.Comparator
+	Items           []Element
+	cache           map[Element]struct{}
+	allowDuplicates bool
+}
+
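+// NewPriorityQueue creates a DedupPriorityQueue with the given comparator.
+// Duplicated items are rejected unless allowDuplicates is set.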
+func NewPriorityQueue(comparator utils.Comparator, allowDuplicates bool) *DedupPriorityQueue {
+	return &DedupPriorityQueue{
+		comparator:      comparator,
+		Items:           make([]Element, 0),
+		cache:           make(map[Element]struct{}),
+		allowDuplicates: allowDuplicates,
+	}
+}
+
+func (pq *DedupPriorityQueue) initCache() error {
+	if pq.allowDuplicates || len(pq.Items) == 0 {
+		return nil
+	}
+	for _, elem := range pq.Items {
+		if _, ok := pq.cache[elem]; !ok {
+			pq.cache[elem] = struct{}{}
+		} else {
+			return errors.New("duplicated item is not allowed")
+		}
+	}
+	return nil
+}
+
+// Len returns the DedupPriorityQueue length.
+func (pq *DedupPriorityQueue) Len() int { return len(pq.Items) }
+
+// Less is the items less comparator.
+func (pq *DedupPriorityQueue) Less(i, j int) bool {
+	return pq.comparator(pq.Items[i], pq.Items[j]) < 0
+}
+
+// Swap exchanges indexes of the items.
+func (pq *DedupPriorityQueue) Swap(i, j int) {
+	pq.Items[i], pq.Items[j] = pq.Items[j], pq.Items[i]
+	pq.Items[i].SetIndex(i)
+	pq.Items[j].SetIndex(j)
+}
+
+// Push implements heap.Interface.Push.
+// Appends an item to the DedupPriorityQueue.
+func (pq *DedupPriorityQueue) Push(x interface{}) {
+	item := x.(Element)
+	// if duplicates are not allowed,
+	if !pq.allowDuplicates {
+		// check existence in the cache first
+		if _, ok := pq.cache[item]; ok {
+			return
+		}
+		pq.cache[item] = struct{}{}
+	}
+	n := len(pq.Items)
+	item.SetIndex(n)
+	pq.Items = append(pq.Items, item)
+}
+
+// Pop implements heap.Interface.Pop.
+// Removes and returns the Len() - 1 element.
+func (pq *DedupPriorityQueue) Pop() interface{} {
+	n := len(pq.Items)
+	item := pq.Items[n-1]
+	item.SetIndex(-1) // for safety
+	delete(pq.cache, item)
+	pq.Items = pq.Items[0 : n-1]
+	return item
+}
+
+// Peek returns the first item of the DedupPriorityQueue without removing it.
+func (pq *DedupPriorityQueue) Peek() Element {
+	if len(pq.Items) > 0 {
+		return pq.Items[0]
+	}
+	return nil
+}
+
+// Slice returns the underlying items within the given bounds.
+func (pq *DedupPriorityQueue) Slice(start, end int) []Element {
+	return pq.Items[start:end]
+}
+
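+// Values returns all items of the queue, ordered by their indexes.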
+func (pq *DedupPriorityQueue) Values() []Element {
+	values := make([]Element, pq.Len())
+	for _, elem := range pq.Items {
+		values[elem.GetIndex()] = elem
+	}
+	return values
+}
+
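+// Left returns the root (first) item of the heap without removing it, or nil if the queue is empty.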
+func (pq *DedupPriorityQueue) Left() Element {
+	if pq.Len() == 0 {
+		return nil
+	}
+	return pq.Items[0]
+}
+
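+// Right returns the last item of the underlying slice without removing it, or nil if the queue is empty.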
+func (pq *DedupPriorityQueue) Right() Element {
+	if pq.Len() == 0 {
+		return nil
+	}
+	return pq.Items[pq.Len()-1]
+}
+
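+// WithNewItems returns a new DedupPriorityQueue holding the given items,
+// reusing this queue's comparator and duplication policy.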
+func (pq *DedupPriorityQueue) WithNewItems(items []Element) (*DedupPriorityQueue, error) {
+	newPq := &DedupPriorityQueue{
+		Items:           items,
+		cache:           make(map[Element]struct{}),
+		allowDuplicates: pq.allowDuplicates,
+		comparator:      pq.comparator,
+	}
+	err := newPq.initCache()
+	if err != nil {
+		return nil, err
+	}
+	return newPq, nil
+}
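
DedupPriorityQueue only supplies the heap.Interface plumbing; ordering is
driven by the standard container/heap package. A minimal sketch, assuming a
hypothetical intElem type that satisfies the Element interface:

    type intElem struct {
        v     int
        index int
    }

    func (e *intElem) GetIndex() int  { return e.index }
    func (e *intElem) SetIndex(i int) { e.index = i }

    pq := flow.NewPriorityQueue(func(a, b interface{}) int {
        return a.(*intElem).v - b.(*intElem).v // min-heap on v
    }, false) // reject duplicated elements (by identity)
    heap.Push(pq, &intElem{v: 3})
    heap.Push(pq, &intElem{v: 1})
    heap.Push(pq, &intElem{v: 2})
    fmt.Println(heap.Pop(pq).(*intElem).v) // 1
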
diff --git a/api/data/measure.go b/pkg/flow/op.go
similarity index 51%
copy from api/data/measure.go
copy to pkg/flow/op.go
index a582924..ebd9ca1 100644
--- a/api/data/measure.go
+++ b/pkg/flow/op.go
@@ -15,22 +15,32 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package data
+package flow
 
 import (
-	"github.com/apache/skywalking-banyandb/api/common"
-	"github.com/apache/skywalking-banyandb/pkg/bus"
+	"context"
 )
 
-var MeasureWriteKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-write",
+// UnaryOperation represents a user-defined unary function (e.g. Map, Filter, etc.)
+type UnaryOperation[R any] interface {
+	Apply(ctx context.Context, data interface{}) R
 }
 
-var TopicMeasureWrite = bus.UniTopic(MeasureWriteKindVersion.String())
+// UnaryFunc implements UnaryOperation as type func (context.Context, interface{})
+type UnaryFunc[R any] func(context.Context, interface{}) R
 
-var MeasureQueryKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-query",
+// Apply implements the UnaryOperation.Apply method
+func (f UnaryFunc[R]) Apply(ctx context.Context, data interface{}) R {
+	return f(ctx, data)
+}
+
+// FilterFunc transforms a filter predicate into a UnaryOperation
+func FilterFunc(filter UnaryOperation[bool]) (UnaryOperation[any], error) {
+	return UnaryFunc[any](func(ctx context.Context, payload interface{}) interface{} {
+		predicate := filter.Apply(ctx, payload)
+		if !predicate {
+			return nil
+		}
+		return payload
+	}), nil
 }
-var TopicMeasureQuery = bus.BiTopic(MeasureQueryKindVersion.String())
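
FilterFunc above adapts a boolean predicate into a pass-through mapping that
returns nil for filtered-out items. A hedged usage sketch (the predicate and
values are hypothetical):

    evenOnly := flow.UnaryFunc[bool](func(_ context.Context, data interface{}) bool {
        return data.(int)%2 == 0
    })
    op, err := flow.FilterFunc(evenOnly)
    if err != nil {
        panic(err)
    }
    fmt.Println(op.Apply(context.TODO(), 4)) // 4
    fmt.Println(op.Apply(context.TODO(), 5)) // <nil>, filtered out
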
diff --git a/pkg/flow/streaming/flow.go b/pkg/flow/streaming/flow.go
new file mode 100644
index 0000000..c6c7a82
--- /dev/null
+++ b/pkg/flow/streaming/flow.go
@@ -0,0 +1,120 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"context"
+	"time"
+
+	"go.uber.org/multierr"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+var _ flow.Flow = (*streamingFlow)(nil)
+
+type streamingFlow struct {
+	ctx    context.Context
+	source flow.Source
+	sink   flow.Sink
+	ops    []flow.Operator
+	drain  chan error
+}
+
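+// New returns a flow.Flow that reads from the given source.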
+func New(source flow.Source) flow.Flow {
+	return &streamingFlow{
+		source: source,
+		ops:    make([]flow.Operator, 0),
+		drain:  make(chan error),
+	}
+}
+
+func (f *streamingFlow) init() error {
+	f.prepareContext()
+
+	return nil
+}
+
+func (f *streamingFlow) prepareContext() {
+	if f.ctx == nil {
+		f.ctx = context.TODO()
+	}
+
+	// TODO: add more runtime utilities
+}
+
+func (f *streamingFlow) To(sink flow.Sink) flow.Flow {
+	f.sink = sink
+	return f
+}
+
+func (f *streamingFlow) Close() error {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	err := f.source.Teardown(ctx)
+	for _, op := range f.ops {
+		err = multierr.Append(err, op.Teardown(ctx))
+	}
+	defer close(f.drain)
+	return multierr.Append(err, f.sink.Teardown(ctx))
+}
+
+func (f *streamingFlow) Open() <-chan error {
+	if err := f.init(); err != nil {
+		go f.drainErr(err)
+		return f.drain
+	}
+
+	// setup sources
+	if err := f.source.Setup(f.ctx); err != nil {
+		go f.drainErr(err)
+		return f.drain
+	}
+
+	// setup all operators one by one
+	for _, op := range f.ops {
+		if err := op.Setup(f.ctx); err != nil {
+			go f.drainErr(err)
+			return f.drain
+		}
+	}
+
+	// setup sink
+	if err := f.sink.Setup(f.ctx); err != nil {
+		go f.drainErr(err)
+		return f.drain
+	}
+
+	// connect all operator and sink
+	for i := len(f.ops) - 1; i >= 0; i-- {
+		last := i == len(f.ops)-1
+		if last {
+			f.ops[i].Exec(f.sink)
+		} else {
+			f.ops[i].Exec(f.ops[i+1])
+		}
+	}
+	// finally connect sources and the first operator
+	f.source.Exec(f.ops[0])
+
+	return f.drain
+}
+
+func (f *streamingFlow) drainErr(err error) {
+	f.drain <- err
+}
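
Open wires source -> operators -> sink and returns the drain channel for
runtime errors, while Close tears the pipeline down. A lifecycle sketch,
where src, snk and keepEven are hypothetical stand-ins (the Filter and Map
builders appear in the test files later in this commit):

    f := streaming.New(src). // src is a flow.Source
        Filter(flow.UnaryFunc[bool](keepEven)).
        To(snk) // snk is a flow.Sink
    errCh := f.Open() // never nil; drains runtime errors
    go func() {
        for err := range errCh {
            log.Println("flow error:", err)
        }
    }()
    // ... consume results from the sink ...
    _ = f.Close()
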
diff --git a/pkg/flow/streaming/flow_sliding_window.go b/pkg/flow/streaming/flow_sliding_window.go
new file mode 100644
index 0000000..fb9f9c8
--- /dev/null
+++ b/pkg/flow/streaming/flow_sliding_window.go
@@ -0,0 +1,328 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"container/heap"
+	"context"
+	"math"
+	"sync"
+	"time"
+
+	lru "github.com/hashicorp/golang-lru"
+	"github.com/pkg/errors"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+type TriggerResult bool
+
+const (
+	FIRE     TriggerResult = true
+	CONTINUE TriggerResult = false
+)
+
+var (
+	_ flow.Operator       = (*TumblingTimeWindows)(nil)
+	_ flow.WindowAssigner = (*TumblingTimeWindows)(nil)
+	_ flow.Window         = (*timeWindow)(nil)
+
+	DefaultCacheSize = 2
+)
+
+func (f *streamingFlow) Window(w flow.WindowAssigner) flow.WindowedFlow {
+	switch v := w.(type) {
+	case *TumblingTimeWindows:
+		v.errorHandler = f.drainErr
+		f.ops = append(f.ops, v)
+	default:
+		f.drainErr(errors.New("window type is not supported"))
+	}
+
+	return &windowedFlow{
+		f:  f,
+		wa: w,
+	}
+}
+
+func (s *windowedFlow) AllowedMaxWindows(windowCnt int) flow.WindowedFlow {
+	switch v := s.wa.(type) {
+	case *TumblingTimeWindows:
+		v.windowCount = windowCnt
+	default:
+		s.f.drainErr(errors.New("windowCnt is not supported"))
+	}
+	return s
+}
+
+type TumblingTimeWindows struct {
+	// internal state of the tumbling time window
+	flow.ComponentState
+	// errorHandler is the error handler and set by the streamingFlow
+	errorHandler func(error)
+	// For TumblingTimeWindows
+	// Unit: Milliseconds
+	windowSize int64
+	// windowCount is the maximum allowed windows kept in the memory
+	windowCount int
+
+	// thread-safe snapshots
+	snapshots *lru.Cache
+
+	currentWatermark int64
+	// guard timerHeap
+	timerMu   sync.Mutex
+	timerHeap *flow.DedupPriorityQueue
+
+	// aggregationFactory is the factory for creating aggregation operator
+	aggregationFactory flow.AggregationOpFactory
+
+	// For api.Operator
+	in  chan flow.StreamRecord
+	out chan flow.StreamRecord
+}
+
+func (s *TumblingTimeWindows) In() chan<- flow.StreamRecord {
+	return s.in
+}
+
+func (s *TumblingTimeWindows) Out() <-chan flow.StreamRecord {
+	return s.out
+}
+
+func (s *TumblingTimeWindows) Setup(ctx context.Context) (err error) {
+	if s.snapshots == nil {
+		if s.windowCount <= 0 {
+			s.windowCount = DefaultCacheSize
+		}
+		s.snapshots, err = lru.NewWithEvict(s.windowCount, func(key interface{}, value interface{}) {
+			s.flushSnapshot(key.(timeWindow), value.(flow.AggregationOp))
+		})
+		if err != nil {
+			return err
+		}
+	}
+	// start processing
+	s.Add(1)
+	go s.receive()
+
+	return
+}
+
+func (s *TumblingTimeWindows) flushSnapshot(w timeWindow, snapshot flow.AggregationOp) {
+	if snapshot.Dirty() {
+		s.out <- flow.NewStreamRecord(snapshot.Snapshot(), w.start)
+	}
+}
+
+func (s *TumblingTimeWindows) flushWindow(w timeWindow) {
+	if snapshot, ok := s.snapshots.Get(w); ok {
+		s.flushSnapshot(w, snapshot.(flow.AggregationOp))
+	}
+}
+
+func (s *TumblingTimeWindows) flushDueWindows() {
+	s.timerMu.Lock()
+	defer s.timerMu.Unlock()
+	for {
+		if lookAhead, ok := s.timerHeap.Peek().(*internalTimer); ok {
+			if lookAhead.triggerTimeMillis <= s.currentWatermark {
+				oldestTimer := heap.Pop(s.timerHeap).(*internalTimer)
+				s.flushWindow(oldestTimer.w)
+				continue
+			}
+		}
+		return
+	}
+}
+
+func (s *TumblingTimeWindows) flushDirtyWindows() {
+	for _, key := range s.snapshots.Keys() {
+		s.flushWindow(key.(timeWindow))
+	}
+}
+
+func (s *TumblingTimeWindows) receive() {
+	defer s.Done()
+
+	for elem := range s.in {
+		assignedWindows, err := s.AssignWindows(elem.TimestampMillis())
+		if err != nil {
+			s.errorHandler(err)
+			continue
+		}
+		ctx := triggerContext{
+			delegation: s,
+		}
+		for _, w := range assignedWindows {
+			// drop if the window is late
+			if s.isWindowLate(w) {
+				continue
+			}
+			tw := w.(timeWindow)
+			ctx.window = tw
+			// add elem to the bucket
+			if oldAggr, ok := s.snapshots.Get(tw); ok {
+				oldAggr.(flow.AggregationOp).Add([]flow.StreamRecord{elem})
+			} else {
+				newAggr := s.aggregationFactory()
+				newAggr.Add([]flow.StreamRecord{elem})
+				s.snapshots.Add(tw, newAggr)
+			}
+
+			result := ctx.OnElement(elem)
+			if result == FIRE {
+				s.flushWindow(tw)
+			}
+		}
+
+		// even if the incoming elements do not arrive in strict order,
+		// the watermark still increases monotonically.
+		if pastDur := elem.TimestampMillis() - s.currentWatermark; pastDur > 0 {
+			previousWaterMark := s.currentWatermark
+			s.currentWatermark = elem.TimestampMillis()
+
+			// Currently, assuming the current watermark is t,
+			// we allow late items by flushing, without purging,
+			// the windows whose flush trigger time is less than
+			// or equal to t, i.e. triggerTime <= t
+			s.flushDueWindows()
+
+			// flush dirty windows if necessary.
+			// use 40% of the data point interval as the flush interval,
+			// which means records located in the same time bucket will roughly be persisted twice.
+			// |---------------------------------|
+			// |    40%     |    40%     |  20%  |
+			// |          flush        flush     |
+			// |---------------------------------|
+			// TODO: how to determine the threshold
+			if previousWaterMark > 0 && float64(pastDur) > float64(s.windowSize)*0.4 {
+				s.flushDirtyWindows()
+			}
+		}
+	}
+	close(s.out)
+}
+
+// isWindowLate checks whether the given window is late. The window is late if and only if
+// it meets all the following conditions:
+// 1) the max timestamp is not after (i.e. <=) the current watermark
+// 2) the LRU cache is full
+// 3) the LRU cache does not contain the window entry
+func (s *TumblingTimeWindows) isWindowLate(w flow.Window) bool {
+	return w.MaxTimestamp() <= s.currentWatermark && s.snapshots.Len() >= s.windowCount && !s.snapshots.Contains(w)
+}
+
+func (s *TumblingTimeWindows) Teardown(ctx context.Context) error {
+	s.Wait()
+	return nil
+}
+
+func (s *TumblingTimeWindows) Exec(downstream flow.Inlet) {
+	s.Add(1)
+	go flow.Transmit(&s.ComponentState, downstream, s)
+}
+
+func NewTumblingTimeWindows(size time.Duration) *TumblingTimeWindows {
+	return &TumblingTimeWindows{
+		windowSize: size.Milliseconds(),
+		timerHeap: flow.NewPriorityQueue(func(a, b interface{}) int {
+			return int(a.(*internalTimer).triggerTimeMillis - b.(*internalTimer).triggerTimeMillis)
+		}, false),
+		in:               make(chan flow.StreamRecord),
+		out:              make(chan flow.StreamRecord),
+		currentWatermark: 0,
+	}
+}
+
+type timeWindow struct {
+	start int64
+	end   int64
+}
+
+func (t timeWindow) MaxTimestamp() int64 {
+	return t.end - 1
+}
+
+// AssignWindows assigns windows according to the given timestamp
+func (s *TumblingTimeWindows) AssignWindows(timestamp int64) ([]flow.Window, error) {
+	if timestamp > math.MinInt64 {
+		start := getWindowStart(timestamp, s.windowSize)
+		return []flow.Window{
+			timeWindow{
+				start: start,
+				end:   start + s.windowSize,
+			},
+		}, nil
+	}
+	return nil, errors.New("invalid timestamp from the element")
+}
+
+// getWindowStart calculates the window start for a timestamp.
+func getWindowStart(timestamp, windowSize int64) int64 {
+	remainder := timestamp % windowSize
+	return timestamp - remainder
+}
+
+// eventTimeTriggerOnElement processes element(s) with EventTimeTrigger
+func eventTimeTriggerOnElement(window timeWindow, ctx *triggerContext) TriggerResult {
+	if window.MaxTimestamp() <= ctx.GetCurrentWatermark() {
+		// if the watermark is already past the window, fire immediately
+		return FIRE
+	}
+	ctx.RegisterEventTimeTimer(window.MaxTimestamp())
+	return CONTINUE
+}
+
+type triggerContext struct {
+	window     timeWindow
+	delegation *TumblingTimeWindows
+}
+
+func (ctx *triggerContext) GetCurrentWatermark() int64 {
+	return ctx.delegation.currentWatermark
+}
+
+func (ctx *triggerContext) RegisterEventTimeTimer(triggerTime int64) {
+	ctx.delegation.timerMu.Lock()
+	defer ctx.delegation.timerMu.Unlock()
+	heap.Push(ctx.delegation.timerHeap, &internalTimer{
+		triggerTimeMillis: triggerTime,
+		w:                 ctx.window,
+	})
+}
+
+func (ctx *triggerContext) OnElement(record flow.StreamRecord) TriggerResult {
+	return eventTimeTriggerOnElement(ctx.window, ctx)
+}
+
+var _ flow.Element = (*internalTimer)(nil)
+
+type internalTimer struct {
+	w                 timeWindow
+	triggerTimeMillis int64
+	index             int
+}
+
+func (t *internalTimer) GetIndex() int {
+	return t.index
+}
+
+func (t *internalTimer) SetIndex(idx int) {
+	t.index = idx
+}
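
Window assignment above is plain modular arithmetic, so a worked example may
help: with a 15-second (15,000 ms) tumbling window, the timestamp 1,000,007 ms
has remainder 1,000,007 % 15,000 = 10,007 and therefore falls into the window
[990000, 1005000). A sketch inside the streaming package (timeWindow is
unexported):

    w := NewTumblingTimeWindows(15 * time.Second)
    windows, _ := w.AssignWindows(1000007)
    tw := windows[0].(timeWindow)
    fmt.Println(tw.start, tw.end)  // 990000 1005000
    fmt.Println(tw.MaxTimestamp()) // 1004999
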
diff --git a/pkg/flow/streaming/flow_sliding_window_test.go b/pkg/flow/streaming/flow_sliding_window_test.go
new file mode 100644
index 0000000..a69b9f3
--- /dev/null
+++ b/pkg/flow/streaming/flow_sliding_window_test.go
@@ -0,0 +1,137 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"context"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming/sink"
+)
+
+var _ flow.AggregationOp = (*intSumAggregator)(nil)
+
+type intSumAggregator struct {
+	sum   int
+	dirty bool
+}
+
+func (i *intSumAggregator) Add(input []flow.StreamRecord) {
+	for _, item := range input {
+		i.sum += item.Data().(int)
+	}
+	if len(input) > 0 {
+		i.dirty = true
+	}
+}
+
+func (i *intSumAggregator) Snapshot() interface{} {
+	i.dirty = false
+	return i.sum
+}
+
+func (i *intSumAggregator) Dirty() bool {
+	return i.dirty
+}
+
+var _ = Describe("Sliding Window", func() {
+	var (
+		baseTs         time.Time
+		snk            *sink.Slice
+		input          []flow.StreamRecord
+		slidingWindows *TumblingTimeWindows
+
+		aggrFactory = func() flow.AggregationOp {
+			return &intSumAggregator{}
+		}
+	)
+
+	BeforeEach(func() {
+		baseTs = time.Now()
+	})
+
+	JustBeforeEach(func() {
+		snk = sink.NewSlice()
+
+		slidingWindows = NewTumblingTimeWindows(time.Second * 15)
+		slidingWindows.aggregationFactory = aggrFactory
+		slidingWindows.windowCount = 2
+
+		Expect(slidingWindows.Setup(context.TODO())).Should(Succeed())
+		Expect(snk.Setup(context.TODO())).Should(Succeed())
+		slidingWindows.Exec(snk)
+		for _, r := range input {
+			slidingWindows.In() <- r
+		}
+	})
+
+	AfterEach(func() {
+		close(slidingWindows.in)
+		Expect(slidingWindows.Teardown(context.TODO())).Should(Succeed())
+	})
+
+	When("input a single element", func() {
+		BeforeEach(func() {
+			input = []flow.StreamRecord{
+				flow.NewStreamRecord(1, baseTs.UnixMilli()),
+			}
+		})
+
+		It("Should not trigger", func() {
+			Eventually(func(g Gomega) {
+				g.Expect(snk.Value()).Should(BeEmpty())
+			}).WithTimeout(10 * time.Second).Should(Succeed())
+		})
+	})
+
+	When("input two elements within the same bucket", func() {
+		BeforeEach(func() {
+			baseTs = time.Unix(baseTs.Unix()-baseTs.Unix()%15, 0)
+			input = []flow.StreamRecord{
+				flow.NewStreamRecord(1, baseTs.UnixMilli()),
+				flow.NewStreamRecord(2, baseTs.Add(time.Second*5).UnixMilli()),
+			}
+		})
+
+		It("Should not trigger", func() {
+			Eventually(func(g Gomega) {
+				g.Expect(snk.Value()).Should(BeEmpty())
+			}).WithTimeout(10 * time.Second).Should(Succeed())
+		})
+	})
+
+	When("input two elements within adjacent buckets", func() {
+		BeforeEach(func() {
+			baseTs = time.Unix(baseTs.Unix()-baseTs.Unix()%15+14, 0)
+			input = []flow.StreamRecord{
+				flow.NewStreamRecord(1, baseTs.UnixMilli()),
+				flow.NewStreamRecord(2, baseTs.Add(time.Second*5).UnixMilli()),
+			}
+		})
+
+		It("Should trigger once due to the expiry", func() {
+			Eventually(func(g Gomega) {
+				g.Expect(snk.Value()).Should(HaveLen(1))
+			}).WithTimeout(10 * time.Second).Should(Succeed())
+		})
+	})
+})
diff --git a/pkg/flow/streaming/flow_test.go b/pkg/flow/streaming/flow_test.go
new file mode 100644
index 0000000..db03594
--- /dev/null
+++ b/pkg/flow/streaming/flow_test.go
@@ -0,0 +1,240 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"context"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming/sink"
+	flowTest "github.com/apache/skywalking-banyandb/pkg/test/flow"
+)
+
+// numberRange generates a slice with `count` number of integers starting from `begin`,
+// i.e. [begin, begin + count)
+func numberRange(begin, count int) []int {
+	result := make([]int, 0)
+	for i := 0; i < count; i++ {
+		result = append(result, begin+i)
+	}
+	return result
+}
+
+var _ = Describe("Streaming", func() {
+	var (
+		f     flow.Flow
+		snk   *sink.Slice
+		errCh <-chan error
+	)
+
+	AfterEach(func() {
+		Expect(f.Close()).Should(Succeed())
+		Consistently(errCh).ShouldNot(Receive())
+	})
+
+	Context("With Filter operator", func() {
+		var (
+			filter flow.UnaryFunc[bool]
+
+			input = flowTest.NewSlice(numberRange(0, 10))
+		)
+
+		JustBeforeEach(func() {
+			snk = sink.NewSlice()
+			f = New(input).
+				Filter(filter).
+				To(snk)
+			errCh = f.Open()
+			Expect(errCh).ShouldNot(BeNil())
+		})
+
+		When("Given an odd-number filter", func() {
+			BeforeEach(func() {
+				filter = func(ctx context.Context, i interface{}) bool {
+					return i.(int)%2 == 0
+				}
+			})
+
+			It("Should filter out odd numbers", func() {
+				Eventually(func(g Gomega) {
+					g.Expect(snk.Value()).Should(Equal([]interface{}{
+						flow.NewStreamRecordWithoutTS(0),
+						flow.NewStreamRecordWithoutTS(2),
+						flow.NewStreamRecordWithoutTS(4),
+						flow.NewStreamRecordWithoutTS(6),
+						flow.NewStreamRecordWithoutTS(8),
+					}))
+				}).Should(Succeed())
+			})
+		})
+	})
+
+	Context("With Mapper operator", func() {
+		var (
+			mapper flow.UnaryFunc[any]
+
+			input = flowTest.NewSlice(numberRange(0, 10))
+		)
+
+		JustBeforeEach(func() {
+			snk = sink.NewSlice()
+			f = New(input).
+				Map(mapper).
+				To(snk)
+			errCh = f.Open()
+			Expect(errCh).ShouldNot(BeNil())
+		})
+
+		When("given a multiplier", func() {
+			BeforeEach(func() {
+				mapper = func(ctx context.Context, i interface{}) interface{} {
+					return i.(int) * 2
+				}
+			})
+
+			It("Should multiply by 2", func() {
+				Eventually(func(g Gomega) {
+					g.Expect(snk.Value()).Should(Equal([]interface{}{
+						flow.NewStreamRecordWithoutTS(0),
+						flow.NewStreamRecordWithoutTS(2),
+						flow.NewStreamRecordWithoutTS(4),
+						flow.NewStreamRecordWithoutTS(6),
+						flow.NewStreamRecordWithoutTS(8),
+						flow.NewStreamRecordWithoutTS(10),
+						flow.NewStreamRecordWithoutTS(12),
+						flow.NewStreamRecordWithoutTS(14),
+						flow.NewStreamRecordWithoutTS(16),
+						flow.NewStreamRecordWithoutTS(18),
+					}))
+				}).Should(Succeed())
+			})
+		})
+	})
+
+	Context("With TopN operator order by ASC", func() {
+		type record struct {
+			service  string
+			instance string
+			value    int
+		}
+
+		var input []flow.StreamRecord
+
+		JustBeforeEach(func() {
+			snk = sink.NewSlice()
+
+			f = New(flowTest.NewSlice(input)).
+				Map(flow.UnaryFunc[any](func(ctx context.Context, item interface{}) interface{} {
+					// groupBy
+					return flow.Data{item.(*record).service, int64(item.(*record).value)}
+				})).
+				Window(NewTumblingTimeWindows(15*time.Second)).
+				TopN(3, WithSortKeyExtractor(func(record flow.StreamRecord) int64 {
+					return record.Data().(flow.Data)[1].(int64)
+				}), OrderBy(ASC)).
+				To(snk)
+
+			errCh = f.Open()
+			Expect(errCh).ShouldNot(BeNil())
+		})
+
+		When("Top3", func() {
+			BeforeEach(func() {
+				input = []flow.StreamRecord{
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-001", 10000}, 1000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-001", 9900}, 2000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-002", 9800}, 3000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-002", 9700}, 4000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-003", 9700}, 5000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-004", 9600}, 6000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-001", 9500}, 7000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-002", 9800}, 61000),
+				}
+			})
+
+			It("Should take top 3 elements", func() {
+				Eventually(func(g Gomega) {
+					g.Expect(len(snk.Value())).Should(BeNumerically(">=", 1))
+					g.Expect(snk.Value()[0].(flow.StreamRecord).Data()).Should(BeEquivalentTo([]*Tuple2{
+						{int64(9500), flow.NewStreamRecord(flow.Data{"e2e-service-consumer", int64(9500)}, 7000)},
+						{int64(9600), flow.NewStreamRecord(flow.Data{"e2e-service-consumer", int64(9600)}, 6000)},
+						{int64(9700), flow.NewStreamRecord(flow.Data{"e2e-service-consumer", int64(9700)}, 4000)},
+					}))
+				}).WithTimeout(10 * time.Second).Should(Succeed())
+			})
+		})
+	})
+
+	Context("With TopN operator order by DESC", func() {
+		type record struct {
+			service  string
+			instance string
+			value    int
+		}
+
+		var input []flow.StreamRecord
+
+		JustBeforeEach(func() {
+			snk = sink.NewSlice()
+
+			f = New(flowTest.NewSlice(input)).
+				Map(flow.UnaryFunc[any](func(ctx context.Context, item interface{}) interface{} {
+					// groupBy
+					return flow.Data{item.(*record).service, int64(item.(*record).value)}
+				})).
+				Window(NewTumblingTimeWindows(15*time.Second)).
+				TopN(3, WithSortKeyExtractor(func(record flow.StreamRecord) int64 {
+					return record.Data().(flow.Data)[1].(int64)
+				})).
+				To(snk)
+
+			errCh = f.Open()
+			Expect(errCh).ShouldNot(BeNil())
+		})
+
+		When("Top3", func() {
+			BeforeEach(func() {
+				input = []flow.StreamRecord{
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-001", 10000}, 1000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-001", 9900}, 2000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-002", 9800}, 3000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-002", 9700}, 4000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-003", 9700}, 5000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-004", 9600}, 6000),
+					flow.NewStreamRecord(&record{"e2e-service-consumer", "instance-001", 9500}, 7000),
+					flow.NewStreamRecord(&record{"e2e-service-provider", "instance-002", 9800}, 61000),
+				}
+			})
+
+			It("Should take top 3 elements", func() {
+				Eventually(func(g Gomega) {
+					g.Expect(len(snk.Value())).Should(BeNumerically(">=", 1))
+					g.Expect(snk.Value()[0].(flow.StreamRecord).Data()).Should(BeEquivalentTo([]*Tuple2{
+						{int64(10000), flow.NewStreamRecord(flow.Data{"e2e-service-provider", int64(10000)}, 1000)},
+						{int64(9900), flow.NewStreamRecord(flow.Data{"e2e-service-consumer", int64(9900)}, 2000)},
+						{int64(9800), flow.NewStreamRecord(flow.Data{"e2e-service-provider", int64(9800)}, 3000)},
+					}))
+				}).WithTimeout(10 * time.Second).Should(Succeed())
+			})
+		})
+	})
+})
diff --git a/pkg/flow/streaming/flow_topn.go b/pkg/flow/streaming/flow_topn.go
new file mode 100644
index 0000000..2a52e21
--- /dev/null
+++ b/pkg/flow/streaming/flow_topn.go
@@ -0,0 +1,177 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"github.com/emirpasic/gods/maps/treemap"
+	"github.com/emirpasic/gods/utils"
+	"github.com/google/go-cmp/cmp"
+	"github.com/pkg/errors"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
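+// TopNSort defines the order of the TopN aggregation results.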
+type TopNSort uint8
+
+const (
+	DESC TopNSort = iota
+	ASC
+)
+
+type windowedFlow struct {
+	f  *streamingFlow
+	wa flow.WindowAssigner
+}
+
+func (s *windowedFlow) TopN(topNum int, opts ...any) flow.Flow {
+	s.wa.(*TumblingTimeWindows).aggregationFactory = func() flow.AggregationOp {
+		topNAggrFunc := &topNAggregator{
+			cacheSize: topNum,
+			sort:      DESC,
+			dirty:     false,
+		}
+		// apply user customized options
+		for _, opt := range opts {
+			if applier, ok := opt.(TopNOption); ok {
+				applier(topNAggrFunc)
+			}
+		}
+		if topNAggrFunc.sortKeyExtractor == nil {
+			s.f.drainErr(errors.New("sortKeyExtractor must be specified"))
+		}
+		if topNAggrFunc.sort == ASC {
+			topNAggrFunc.comparator = utils.Int64Comparator
+		} else { // DESC
+			topNAggrFunc.comparator = func(a, b interface{}) int {
+				return utils.Int64Comparator(b, a)
+			}
+		}
+		topNAggrFunc.treeMap = treemap.NewWith(topNAggrFunc.comparator)
+		return topNAggrFunc
+	}
+	return s.f
+}
+
+type topNAggregator struct {
+	// cacheSize is the maximum number of entries that can be held in the buffer, i.e. the treeMap
+	cacheSize int
+	// currentTopNum indicates how many records are currently tracked.
+	// It must not exceed cacheSize.
+	currentTopNum int
+	treeMap       *treemap.Map
+	// sortKeyExtractor fetches the sort key from a record.
+	// TODO: currently we only support sorting by a numeric field, i.e. int64
+	sortKeyExtractor func(flow.StreamRecord) int64
+	// sort indicates the order of the results
+	sort       TopNSort
+	comparator utils.Comparator
+	dirty      bool
+}
+
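+// TopNOption is a functional option that customizes the topNAggregator.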
+type TopNOption func(aggregator *topNAggregator)
+
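+// WithSortKeyExtractor sets the function used to extract the sort key from a record.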
+func WithSortKeyExtractor(sortKeyExtractor func(flow.StreamRecord) int64) TopNOption {
+	return func(aggregator *topNAggregator) {
+		aggregator.sortKeyExtractor = sortKeyExtractor
+	}
+}
+
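+// OrderBy sets the order of the results; DESC is used when this option is omitted.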
+func OrderBy(sort TopNSort) TopNOption {
+	return func(aggregator *topNAggregator) {
+		aggregator.sort = sort
+	}
+}
+
+func (t *topNAggregator) Add(input []flow.StreamRecord) {
+	for _, item := range input {
+		sortKey := t.sortKeyExtractor(item)
+		// only insert the record when its sort key falls within the buffer range
+		if t.checkSortKeyInBufferRange(sortKey) {
+			t.put(sortKey, item)
+			t.doCleanUp()
+		}
+	}
+}
+
+func (t *topNAggregator) doCleanUp() {
+	// do cleanup: keep the treeMap within cacheSize
+	if t.currentTopNum > t.cacheSize {
+		lastKey, lastValues := t.treeMap.Max()
+		size := len(lastValues.([]interface{}))
+		// remove last one
+		if size <= 1 {
+			t.currentTopNum -= size
+			t.treeMap.Remove(lastKey)
+		} else {
+			t.currentTopNum--
+			t.treeMap.Put(lastKey, lastValues.([]interface{})[0:size-1])
+		}
+	}
+}
+
+func (t *topNAggregator) put(sortKey int64, data flow.StreamRecord) {
+	t.currentTopNum++
+	t.dirty = true
+	if existingList, ok := t.treeMap.Get(sortKey); ok {
+		existingList = append(existingList.([]interface{}), data)
+		t.treeMap.Put(sortKey, existingList)
+	} else {
+		t.treeMap.Put(sortKey, []interface{}{data})
+	}
+}
+
+func (t *topNAggregator) checkSortKeyInBufferRange(sortKey int64) bool {
+	// fetch the "worst" item under the comparator:
+	// - if ASC, the item with the maximum sort key
+	// - if DESC, the item with the minimum sort key
+	worstKey, _ := t.treeMap.Max()
+	if worstKey == nil {
+		// return true if the buffer is empty.
+		return true
+	}
+	if t.comparator(sortKey, worstKey.(int64)) < 0 {
+		return true
+	}
+	return t.currentTopNum < t.cacheSize
+}
+
+type Tuple2 struct {
+	V1 interface{} `json:"v1"`
+	V2 interface{} `json:"v2"`
+}
+
+func (t *Tuple2) Equal(other *Tuple2) bool {
+	return cmp.Equal(t.V1, other.V1) && cmp.Equal(t.V2, other.V2)
+}
+
+func (t *topNAggregator) Snapshot() interface{} {
+	t.dirty = false
+	iter := t.treeMap.Iterator()
+	items := make([]*Tuple2, 0, t.currentTopNum)
+	for iter.Next() {
+		list := iter.Value().([]interface{})
+		for _, item := range list {
+			items = append(items, &Tuple2{iter.Key(), item})
+		}
+	}
+	return items
+}
+
+func (t *topNAggregator) Dirty() bool {
+	return t.dirty
+}
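
To make the buffer maintenance above concrete, here is a worked trace (a sketch, not part of the commit) with cacheSize = 3 and DESC order; the DESC comparator inverts Int64Comparator, so treeMap.Max() yields the smallest key, i.e. the current "worst" entry:

    // sort keys arriving: 10, 7, 9, 8, 12   (cacheSize = 3, DESC)
    //   10              -> buffer {10}
    //   7               -> buffer {10, 7}
    //   9               -> buffer {10, 9, 7}
    //   8  (worst = 7)  -> comparator(8, 7) < 0, admit; doCleanUp evicts 7 -> {10, 9, 8}
    //   12 (worst = 8)  -> comparator(12, 8) < 0, admit; doCleanUp evicts 8 -> {12, 10, 9}
    // Snapshot() then yields []*Tuple2 in comparator order: 12, 10, 9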
diff --git a/pkg/flow/streaming/flow_topn_test.go b/pkg/flow/streaming/flow_topn_test.go
new file mode 100644
index 0000000..a00f090
--- /dev/null
+++ b/pkg/flow/streaming/flow_topn_test.go
@@ -0,0 +1,102 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"testing"
+
+	"github.com/emirpasic/gods/maps/treemap"
+	"github.com/emirpasic/gods/utils"
+	"github.com/stretchr/testify/require"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+func TestFlow_TopN_Aggregator(t *testing.T) {
+	input := []flow.StreamRecord{
+		// 1. string
+		// 2. number
+		// 3. slices of groupBy values
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 10000, []interface{}{"e2e-service-provider"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9900, []interface{}{"e2e-service-consumer"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 9800, []interface{}{"e2e-service-provider"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9700, []interface{}{"e2e-service-consumer"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 9700, []interface{}{"e2e-service-provider"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9600, []interface{}{"e2e-service-consumer"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9800, []interface{}{"e2e-service-consumer"}}),
+		flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9500, []interface{}{"e2e-service-consumer"}}),
+	}
+	tests := []struct {
+		name     string
+		sort     TopNSort
+		expected []*Tuple2
+	}{
+		{
+			name: "DESC",
+			sort: DESC,
+			expected: []*Tuple2{
+				{int64(10000), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 10000, []interface{}{"e2e-service-provider"}})},
+				{int64(9900), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9900, []interface{}{"e2e-service-consumer"}})},
+				{int64(9800), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 9800, []interface{}{"e2e-service-provider"}})},
+			},
+		},
+		{
+			name: "DESC by default",
+			sort: 0,
+			expected: []*Tuple2{
+				{int64(10000), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 10000, []interface{}{"e2e-service-provider"}})},
+				{int64(9900), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9900, []interface{}{"e2e-service-consumer"}})},
+				{int64(9800), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-provider", 9800, []interface{}{"e2e-service-provider"}})},
+			},
+		},
+		{
+			name: "ASC",
+			sort: ASC,
+			expected: []*Tuple2{
+				{int64(9500), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9500, []interface{}{"e2e-service-consumer"}})},
+				{int64(9600), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9600, []interface{}{"e2e-service-consumer"}})},
+				{int64(9700), flow.NewStreamRecordWithoutTS(flow.Data{"e2e-service-consumer", 9700, []interface{}{"e2e-service-consumer"}})},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			require := require.New(t)
+			var comparator utils.Comparator
+			if tt.sort == DESC {
+				comparator = func(a, b interface{}) int {
+					return utils.Int64Comparator(b, a)
+				}
+			} else {
+				comparator = utils.Int64Comparator
+			}
+			topN := &topNAggregator{
+				cacheSize:  3,
+				sort:       tt.sort,
+				comparator: comparator,
+				treeMap:    treemap.NewWith(comparator),
+				sortKeyExtractor: func(record flow.StreamRecord) int64 {
+					return int64(record.Data().(flow.Data)[1].(int))
+				},
+			}
+			topN.Add(input)
+			require.Len(topN.Snapshot(), 3)
+			require.Equal(tt.expected, topN.Snapshot())
+		})
+	}
+}
diff --git a/pkg/flow/streaming/flow_unary.go b/pkg/flow/streaming/flow_unary.go
new file mode 100644
index 0000000..f12f2f8
--- /dev/null
+++ b/pkg/flow/streaming/flow_unary.go
@@ -0,0 +1,109 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package streaming
+
+import (
+	"context"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+func (f *streamingFlow) Filter(predicate flow.UnaryOperation[bool]) flow.Flow {
+	op, err := flow.FilterFunc(predicate)
+	if err != nil {
+		f.drainErr(err)
+	}
+	return f.Transform(op)
+}
+
+func (f *streamingFlow) Map(mapper flow.UnaryOperation[any]) flow.Flow {
+	return f.Transform(mapper)
+}
+
+// Transform represents a general unary transformation
+// For example: filter, map, etc.
+func (f *streamingFlow) Transform(op flow.UnaryOperation[any]) flow.Flow {
+	// TODO: support parallelism
+	f.ops = append(f.ops, newUnaryOp(op, 1))
+	return f
+}
+
+var _ flow.Operator = (*unaryOperator)(nil)
+
+type unaryOperator struct {
+	flow.ComponentState
+	op          flow.UnaryOperation[any]
+	in          chan flow.StreamRecord
+	out         chan flow.StreamRecord
+	parallelism uint
+}
+
+func (u *unaryOperator) Setup(ctx context.Context) error {
+	// run a background job as a consumer
+	go u.run()
+	return nil
+}
+
+func (u *unaryOperator) Exec(downstream flow.Inlet) {
+	u.Add(1)
+	// start a background job for transmission
+	go flow.Transmit(&u.ComponentState, downstream, u)
+}
+
+func (u *unaryOperator) Teardown(ctx context.Context) error {
+	u.Wait()
+	return nil
+}
+
+func newUnaryOp(op flow.UnaryOperation[any], parallelism uint) *unaryOperator {
+	return &unaryOperator{
+		op:          op,
+		in:          make(chan flow.StreamRecord),
+		out:         make(chan flow.StreamRecord),
+		parallelism: parallelism,
+	}
+}
+
+func (u *unaryOperator) In() chan<- flow.StreamRecord {
+	return u.in
+}
+
+func (u *unaryOperator) Out() <-chan flow.StreamRecord {
+	return u.out
+}
+
+func (u *unaryOperator) run() {
+	semaphore := make(chan struct{}, u.parallelism)
+	for elem := range u.in {
+		semaphore <- struct{}{}
+		go func(r flow.StreamRecord) {
+			defer func() { <-semaphore }()
+			result := u.op.Apply(context.TODO(), r.Data())
+			switch val := result.(type) {
+			case nil:
+				return
+			default:
+				u.out <- r.WithNewData(val)
+			}
+		}(elem)
+	}
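+	// drain the semaphore: acquiring all permits guarantees every
+	// in-flight goroutine has released its permit, i.e. finished,
+	// before out is closed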
+	for i := 0; i < int(u.parallelism); i++ {
+		semaphore <- struct{}{}
+	}
+	close(u.out)
+}
diff --git a/pkg/flow/streaming/sink/slice.go b/pkg/flow/streaming/sink/slice.go
new file mode 100644
index 0000000..ad55850
--- /dev/null
+++ b/pkg/flow/streaming/sink/slice.go
@@ -0,0 +1,82 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package sink
+
+import (
+	"context"
+	"sync"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+var _ flow.Sink = (*Slice)(nil)
+
+type Slice struct {
+	sync.RWMutex
+	flow.ComponentState
+	slice []interface{}
+	in    chan flow.StreamRecord
+}
+
+func NewSlice() *Slice {
+	return &Slice{
+		slice: make([]interface{}, 0),
+		in:    make(chan flow.StreamRecord),
+	}
+}
+
+func (s *Slice) Value() []interface{} {
+	s.RLock()
+	defer s.RUnlock()
+	return s.slice
+}
+
+func (s *Slice) In() chan<- flow.StreamRecord {
+	return s.in
+}
+
+func (s *Slice) Setup(ctx context.Context) error {
+	go s.run(ctx)
+
+	return nil
+}
+
+func (s *Slice) run(ctx context.Context) {
+	s.Add(1)
+	defer func() {
+		s.Done()
+	}()
+	for {
+		select {
+		case item, ok := <-s.in:
+			if !ok {
+				return
+			}
+			s.Lock()
+			s.slice = append(s.slice, item)
+			s.Unlock()
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (s *Slice) Teardown(ctx context.Context) error {
+	s.Wait()
+	return nil
+}
diff --git a/pkg/flow/streaming/sources/channel.go b/pkg/flow/streaming/sources/channel.go
new file mode 100644
index 0000000..eb71354
--- /dev/null
+++ b/pkg/flow/streaming/sources/channel.go
@@ -0,0 +1,93 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package sources
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/pkg/errors"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+var _ flow.Source = (*sourceChan)(nil)
+
+type sourceChan struct {
+	flow.ComponentState
+	in  interface{}
+	out chan flow.StreamRecord
+}
+
+func (s *sourceChan) Out() <-chan flow.StreamRecord {
+	return s.out
+}
+
+func (s *sourceChan) Setup(ctx context.Context) error {
+	chanVal := reflect.ValueOf(s.in)
+
+	if !chanVal.IsValid() {
+		return errors.New("invalid channel")
+	}
+
+	s.Add(1)
+	go s.run(ctx, chanVal)
+	return nil
+}
+
+func (s *sourceChan) run(ctx context.Context, chanVal reflect.Value) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		cancel()
+		close(s.out)
+		s.Done()
+	}()
+
+	for {
+		val, open := chanVal.Recv()
+		if !open {
+			return
+		}
+		select {
+		case s.out <- flow.TryExactTimestamp(val.Interface()):
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (s *sourceChan) Teardown(ctx context.Context) error {
+	s.Wait()
+	return nil
+}
+
+func (s *sourceChan) Exec(downstream flow.Inlet) {
+	s.Add(1)
+	go flow.Transmit(&s.ComponentState, downstream, s)
+}
+
+func NewChannel(in interface{}) (flow.Source, error) {
+	if reflect.TypeOf(in).Kind() != reflect.Chan {
+		return nil, errors.New("in must be a Channel")
+	}
+
+	return &sourceChan{
+		in:  in,
+		out: make(chan flow.StreamRecord, 1024),
+	}, nil
+}
diff --git a/pkg/flow/streaming/sources/channel_test.go b/pkg/flow/streaming/sources/channel_test.go
new file mode 100644
index 0000000..984d513
--- /dev/null
+++ b/pkg/flow/streaming/sources/channel_test.go
@@ -0,0 +1,72 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package sources_test
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	"github.com/apache/skywalking-banyandb/pkg/flow/streaming/sources"
+)
+
+const (
+	ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+func TestSource_channel(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+
+	inlet := flow.NewMockInlet(ctrl)
+
+	srcCh := make(chan string)
+
+	assert := require.New(t)
+	src, err := sources.NewChannel(srcCh)
+	assert.NoError(err)
+	assert.NoError(src.Setup(context.TODO()))
+
+	in := make(chan flow.StreamRecord)
+	inlet.
+		EXPECT().
+		In().Times(len(ALPHABET) + 1).
+		Return(in)
+
+	src.Exec(inlet)
+
+	go func() {
+		for _, char := range strings.Split(ALPHABET, "") {
+			srcCh <- char
+		}
+		close(srcCh)
+	}()
+
+	var result strings.Builder
+	for item := range in {
+		assert.IsType(flow.StreamRecord{}, item)
+		result.WriteString(item.Data().(string))
+	}
+
+	assert.Equal(ALPHABET, result.String())
+	assert.NoError(src.Teardown(context.TODO()))
+}
diff --git a/api/data/measure.go b/pkg/flow/streaming/streaming_suite_test.go
similarity index 65%
copy from api/data/measure.go
copy to pkg/flow/streaming/streaming_suite_test.go
index a582924..746890d 100644
--- a/api/data/measure.go
+++ b/pkg/flow/streaming/streaming_suite_test.go
@@ -15,22 +15,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package data
+package streaming_test
 
 import (
-	"github.com/apache/skywalking-banyandb/api/common"
-	"github.com/apache/skywalking-banyandb/pkg/bus"
-)
-
-var MeasureWriteKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-write",
-}
+	"testing"
 
-var TopicMeasureWrite = bus.UniTopic(MeasureWriteKindVersion.String())
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
 
-var MeasureQueryKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-query",
+func TestStreaming(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Streaming Suite")
 }
-var TopicMeasureQuery = bus.BiTopic(MeasureQueryKindVersion.String())
diff --git a/pkg/flow/types.go b/pkg/flow/types.go
new file mode 100644
index 0000000..9efaefb
--- /dev/null
+++ b/pkg/flow/types.go
@@ -0,0 +1,174 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package flow
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+type Data []any
+
+// Flow is an abstraction of data flow for
+// both Streaming and Batch
+type Flow interface {
+	io.Closer
+	// Filter is used to filter data.
+	// The parameter can be either a predicate function for streaming,
+	// or conditions for a batch query.
+	Filter(UnaryOperation[bool]) Flow
+	// Map is used to transform data
+	Map(UnaryOperation[any]) Flow
+	// Window is used to split infinite data into "buckets" of finite size.
+	// Currently, it is only applicable to the streaming context.
+	Window(WindowAssigner) WindowedFlow
+	// To pipes data to the given sink
+	To(sink Sink) Flow
+	// Open opens the flow in async mode for the streaming scenario.
+	// It returns a channel that carries errors raised while opening
+	// the components, as well as errors raised asynchronously afterwards.
+	Open() <-chan error
+}
+
+// WindowedFlow is a flow that processes incoming elements based on windows.
+// The WindowedFlow can be created with a WindowAssigner.
+type WindowedFlow interface {
+	AllowedMaxWindows(windowCnt int) WindowedFlow
+	// TopN applies a TopNAggregation to each Window.
+	TopN(topNum int, opts ...any) Flow
+}
+
+// Window is a bucket of elements with a finite size.
+// timedWindow is the only implementation now.
+type Window interface {
+	// MaxTimestamp returns the upper bound of the Window.
+	// Unit: Millisecond
+	MaxTimestamp() int64
+}
+
+// WindowAssigner is used to assign Window(s) for a given timestamp, and thus it can create a WindowedFlow.
+type WindowAssigner interface {
+	// AssignWindows assigns a slice of Window(s) according to the given timestamp, e.g. the event time.
+	// The timestamp is in milliseconds.
+	AssignWindows(timestamp int64) ([]Window, error)
+}
+
+// AggregationOp defines the stateful operation for aggregation.
+type AggregationOp interface {
+	// Add puts a slice of elements as the input
+	Add([]StreamRecord)
+	// Snapshot takes a snapshot of the current state of the AggregationOp.
+	// Taking a snapshot resets the dirty flag.
+	Snapshot() interface{}
+	// Dirty reports whether any new item has been added since the last snapshot.
+	Dirty() bool
+}
+
+type AggregationOpFactory func() AggregationOp
+
+// StreamRecord is a container that wraps user data and a timestamp.
+// It is the underlying transmission medium for the streaming processing.
+type StreamRecord struct {
+	ts   int64
+	data interface{}
+}
+
+func NewStreamRecord(data interface{}, ts int64) StreamRecord {
+	return StreamRecord{
+		data: data,
+		ts:   ts,
+	}
+}
+
+func NewStreamRecordWithTimestampPb(data interface{}, timestamp *timestamppb.Timestamp) StreamRecord {
+	return StreamRecord{
+		data: data,
+		ts:   timestamp.GetSeconds()*1000 + int64(timestamp.GetNanos())/1_000_000,
+	}
+}
+
+func NewStreamRecordWithoutTS(data interface{}) StreamRecord {
+	return StreamRecord{
+		data: data,
+		ts:   -1,
+	}
+}
+
+func (sr StreamRecord) WithNewData(data interface{}) StreamRecord {
+	return StreamRecord{
+		ts:   sr.ts,
+		data: data,
+	}
+}
+
+func (sr StreamRecord) TimestampMillis() int64 {
+	return sr.ts
+}
+
+func (sr StreamRecord) Data() interface{} {
+	return sr.data
+}
+
+// Inlet represents a type that exposes one open input.
+//
+//go:generate mockgen -destination=./inlet_mock.go -package=flow github.com/apache/skywalking-banyandb/pkg/flow Inlet
+type Inlet interface {
+	In() chan<- StreamRecord
+}
+
+// Outlet represents a type that exposes one open output.
+type Outlet interface {
+	Out() <-chan StreamRecord
+}
+
+type Component interface {
+	// Setup is the lifecycle hook for resource preparation, e.g. starting a background job that listens on the input channel.
+	// It must be called before the flow starts to process elements.
+	Setup(context.Context) error
+	// Teardown is the lifecycle hook for shutting down the Component
+	// Implementations should ENSURE that all resources have been correctly recycled before this method returns.
+	Teardown(context.Context) error
+}
+
+type ComponentState struct {
+	sync.WaitGroup
+}
+
+// Source represents a set of stream processing steps that has one open output.
+type Source interface {
+	Outlet
+	Component
+	Exec(downstream Inlet)
+}
+
+// Operator represents a set of stream processing steps that has one open input and one open output.
+type Operator interface {
+	Inlet
+	Outlet
+	Component
+	Exec(downstream Inlet)
+}
+
+// Sink represents a set of stream processing steps that has one open input.
+type Sink interface {
+	Inlet
+	Component
+}
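
As an illustration of the Component/Inlet contracts above, a minimal custom Sink might look like the following (a sketch only; counter is a hypothetical type, not part of this commit):

    // counter is a hypothetical Sink that counts incoming records.
    type counter struct {
        flow.ComponentState
        n  int64
        in chan flow.StreamRecord
    }

    func newCounter() *counter {
        return &counter{in: make(chan flow.StreamRecord)}
    }

    func (c *counter) In() chan<- flow.StreamRecord { return c.in }

    func (c *counter) Setup(ctx context.Context) error {
        c.Add(1)
        go func() {
            defer c.Done()
            for range c.in { // drain until the upstream closes In()
                atomic.AddInt64(&c.n, 1)
            }
        }()
        return nil
    }

    func (c *counter) Teardown(ctx context.Context) error {
        c.Wait() // block until the draining goroutine exits
        return nil
    }

(Only context, sync/atomic, and the flow package are needed as imports.)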
diff --git a/api/data/measure.go b/pkg/flow/utils.go
similarity index 53%
copy from api/data/measure.go
copy to pkg/flow/utils.go
index a582924..7f4d805 100644
--- a/api/data/measure.go
+++ b/pkg/flow/utils.go
@@ -15,22 +15,28 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package data
+package flow
 
-import (
-	"github.com/apache/skywalking-banyandb/api/common"
-	"github.com/apache/skywalking-banyandb/pkg/bus"
-)
-
-var MeasureWriteKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-write",
+// Transmit is a helper that connects the current component's outlet to the downstream inlet.
+// It should be run in another goroutine.
+func Transmit(state *ComponentState, downstream Inlet, current Outlet) {
+	defer state.Done()
+	for elem := range current.Out() {
+		downstream.In() <- elem
+	}
+	close(downstream.In())
 }
 
-var TopicMeasureWrite = bus.UniTopic(MeasureWriteKindVersion.String())
-
-var MeasureQueryKindVersion = common.KindVersion{
-	Version: "v1",
-	Kind:    "measure-query",
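+// TryExactTimestamp extracts a timestamp from the item when possible:
+// a StreamRecord is returned unchanged, an item implementing
+// TimestampMillis() int64 is wrapped with its own timestamp, and
+// anything else is wrapped without a timestamp.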
+func TryExactTimestamp(item any) StreamRecord {
+	if r, ok := item.(StreamRecord); ok {
+		return r
+	}
+	type timestampExtractor interface {
+		TimestampMillis() int64
+	}
+	// otherwise, check if we can extract timestamp
+	if extractor, ok := item.(timestampExtractor); ok {
+		return NewStreamRecord(item, extractor.TimestampMillis())
+	}
+	return NewStreamRecordWithoutTS(item)
 }
-var TopicMeasureQuery = bus.BiTopic(MeasureQueryKindVersion.String())
diff --git a/pkg/iter/iter.go b/pkg/iter/iter.go
new file mode 100644
index 0000000..d2406c6
--- /dev/null
+++ b/pkg/iter/iter.go
@@ -0,0 +1,109 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package iter
+
+// An Iterator is a stream of items of some type.
+type Iterator[T any] interface {
+	// Next checks whether the iteration has more elements and
+	// returns the next one if it exists.
+	Next() (T, bool)
+}
+
+// FromSlice creates a new iterator which returns all items from the slice starting at index 0 until
+// all items are consumed.
+func FromSlice[T any](slice []T) Iterator[T] {
+	return &sliceIterator[T]{slice: slice}
+}
+
+type sliceIterator[T any] struct {
+	slice []T
+}
+
+func (iter *sliceIterator[T]) Next() (T, bool) {
+	if len(iter.slice) == 0 {
+		var zero T
+		return zero, false
+	}
+	item := iter.slice[0]
+	iter.slice = iter.slice[1:]
+	return item, true
+}
+
+// Map returns a new iterator that applies a function to each item from the
+// input iterator and returns the mapped items.
+//
+// The mapping function should not mutate the state outside its scope.
+func Map[T any, O any](from Iterator[T], mapFunc func(T) O) Iterator[O] {
+	return &mapIterator[T, O]{from: from, mapFunc: mapFunc}
+}
+
+type mapIterator[T any, O any] struct {
+	from    Iterator[T]
+	mapFunc func(T) O
+}
+
+func (iter *mapIterator[T, O]) Next() (O, bool) {
+	item, ok := iter.from.Next()
+	if !ok {
+		var zero O
+		return zero, false
+	}
+	mapped := iter.mapFunc(item)
+	return mapped, true
+}
+
+// Flatten concatenates the iterators produced by the input iterator
+// into a single iterator.
+func Flatten[T any](from Iterator[Iterator[T]]) Iterator[T] {
+	return &flattenIterator[T]{from: from}
+}
+
+type flattenIterator[T any] struct {
+	from Iterator[Iterator[T]]
+	head Iterator[T]
+}
+
+func (iter *flattenIterator[T]) Next() (T, bool) {
+	for {
+		if iter.head == nil {
+			item, ok := iter.from.Next()
+			if !ok {
+				var zero T
+				return zero, false
+			}
+			iter.head = item
+		}
+		item, ok := iter.head.Next()
+		if ok {
+			return item, true
+		}
+		iter.head = nil
+	}
+}
+
+// Empty returns an iterator that never returns anything.
+func Empty[T any]() Iterator[T] {
+	return emptyIterator[T]{}
+}
+
+type emptyIterator[T any] struct{}
+
+func (emptyIterator[T]) Next() (T, bool) {
+	var zero T
+	return zero, false
+}
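
A small composition sketch of these helpers (arbitrary values, called from outside the iter package; fmt is only used to print the output):

    nested := iter.Map(iter.FromSlice([][]int{{1, 2}, {3}}),
        func(s []int) iter.Iterator[int] { return iter.FromSlice(s) })
    flat := iter.Flatten(nested) // yields 1, 2, 3
    for v, ok := flat.Next(); ok; v, ok = flat.Next() {
        fmt.Println(v)
    }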
diff --git a/pkg/pb/v1/write.go b/pkg/pb/v1/write.go
index 834c44c..c8b6a9f 100644
--- a/pkg/pb/v1/write.go
+++ b/pkg/pb/v1/write.go
@@ -35,13 +35,17 @@ import (
 
 type ID string
 
+const fieldFlagLength = 9
+
 var (
 	strDelimiter = []byte("\n")
 	NullTag      = &modelv1.TagValue{Value: &modelv1.TagValue_Null{}}
+	TagFlag      = make([]byte, fieldFlagLength)
 
 	ErrUnsupportedTagForIndexField = errors.New("the tag type(for example, null) can not be as the index field value")
 	ErrNullValue                   = errors.New("the tag value is null")
 	ErrMalformedElement            = errors.New("element is malformed")
+	ErrMalformedFieldFlag          = errors.New("field flag is malformed")
 )
 
 func MarshalIndexFieldValue(tagValue *modelv1.TagValue) ([]byte, error) {
@@ -359,3 +363,35 @@ func EncodeFamily(familySpec *databasev1.TagFamilySpec, family *modelv1.TagFamil
 	}
 	return proto.Marshal(data)
 }
+
+func DecodeFieldValue(fieldValue []byte, fieldSpec *databasev1.FieldSpec) *modelv1.FieldValue {
+	switch fieldSpec.GetFieldType() {
+	case databasev1.FieldType_FIELD_TYPE_STRING:
+		return &modelv1.FieldValue{Value: &modelv1.FieldValue_Str{Str: &modelv1.Str{Value: string(fieldValue)}}}
+	case databasev1.FieldType_FIELD_TYPE_INT:
+		return &modelv1.FieldValue{Value: &modelv1.FieldValue_Int{Int: &modelv1.Int{Value: convert.BytesToInt64(fieldValue)}}}
+	case databasev1.FieldType_FIELD_TYPE_DATA_BINARY:
+		return &modelv1.FieldValue{Value: &modelv1.FieldValue_BinaryData{BinaryData: fieldValue}}
+	}
+	return &modelv1.FieldValue{Value: &modelv1.FieldValue_Null{}}
+}
+
+func EncoderFieldFlag(fieldSpec *databasev1.FieldSpec, interval time.Duration) []byte {
+	encodingMethod := byte(fieldSpec.GetEncodingMethod().Number())
+	compressionMethod := byte(fieldSpec.GetCompressionMethod().Number())
+	bb := make([]byte, fieldFlagLength)
+	bb[0] = encodingMethod<<4 | compressionMethod
+	copy(bb[1:], convert.Int64ToBytes(int64(interval)))
+	return bb
+}
+
+func DecodeFieldFlag(key []byte) (*databasev1.FieldSpec, time.Duration, error) {
+	if len(key) < fieldFlagLength {
+		return nil, 0, ErrMalformedFieldFlag
+	}
+	b := key[len(key)-fieldFlagLength:]
+	return &databasev1.FieldSpec{
+		EncodingMethod:    databasev1.EncodingMethod(int32(b[0]) >> 4),
+		CompressionMethod: databasev1.CompressionMethod(int32(b[0] & 0x0F)),
+	}, time.Duration(convert.BytesToInt64(b[1:])), nil
+}
diff --git a/pkg/query/aggregation/aggregation.go b/pkg/query/aggregation/aggregation.go
index 708db22..69d9320 100644
--- a/pkg/query/aggregation/aggregation.go
+++ b/pkg/query/aggregation/aggregation.go
@@ -14,9 +14,12 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package aggregation
 
 import (
+	"math"
+
 	"github.com/pkg/errors"
 
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
@@ -37,9 +40,13 @@ func NewInt64Func(af modelv1.AggregationFunction) (Int64Func, error) {
 	case modelv1.AggregationFunction_AGGREGATION_FUNCTION_COUNT:
 		return &countInt64Func{}, nil
 	case modelv1.AggregationFunction_AGGREGATION_FUNCTION_MAX:
-		return &maxInt64Func{}, nil
+		return &maxInt64Func{
+			val: math.MinInt64,
+		}, nil
 	case modelv1.AggregationFunction_AGGREGATION_FUNCTION_MIN:
-		return &minInt64Func{}, nil
+		return &minInt64Func{
+			val: math.MaxInt64,
+		}, nil
 	case modelv1.AggregationFunction_AGGREGATION_FUNCTION_SUM:
 		return &sumInt64Func{}, nil
 	}
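
The seeding here matters: starting max at math.MinInt64 (and min at math.MaxInt64) lets the first observed value win, whereas a zero-valued seed reports a wrong result for all-negative (resp. all-positive) series. A self-contained sketch of the idea (maxWithSeed is hypothetical, for illustration only):

    func maxWithSeed(seed int64, xs ...int64) int64 {
        m := seed
        for _, x := range xs {
            if x > m {
                m = x
            }
        }
        return m
    }

    // maxWithSeed(0, -42, -7)             == 0  (wrong)
    // maxWithSeed(math.MinInt64, -42, -7) == -7 (correct)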
diff --git a/pkg/query/aggregation/function.go b/pkg/query/aggregation/function.go
index 5144a81..59bdbc8 100644
--- a/pkg/query/aggregation/function.go
+++ b/pkg/query/aggregation/function.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package aggregation
 
 import "math"
diff --git a/pkg/query/logical/expr.go b/pkg/query/logical/expr.go
index e919a25..3664d28 100644
--- a/pkg/query/logical/expr.go
+++ b/pkg/query/logical/expr.go
@@ -20,7 +20,7 @@ package logical
 import (
 	"fmt"
 
-	database_v1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
 )
 
 var _ ResolvableExpr = (*TagRef)(nil)
@@ -114,5 +114,5 @@ func (f *FieldRef) Resolve(s Schema) error {
 
 type FieldSpec struct {
 	FieldIdx int
-	Spec     *database_v1.FieldSpec
+	Spec     *databasev1.FieldSpec
 }
diff --git a/pkg/query/logical/measure/measure_analyzer.go b/pkg/query/logical/measure/measure_analyzer.go
index 0ae3fc6..db48e09 100644
--- a/pkg/query/logical/measure/measure_analyzer.go
+++ b/pkg/query/logical/measure/measure_analyzer.go
@@ -153,7 +153,7 @@ func parseFields(criteria *measurev1.QueryRequest, metadata *commonv1.Metadata,
 		// fill AnyEntry by default
 		entity[idx] = tsdb.AnyEntry
 	}
-	filter, entities, err := logical.BuildLocalFilter(criteria.Criteria, s, entityMap, entity)
+	filter, entities, err := logical.BuildLocalFilter(criteria.GetCriteria(), s, entityMap, entity)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/query/logical/measure/measure_plan_aggregation.go b/pkg/query/logical/measure/measure_plan_aggregation.go
index 28d2a45..5649ad2 100644
--- a/pkg/query/logical/measure/measure_plan_aggregation.go
+++ b/pkg/query/logical/measure/measure_plan_aggregation.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure
 
 import (
diff --git a/pkg/query/logical/measure/measure_plan_top.go b/pkg/query/logical/measure/measure_plan_top.go
index 777e0c7..8344b91 100644
--- a/pkg/query/logical/measure/measure_plan_top.go
+++ b/pkg/query/logical/measure/measure_plan_top.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure
 
 import (
diff --git a/pkg/query/logical/measure/measure_top.go b/pkg/query/logical/measure/measure_top.go
index 9dc9d4b..5b8766d 100644
--- a/pkg/query/logical/measure/measure_top.go
+++ b/pkg/query/logical/measure/measure_top.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure
 
 import (
diff --git a/pkg/query/logical/measure/measure_top_test.go b/pkg/query/logical/measure/measure_top_test.go
index 4ffdf32..a004333 100644
--- a/pkg/query/logical/measure/measure_top_test.go
+++ b/pkg/query/logical/measure/measure_top_test.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package measure_test
 
 import (
diff --git a/pkg/schema/metadata.go b/pkg/schema/metadata.go
index dec178f..f95737b 100644
--- a/pkg/schema/metadata.go
+++ b/pkg/schema/metadata.go
@@ -71,8 +71,11 @@ type ResourceSchema interface {
 }
 
 type ResourceSpec struct {
-	Schema     ResourceSchema
+	Schema ResourceSchema
+	// IndexRules are index rules bound to the Schema
 	IndexRules []*databasev1.IndexRule
+	// Aggregations are the TopNAggregation rules bound to the Schema
+	Aggregations []*databasev1.TopNAggregation
 }
 
 type Resource interface {
@@ -191,8 +194,8 @@ func (sr *schemaRepo) Watcher() {
 
 func (sr *schemaRepo) StoreGroup(groupMeta *commonv1.Metadata) (*group, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
 	groupSchema, err := sr.metadata.GroupRegistry().GetGroup(ctx, groupMeta.GetName())
-	cancel()
 	if err != nil {
 		return nil, err
 	}
@@ -438,14 +441,27 @@ func (g *group) StoreResource(resourceSchema ResourceSchema) (Resource, error) {
 		}
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	idxRules, errIndexRules := g.metadata.IndexRules(ctx, resourceSchema.GetMetadata())
-	cancel()
-	if errIndexRules != nil {
-		return nil, errIndexRules
+	defer cancel()
+	idxRules, err := g.metadata.IndexRules(ctx, resourceSchema.GetMetadata())
+	if err != nil {
+		return nil, err
 	}
+
+	var topNAggrs []*databasev1.TopNAggregation
+	if _, ok := resourceSchema.(*databasev1.Measure); ok {
+		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+		var innerErr error
+		topNAggrs, innerErr = g.metadata.MeasureRegistry().TopNAggregations(ctx, resourceSchema.GetMetadata())
+		cancel()
+		if innerErr != nil {
+			return nil, innerErr
+		}
+	}
+
 	sm, errTS := g.resourceSupplier.OpenResource(g.GetSchema().GetResourceOpts().ShardNum, g, ResourceSpec{
-		Schema:     resourceSchema,
-		IndexRules: idxRules,
+		Schema:       resourceSchema,
+		IndexRules:   idxRules,
+		Aggregations: topNAggrs,
 	})
 	if errTS != nil {
 		return nil, errTS
diff --git a/pkg/test/flow/slice.go b/pkg/test/flow/slice.go
new file mode 100644
index 0000000..1792d2e
--- /dev/null
+++ b/pkg/test/flow/slice.go
@@ -0,0 +1,85 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package flow
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/pkg/errors"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+)
+
+var _ flow.Source = (*sourceSlice)(nil)
+
+type sourceSlice struct {
+	flow.ComponentState
+	slice interface{}
+	out   chan flow.StreamRecord
+}
+
+func (s *sourceSlice) Out() <-chan flow.StreamRecord {
+	return s.out
+}
+
+func (s *sourceSlice) Setup(ctx context.Context) error {
+	// ensure slice param is a Slice type
+	dataType := reflect.TypeOf(s.slice)
+	if dataType.Kind() != reflect.Slice {
+		return errors.New("sourceSlice must have a slice")
+	}
+	sliceVal := reflect.ValueOf(s.slice)
+
+	go s.run(ctx, sliceVal)
+	return nil
+}
+
+func (s *sourceSlice) run(ctx context.Context, sliceVal reflect.Value) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		cancel()
+		close(s.out)
+	}()
+
+	for i := 0; i < sliceVal.Len(); i++ {
+		val := sliceVal.Index(i)
+		select {
+		case s.out <- flow.TryExactTimestamp(val.Interface()):
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (s *sourceSlice) Teardown(ctx context.Context) error {
+	s.Wait()
+	return nil
+}
+
+func (s *sourceSlice) Exec(downstream flow.Inlet) {
+	s.Add(1)
+	go flow.Transmit(&s.ComponentState, downstream, s)
+}
+
+func NewSlice(slice interface{}) flow.Source {
+	return &sourceSlice{
+		slice: slice,
+		out:   make(chan flow.StreamRecord),
+	}
+}
diff --git a/pkg/test/flow/slice_test.go b/pkg/test/flow/slice_test.go
new file mode 100644
index 0000000..cc40294
--- /dev/null
+++ b/pkg/test/flow/slice_test.go
@@ -0,0 +1,63 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package flow_test
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/apache/skywalking-banyandb/pkg/flow"
+	flowTest "github.com/apache/skywalking-banyandb/pkg/test/flow"
+)
+
+const (
+	ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+func TestSource_slice(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+
+	inlet := flow.NewMockInlet(ctrl)
+
+	assert := require.New(t)
+	src := flowTest.NewSlice(strings.Split(ALPHABET, ""))
+	assert.NoError(src.Setup(context.TODO()))
+
+	in := make(chan flow.StreamRecord)
+	inlet.
+		EXPECT().
+		In().Times(len(ALPHABET) + 1).
+		Return(in)
+
+	src.Exec(inlet)
+
+	var result strings.Builder
+	for item := range in {
+		assert.IsType(flow.StreamRecord{}, item)
+		result.WriteString(item.Data().(string))
+	}
+
+	assert.Equal(ALPHABET, result.String())
+
+	assert.NoError(src.Teardown(context.TODO()))
+}
diff --git a/pkg/test/measure/etcd.go b/pkg/test/measure/etcd.go
index ee29262..5ed5a74 100644
--- a/pkg/test/measure/etcd.go
+++ b/pkg/test/measure/etcd.go
@@ -43,33 +43,38 @@ const (
 	measureDir          = "testdata/measures"
 	indexRuleDir        = "testdata/index_rules"
 	indexRuleBindingDir = "testdata/index_rule_bindings"
+	topNAggregationDir  = "testdata/topn_aggregations"
 )
 
 //go:embed testdata/*
 var store embed.FS
 
 func PreloadSchema(e schema.Registry) error {
-	if err := loadSchema(groupDir, &commonv1.Group{}, func(group proto.Message) error {
-		return e.CreateGroup(context.TODO(), group.(*commonv1.Group))
+	if err := loadSchema(groupDir, &commonv1.Group{}, func(group *commonv1.Group) error {
+		return e.CreateGroup(context.TODO(), group)
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-	if err := loadSchema(measureDir, &databasev1.Measure{}, func(measure proto.Message) error {
-		return e.CreateMeasure(context.TODO(), measure.(*databasev1.Measure))
+	if err := loadSchema(measureDir, &databasev1.Measure{}, func(measure *databasev1.Measure) error {
+		return e.CreateMeasure(context.TODO(), measure)
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-	if err := loadSchema(indexRuleDir, &databasev1.IndexRule{}, func(indexRule proto.Message) error {
-		return e.CreateIndexRule(context.TODO(), indexRule.(*databasev1.IndexRule))
+	if err := loadSchema(indexRuleDir, &databasev1.IndexRule{}, func(indexRule *databasev1.IndexRule) error {
+		return e.CreateIndexRule(context.TODO(), indexRule)
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-	if err := loadSchema(indexRuleBindingDir, &databasev1.IndexRuleBinding{}, func(indexRuleBinding proto.Message) error {
-		return e.CreateIndexRuleBinding(context.TODO(), indexRuleBinding.(*databasev1.IndexRuleBinding))
+	if err := loadSchema(indexRuleBindingDir, &databasev1.IndexRuleBinding{}, func(indexRuleBinding *databasev1.IndexRuleBinding) error {
+		return e.CreateIndexRuleBinding(context.TODO(), indexRuleBinding)
+	}); err != nil {
+		return errors.WithStack(err)
+	}
+	if err := loadSchema(topNAggregationDir, &databasev1.TopNAggregation{}, func(topN *databasev1.TopNAggregation) error {
+		return e.CreateTopNAggregation(context.TODO(), topN)
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-
 	return nil
 }
 
@@ -77,7 +82,7 @@ func RandomTempDir() string {
 	return path.Join(os.TempDir(), fmt.Sprintf("banyandb-embed-etcd-%s", uuid.New().String()))
 }
 
-func loadSchema(dir string, resource proto.Message, loadFn func(resource proto.Message) error) error {
+func loadSchema[T proto.Message](dir string, resource T, loadFn func(resource T) error) error {
 	entries, err := store.ReadDir(dir)
 	if err != nil {
 		return err
@@ -109,20 +114,20 @@ func RegisterForNew(addr string, metricNum int) error {
 
 	ctx := context.Background()
 
-	if err := loadSchema(groupDir, &commonv1.Group{}, func(group proto.Message) error {
+	if err := loadSchema(groupDir, &commonv1.Group{}, func(group *commonv1.Group) error {
 		return grpchelper.Request(ctx, rpcTimeout, func(rpcCtx context.Context) (err error) {
 			_, err = databasev1.NewGroupRegistryServiceClient(conn).
 				Create(rpcCtx, &databasev1.GroupRegistryServiceCreateRequest{
-					Group: group.(*commonv1.Group),
+					Group: group,
 				})
 			return err
 		})
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-	if err := loadSchema(measureDir, &databasev1.Measure{}, func(measure proto.Message) error {
+	if err := loadSchema(measureDir, &databasev1.Measure{}, func(measure *databasev1.Measure) error {
 		var err error
-		name := measure.(*databasev1.Measure).Metadata.Name
+		name := measure.GetMetadata().GetName()
 		num := metricNum
 		if name != "service_cpm_minute" {
 			num = 1
@@ -131,7 +136,7 @@ func RegisterForNew(addr string, metricNum int) error {
 			err = multierr.Append(err, grpchelper.Request(ctx, rpcTimeout, func(rpcCtx context.Context) (err error) {
 				m := proto.Clone(measure).(*databasev1.Measure)
 				if i > 0 {
-					m.Metadata.Name = m.Metadata.Name + "_" + strconv.Itoa(i)
+					m.Metadata.Name = m.GetMetadata().GetName() + "_" + strconv.Itoa(i)
 				}
 				_, err = databasev1.NewMeasureRegistryServiceClient(conn).
 					Create(rpcCtx, &databasev1.MeasureRegistryServiceCreateRequest{
@@ -145,22 +150,22 @@ func RegisterForNew(addr string, metricNum int) error {
 		return errors.WithStack(err)
 	}
 
-	if err := loadSchema(indexRuleDir, &databasev1.IndexRule{}, func(indexRule proto.Message) error {
+	if err := loadSchema(indexRuleDir, &databasev1.IndexRule{}, func(indexRule *databasev1.IndexRule) error {
 		return grpchelper.Request(ctx, rpcTimeout, func(rpcCtx context.Context) (err error) {
 			_, err = databasev1.NewIndexRuleRegistryServiceClient(conn).
 				Create(rpcCtx, &databasev1.IndexRuleRegistryServiceCreateRequest{
-					IndexRule: indexRule.(*databasev1.IndexRule),
+					IndexRule: indexRule,
 				})
 			return err
 		})
 	}); err != nil {
 		return errors.WithStack(err)
 	}
-	if err := loadSchema(indexRuleBindingDir, &databasev1.IndexRuleBinding{}, func(indexRuleBinding proto.Message) error {
+	if err := loadSchema(indexRuleBindingDir, &databasev1.IndexRuleBinding{}, func(indexRuleBinding *databasev1.IndexRuleBinding) error {
 		return grpchelper.Request(ctx, rpcTimeout, func(rpcCtx context.Context) (err error) {
 			_, err = databasev1.NewIndexRuleBindingRegistryServiceClient(conn).
 				Create(rpcCtx, &databasev1.IndexRuleBindingRegistryServiceCreateRequest{
-					IndexRuleBinding: indexRuleBinding.(*databasev1.IndexRuleBinding),
+					IndexRuleBinding: indexRuleBinding,
 				})
 			return err
 		})
diff --git a/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_nogroup_top100.json b/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_nogroup_top100.json
new file mode 100644
index 0000000..a4e1456
--- /dev/null
+++ b/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_nogroup_top100.json
@@ -0,0 +1,14 @@
+{
+  "metadata": {
+    "name": "service_cpm_minute_no_group_by_top100",
+    "group": "sw_metric"
+  },
+  "source_measure": {
+    "name": "service_cpm_minute",
+    "group": "sw_metric"
+  },
+  "field_name": "value",
+  "field_value_sort": 1,
+  "counters_number": 1000,
+  "lru_size": 10
+}
\ No newline at end of file
diff --git a/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_top_bottom100.json b/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_top_bottom100.json
new file mode 100644
index 0000000..fec9f49
--- /dev/null
+++ b/pkg/test/measure/testdata/topn_aggregations/service_cpm_minute_top_bottom100.json
@@ -0,0 +1,17 @@
+{
+  "metadata": {
+    "name": "service_cpm_minute_top_bottom_100",
+    "group": "sw_metric"
+  },
+  "source_measure": {
+    "name": "service_cpm_minute",
+    "group": "sw_metric"
+  },
+  "field_name": "value",
+  "field_value_sort": 0,
+  "group_by_tag_names": [
+    "entity_id"
+  ],
+  "counters_number": 1000,
+  "lru_size": 10
+}
\ No newline at end of file
diff --git a/pkg/timestamp/duration.go b/pkg/timestamp/duration.go
index 8b07aa2..d345d9c 100644
--- a/pkg/timestamp/duration.go
+++ b/pkg/timestamp/duration.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package timestamp
 
 import (
diff --git a/pkg/timestamp/nano.go b/pkg/timestamp/nano.go
index 259629a..3628573 100644
--- a/pkg/timestamp/nano.go
+++ b/pkg/timestamp/nano.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package timestamp
 
 import (
diff --git a/pkg/timestamp/range.go b/pkg/timestamp/range.go
index 44f0bc0..3c5b77b 100644
--- a/pkg/timestamp/range.go
+++ b/pkg/timestamp/range.go
@@ -14,6 +14,7 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
 package timestamp
 
 import (
diff --git a/test/cases/measure/data/testdata/service_cpm_minute_data1.json b/test/cases/measure/data/testdata/service_cpm_minute_data1.json
index 9fa4aea..44bd61a 100644
--- a/test/cases/measure/data/testdata/service_cpm_minute_data1.json
+++ b/test/cases/measure/data/testdata/service_cpm_minute_data1.json
@@ -5,12 +5,72 @@
         "tags": [
           {
             "id": {
-              "value": "1"
+              "value": "7"
             }
           },
           {
             "str": {
-              "value": "entity_1"
+              "value": "entity_2"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 10
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "8"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_2"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 5
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "9"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_2"
             }
           }
         ]
@@ -19,7 +79,7 @@
     "fields": [
       {
         "int": {
-          "value": 200
+          "value": 100
         }
       },
       {
@@ -28,5 +88,95 @@
         }
       }
     ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "10"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_3"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 2
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "11"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_1"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 50
+        }
+      },
+      {
+        "int": {
+          "value": 1
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "12"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_1"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 300
+        }
+      },
+      {
+        "int": {
+          "value": 4
+        }
+      }
+    ]
   }
-]
\ No newline at end of file
+]
diff --git a/test/cases/measure/data/want/all.yaml b/test/cases/measure/data/want/all.yaml
index 24bb21e..5041ece 100644
--- a/test/cases/measure/data/want/all.yaml
+++ b/test/cases/measure/data/want/all.yaml
@@ -36,6 +36,28 @@ dataPoints:
       value:
         str:
           value: entity_1
+  timestamp: "2022-10-13T13:34:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "10"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "7"
+    - key: entity_id
+      value:
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T13:34:19.389Z"
 - fields:
   - name: total
     value:
@@ -56,6 +78,28 @@ dataPoints:
       value:
         str:
           value: entity_2
+  timestamp: "2022-10-13T13:35:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "8"
+    - key: entity_id
+      value:
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T13:35:19.389Z"
 - fields:
   - name: total
     value:
@@ -76,6 +120,28 @@ dataPoints:
       value:
         str:
           value: entity_2
+  timestamp: "2022-10-13T13:36:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "3"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "9"
+    - key: entity_id
+      value:
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T13:36:19.389Z"
 - fields:
   - name: total
     value:
@@ -96,6 +162,28 @@ dataPoints:
       value:
         str:
           value: entity_3
+  timestamp: "2022-10-13T13:37:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "2"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "10"
+    - key: entity_id
+      value:
+        str:
+          value: entity_3
+  timestamp: "2022-10-13T13:37:19.389Z"
 - fields:
   - name: total
     value:
@@ -116,6 +204,28 @@ dataPoints:
       value:
         str:
           value: entity_1
+  timestamp: "2022-10-13T13:38:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "50"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "11"
+    - key: entity_id
+      value:
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T13:38:19.389Z"
 - fields:
   - name: total
     value:
@@ -135,4 +245,26 @@ dataPoints:
     - key: entity_id
       value:
         str:
-          value: entity_1
\ No newline at end of file
+          value: entity_1
+  timestamp: "2022-10-13T13:39:18.389Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "300"
+  - name: value
+    value:
+      int:
+        value: "4"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: id
+      value:
+        id:
+          value: "12"
+    - key: entity_id
+      value:
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T13:39:19.389Z"
diff --git a/test/cases/measure/data/want/group_max.yaml b/test/cases/measure/data/want/group_max.yaml
index b1d43a7..f24bb15 100644
--- a/test/cases/measure/data/want/group_max.yaml
+++ b/test/cases/measure/data/want/group_max.yaml
@@ -32,7 +32,7 @@ dataPoints:
     - name: value
       value:
         int:
-          value: "1"
+          value: "10"
     tagFamilies:
     - name: default
       tags:
diff --git a/test/cases/measure/data/want/limit.yaml b/test/cases/measure/data/want/limit.yaml
index 7f8b727..2267a74 100644
--- a/test/cases/measure/data/want/limit.yaml
+++ b/test/cases/measure/data/want/limit.yaml
@@ -16,35 +16,37 @@
 # under the License.
 
 dataPoints:
-  - fields:
-    - name: total
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:13:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "5"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
-  - fields:
-    - name: total
-      value:
-        int:
-          value: "50"
-    - name: value
-      value:
-        int:
-          value: "4"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
\ No newline at end of file
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:14:49.440Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/order_asc.yaml b/test/cases/measure/data/want/order_asc.yaml
index 4d7f0d7..67f6d11 100644
--- a/test/cases/measure/data/want/order_asc.yaml
+++ b/test/cases/measure/data/want/order_asc.yaml
@@ -16,99 +16,207 @@
 # under the License.
 
 dataPoints:
-  - fields:
-    - name: total
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:11:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "10"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: total
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:11:31.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:12:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_2
-  - fields:
-    - name: total
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:12:31.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:13:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "3"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_2
-  - fields:
-    - name: total
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:13:31.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_3
+  timestamp: "2022-10-13T14:14:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "2"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "5"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
-  - fields:
-    - name: total
+        str:
+          value: entity_3
+  timestamp: "2022-10-13T14:14:31.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "50"
+  - name: value
+    value:
+      int:
+        value: "4"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "50"
-    - name: value
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:15:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "50"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "4"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: total
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:15:31.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "300"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "300"
-    - name: value
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:16:30.182Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "300"
+  - name: value
+    value:
+      int:
+        value: "4"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "5"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
\ No newline at end of file
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:16:31.182Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/order_desc.yaml b/test/cases/measure/data/want/order_desc.yaml
index bc6000e..b503b36 100644
--- a/test/cases/measure/data/want/order_desc.yaml
+++ b/test/cases/measure/data/want/order_desc.yaml
@@ -16,99 +16,207 @@
 # under the License.
 
 dataPoints:
-  - fields:
-    - name: total
+- fields:
+  - name: total
+    value:
+      int:
+        value: "300"
+  - name: value
+    value:
+      int:
+        value: "4"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "300"
-    - name: value
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:17:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "300"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "5"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: total
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:17:49.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "50"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "50"
-    - name: value
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:16:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "50"
+  - name: value
+    value:
+      int:
+        value: "4"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "4"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: total
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:16:49.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "2"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_3
+  timestamp: "2022-10-13T14:15:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "5"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
-  - fields:
-    - name: total
+        str:
+          value: entity_3
+  timestamp: "2022-10-13T14:15:49.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "3"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:14:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_2
-  - fields:
-    - name: total
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:14:49.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "5"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:13:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_2
-  - fields:
-    - name: total
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:13:49.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "10"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "100"
-    - name: value
+        str:
+          value: entity_2
+  timestamp: "2022-10-13T14:12:50.440Z"
+- fields:
+  - name: total
+    value:
+      int:
+        value: "100"
+  - name: value
+    value:
+      int:
+        value: "1"
+  tagFamilies:
+  - name: default
+    tags:
+    - key: entity_id
       value:
-        int:
-          value: "1"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
\ No newline at end of file
+        str:
+          value: entity_1
+  timestamp: "2022-10-13T14:12:49.440Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/measure/data/want/top.yaml
index f43d733..06c543c 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/measure/data/want/top.yaml
@@ -20,7 +20,7 @@ dataPoints:
     - name: value
       value:
         int:
-          value: "150"
+          value: "160"
     tagFamilies:
     - name: default
       tags:
diff --git a/test/cases/measure/measure.go b/test/cases/measure/measure.go
index b7939ae..3cb8394 100644
--- a/test/cases/measure/measure.go
+++ b/test/cases/measure/measure.go
@@ -34,7 +34,7 @@ var (
 	verify        = func(args helpers.Args) {
 		gm.Eventually(func(innerGm gm.Gomega) {
 			measureTestData.VerifyFn(innerGm, SharedContext, args)
-		})
+		}).Should(gm.Succeed())
 	}
 )
 
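The ".Should(gm.Succeed())" appended above is the substance of this fix: Gomega's Eventually merely builds an AsyncAssertion, and no polling happens until a matcher is applied, so the previous form never actually invoked VerifyFn. A minimal standalone sketch of the difference, assuming Gomega v1.17+ semantics (the fail handler and variable names here are illustrative only, not part of this commit):

package main

import (
	"fmt"
	"time"

	gm "github.com/onsi/gomega"
)

func main() {
	// Illustrative fail handler; a real Ginkgo suite wires this up itself.
	g := gm.NewGomega(func(message string, _ ...int) { fmt.Println("failed:", message) })

	calls := 0
	check := func(innerGm gm.Gomega) {
		calls++
		innerGm.Expect(calls).To(gm.BeNumerically(">", 2))
	}

	g.Eventually(check) // no matcher applied: check is never invoked
	fmt.Println("calls without Should:", calls) // prints 0

	g.Eventually(check).
		WithTimeout(time.Second).
		WithPolling(10 * time.Millisecond).
		Should(gm.Succeed()) // polling starts here and retries until check passes
	fmt.Println("calls with Should:", calls) // prints 3
}
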
diff --git a/test/cases/stream/data/testdata/service_cpm_minute_data.json b/test/cases/stream/data/testdata/service_cpm_minute_data.json
new file mode 100644
index 0000000..877290c
--- /dev/null
+++ b/test/cases/stream/data/testdata/service_cpm_minute_data.json
@@ -0,0 +1,182 @@
+[
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "1"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_1"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 1
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "4"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_2"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 1
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "5"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_2"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 1
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "6"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_3"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 100
+        }
+      },
+      {
+        "int": {
+          "value": 5
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "2"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_1"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 50
+        }
+      },
+      {
+        "int": {
+          "value": 4
+        }
+      }
+    ]
+  },
+  {
+    "tag_families": [
+      {
+        "tags": [
+          {
+            "id": {
+              "value": "3"
+            }
+          },
+          {
+            "str": {
+              "value": "entity_1"
+            }
+          }
+        ]
+      }
+    ],
+    "fields": [
+      {
+        "int": {
+          "value": 300
+        }
+      },
+      {
+        "int": {
+          "value": 5
+        }
+      }
+    ]
+  }
+]
diff --git a/test/cases/stream/stream.go b/test/cases/stream/stream.go
index d03539a..95f4cc1 100644
--- a/test/cases/stream/stream.go
+++ b/test/cases/stream/stream.go
@@ -33,14 +33,16 @@ import (
 var (
 	// SharedContext is the parallel execution context
 	SharedContext helpers.SharedContext
-	verify        = func(args helpers.Args) {
-		gm.Eventually(func(innerGm gm.Gomega) {
-			stream_test_data.VerifyFn(innerGm, SharedContext, args)
-		})
-	}
+	verify        = func(innerGm gm.Gomega, args helpers.Args) {
+		stream_test_data.VerifyFn(innerGm, SharedContext, args)
+	}
 )
 
-var _ = g.DescribeTable("Scanning Streams", verify,
+var _ = g.DescribeTable("Scanning Streams", func(args helpers.Args) {
+	gm.Eventually(func(innerGm gm.Gomega) {
+		verify(innerGm, args)
+	}).Should(gm.Succeed())
+},
 	g.Entry("all elements", helpers.Args{Input: "all", Duration: 1 * time.Hour}),
 	g.Entry("limit", helpers.Args{Input: "limit", Duration: 1 * time.Hour}),
 	g.Entry("offset", helpers.Args{Input: "offset", Duration: 1 * time.Hour}),
diff --git a/test/cases/topn/data/data.go b/test/cases/topn/data/data.go
new file mode 100644
index 0000000..48fb0c2
--- /dev/null
+++ b/test/cases/topn/data/data.go
@@ -0,0 +1,85 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package data contains integration test cases of the topN feature
+package data
+
+import (
+	"context"
+	"embed"
+
+	"github.com/google/go-cmp/cmp"
+	g "github.com/onsi/ginkgo/v2"
+	gm "github.com/onsi/gomega"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/testing/protocmp"
+	"sigs.k8s.io/yaml"
+
+	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
+	"github.com/apache/skywalking-banyandb/pkg/test/helpers"
+)
+
+//go:embed input/*.yaml
+var inputFS embed.FS
+
+//go:embed want/*.yaml
+var wantFS embed.FS
+
+// VerifyFn verify whether the query response matches the wanted result
+var VerifyFn = func(innerGm gm.Gomega, sharedContext helpers.SharedContext, args helpers.Args) {
+	i, err := inputFS.ReadFile("input/" + args.Input + ".yaml")
+	innerGm.Expect(err).NotTo(gm.HaveOccurred())
+	query := &measurev1.TopNRequest{}
+	helpers.UnmarshalYAML(i, query)
+	query.TimeRange = helpers.TimeRange(args, sharedContext)
+	c := measurev1.NewMeasureServiceClient(sharedContext.Connection)
+	ctx := context.Background()
+	resp, err := c.TopN(ctx, query)
+	if args.WantErr {
+		if err == nil {
+			g.Fail("expect error")
+		}
+		return
+	}
+	innerGm.Expect(err).NotTo(gm.HaveOccurred(), query.String())
+	if args.WantEmpty {
+		innerGm.Expect(resp.Lists).To(gm.BeEmpty())
+		return
+	}
+	if args.Want == "" {
+		args.Want = args.Input
+	}
+	ww, err := wantFS.ReadFile("want/" + args.Want + ".yaml")
+	innerGm.Expect(err).NotTo(gm.HaveOccurred())
+	want := &measurev1.TopNResponse{}
+	helpers.UnmarshalYAML(ww, want)
+	innerGm.Expect(cmp.Equal(resp, want,
+		protocmp.IgnoreUnknown(),
+		protocmp.IgnoreFields(&measurev1.TopNList{}, "timestamp"),
+		protocmp.Transform())).
+		To(gm.BeTrue(), func() string {
+			j, err := protojson.Marshal(resp)
+			if err != nil {
+				return err.Error()
+			}
+			y, err := yaml.JSONToYAML(j)
+			if err != nil {
+				return err.Error()
+			}
+			return string(y)
+		})
+}
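Note how VerifyFn compares results: cmp.Equal with protocmp.Transform() performs a semantic protobuf comparison, and protocmp.IgnoreFields(&measurev1.TopNList{}, "timestamp") deliberately excludes the bucket timestamps, presumably because the test data is written relative to the suite's base time and those timestamps differ between runs. When the comparison fails, the closure renders the actual response as YAML so the failure output reads directly against the want/*.yaml golden files.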
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/input/aggr_desc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/input/aggr_desc.yaml
index f43d733..83d2f7d 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/input/aggr_desc.yaml
@@ -15,28 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+metadata:
+  name: "service_cpm_minute_top_bottom_100"
+  group: "sw_metric"
+topN: 3
+fieldValueSort: 1
+agg: 2
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/input/asc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/input/asc.yaml
index f43d733..75e714b 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/input/asc.yaml
@@ -15,28 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+metadata:
+  name: "service_cpm_minute_top_bottom_100"
+  group: "sw_metric"
+topN: 1
+fieldValueSort: 2
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/input/condition_aggr_desc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/input/condition_aggr_desc.yaml
index f43d733..88f701c 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/input/condition_aggr_desc.yaml
@@ -15,28 +15,15 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+metadata:
+  name: "service_cpm_minute_top_bottom_100"
+  group: "sw_metric"
+topN: 1
+fieldValueSort: 1
+agg: 2
+conditions:
+- name: entity_id
+  op: 1
+  value:
+    str:
+      value: entity_1
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/input/desc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/input/desc.yaml
index f43d733..d40dfb5 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/input/desc.yaml
@@ -15,28 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+metadata:
+  name: "service_cpm_minute_top_bottom_100"
+  group: "sw_metric"
+topN: 1
+fieldValueSort: 1
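The four input files above are protojson-style TopNRequest payloads with enum fields written numerically. Judging by the entry names registered in topn.go further down ("desc", "asc", "max top3 order by desc"), fieldValueSort: 1 corresponds to SORT_DESC, fieldValueSort: 2 to SORT_ASC, agg: 2 to AGGREGATION_FUNCTION_MAX, and op: 1 in condition_aggr_desc.yaml to BINARY_OP_EQ. A rough sketch of the aggr_desc request built directly against the generated API (enum identifiers inferred from the test names, not taken from this diff):

package main

import (
	"fmt"

	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
)

// aggrDescRequest mirrors input/aggr_desc.yaml: top 3 entities per bucket,
// MAX-aggregated across buckets, sorted descending by field value.
func aggrDescRequest() *measurev1.TopNRequest {
	return &measurev1.TopNRequest{
		Metadata: &commonv1.Metadata{
			Name:  "service_cpm_minute_top_bottom_100",
			Group: "sw_metric",
		},
		TopN:           3,
		FieldValueSort: modelv1.Sort_SORT_DESC,                               // fieldValueSort: 1
		Agg:            modelv1.AggregationFunction_AGGREGATION_FUNCTION_MAX, // agg: 2
	}
}

func main() {
	fmt.Println(aggrDescRequest().String())
}
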
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/want/aggr_desc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/want/aggr_desc.yaml
index f43d733..336774a 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/want/aggr_desc.yaml
@@ -15,28 +15,18 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+lists:
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "10"
+  - name: entity_3
+    value:
+      int:
+        value: "5"
+  - name: entity_1
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:35:00Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/want/asc.yaml
similarity index 55%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/want/asc.yaml
index f43d733..6eebcc6 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/want/asc.yaml
@@ -15,28 +15,40 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+lists:
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "1"
+  timestamp: "2022-10-13T14:14:00Z"
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "1"
+  timestamp: "2022-10-13T14:15:00Z"
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "1"
+  timestamp: "2022-10-13T14:16:00Z"
+- items:
+  - name: entity_3
+    value:
+      int:
+        value: "2"
+  timestamp: "2022-10-13T14:17:00Z"
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "1"
+  timestamp: "2022-10-13T14:18:00Z"
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:19:00Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/want/condition_aggr_desc.yaml
similarity index 65%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/want/condition_aggr_desc.yaml
index f43d733..d1ae3bb 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/want/condition_aggr_desc.yaml
@@ -15,28 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+lists:
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:45:00Z"
\ No newline at end of file
diff --git a/test/cases/measure/data/want/top.yaml b/test/cases/topn/data/want/desc.yaml
similarity index 55%
copy from test/cases/measure/data/want/top.yaml
copy to test/cases/topn/data/want/desc.yaml
index f43d733..dbc19ab 100644
--- a/test/cases/measure/data/want/top.yaml
+++ b/test/cases/topn/data/want/desc.yaml
@@ -15,28 +15,40 @@
 # specific language governing permissions and limitations
 # under the License.
 
-dataPoints:
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "150"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_1
-  - fields:
-    - name: value
-      value:
-        int:
-          value: "100"
-    tagFamilies:
-    - name: default
-      tags:
-      - key: entity_id
-        value:
-          str:
-            value: entity_3
\ No newline at end of file
+lists:
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "10"
+  timestamp: "2022-10-13T14:14:00Z"
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:15:00Z"
+- items:
+  - name: entity_2
+    value:
+      int:
+        value: "3"
+  timestamp: "2022-10-13T14:16:00Z"
+- items:
+  - name: entity_3
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:17:00Z"
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "4"
+  timestamp: "2022-10-13T14:18:00Z"
+- items:
+  - name: entity_1
+    value:
+      int:
+        value: "5"
+  timestamp: "2022-10-13T14:19:00Z"
\ No newline at end of file
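The shape of these golden files reflects the request parameters: with topN: 1 the server returns one ranked list per minute bucket, so want/desc.yaml keeps each bucket's largest observed value while want/asc.yaml keeps the smallest (compare entity_2 with 10 against entity_1 with 1 in the 14:14 bucket), whereas want/aggr_desc.yaml collapses the whole window into a single list because the MAX aggregation spans buckets.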
diff --git a/test/cases/topn/topn.go b/test/cases/topn/topn.go
new file mode 100644
index 0000000..dd055d3
--- /dev/null
+++ b/test/cases/topn/topn.go
@@ -0,0 +1,46 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package topn_test contains integration test cases of the TopN feature
+package topn_test
+
+import (
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	gm "github.com/onsi/gomega"
+
+	"github.com/apache/skywalking-banyandb/pkg/test/helpers"
+	topNTestData "github.com/apache/skywalking-banyandb/test/cases/topn/data"
+)
+
+var (
+	// SharedContext is the parallel execution context
+	SharedContext helpers.SharedContext
+	verify        = func(args helpers.Args) {
+		gm.Eventually(func(innerGm gm.Gomega) {
+			topNTestData.VerifyFn(innerGm, SharedContext, args)
+		}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(gm.Succeed())
+	}
+)
+
+var _ = g.DescribeTable("TopN Tests", verify,
+	g.Entry("desc", helpers.Args{Input: "desc", Duration: 1 * time.Hour, Offset: -5 * time.Minute}),
+	g.Entry("asc", helpers.Args{Input: "asc", Duration: 1 * time.Hour, Offset: -5 * time.Minute}),
+	g.Entry("max top3 order by desc", helpers.Args{Input: "aggr_desc", Duration: 1 * time.Hour, Offset: -5 * time.Minute}),
+	g.Entry("max top3 with condition order by desc", helpers.Args{Input: "condition_aggr_desc", Duration: 1 * time.Hour, Offset: -5 * time.Minute}),
+)
diff --git a/test/integration/cold_query/query_suite_test.go b/test/integration/cold_query/query_suite_test.go
index 9b35b6c..0f8f2fd 100644
--- a/test/integration/cold_query/query_suite_test.go
+++ b/test/integration/cold_query/query_suite_test.go
@@ -30,10 +30,11 @@ import (
 	"github.com/apache/skywalking-banyandb/pkg/test/helpers"
 	"github.com/apache/skywalking-banyandb/pkg/test/setup"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
-	cases_measure "github.com/apache/skywalking-banyandb/test/cases/measure"
-	cases_measure_data "github.com/apache/skywalking-banyandb/test/cases/measure/data"
-	cases_stream "github.com/apache/skywalking-banyandb/test/cases/stream"
-	cases_stream_data "github.com/apache/skywalking-banyandb/test/cases/stream/data"
+	casesMeasure "github.com/apache/skywalking-banyandb/test/cases/measure"
+	casesMeasureData "github.com/apache/skywalking-banyandb/test/cases/measure/data"
+	casesStream "github.com/apache/skywalking-banyandb/test/cases/stream"
+	casesStreamData "github.com/apache/skywalking-banyandb/test/cases/stream/data"
+	casesTopn "github.com/apache/skywalking-banyandb/test/cases/topn"
 )
 
 func TestIntegrationColdQuery(t *testing.T) {
@@ -61,10 +62,11 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	Expect(err).NotTo(HaveOccurred())
 	now = timestamp.NowMilli().Add(-time.Hour * 24)
 	interval := 500 * time.Millisecond
-	cases_stream_data.Write(conn, "data.json", now, interval)
-	cases_measure_data.Write(conn, "service_traffic", "sw_metric", "service_traffic_data.json", now, interval)
-	cases_measure_data.Write(conn, "service_instance_traffic", "sw_metric", "service_instance_traffic_data.json", now, interval)
-	cases_measure_data.Write(conn, "service_cpm_minute", "sw_metric", "service_cpm_minute_data.json", now, interval)
+	casesStreamData.Write(conn, "data.json", now, interval)
+	casesMeasureData.Write(conn, "service_traffic", "sw_metric", "service_traffic_data.json", now, interval)
+	casesMeasureData.Write(conn, "service_instance_traffic", "sw_metric", "service_instance_traffic_data.json", now, interval)
+	casesMeasureData.Write(conn, "service_cpm_minute", "sw_metric", "service_cpm_minute_data.json", now, interval)
+	casesMeasureData.Write(conn, "service_cpm_minute", "sw_metric", "service_cpm_minute_data1.json", now.Add(time.Second), interval)
 	Expect(conn.Close()).To(Succeed())
 	return []byte(addr)
 }, func(address []byte) {
@@ -74,11 +76,15 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		grpclib.WithTransportCredentials(insecure.NewCredentials()),
 		grpclib.WithBlock(),
 	)
-	cases_stream.SharedContext = helpers.SharedContext{
+	casesStream.SharedContext = helpers.SharedContext{
 		Connection: connection,
 		BaseTime:   now,
 	}
-	cases_measure.SharedContext = helpers.SharedContext{
+	casesMeasure.SharedContext = helpers.SharedContext{
+		Connection: connection,
+		BaseTime:   now,
+	}
+	casesTopn.SharedContext = helpers.SharedContext{
 		Connection: connection,
 		BaseTime:   now,
 	}
diff --git a/test/integration/query/query_suite_test.go b/test/integration/query/query_suite_test.go
index b1a3092..4b659b0 100644
--- a/test/integration/query/query_suite_test.go
+++ b/test/integration/query/query_suite_test.go
@@ -34,6 +34,7 @@ import (
 	cases_measure_data "github.com/apache/skywalking-banyandb/test/cases/measure/data"
 	cases_stream "github.com/apache/skywalking-banyandb/test/cases/stream"
 	cases_stream_data "github.com/apache/skywalking-banyandb/test/cases/stream/data"
+	cases_topn "github.com/apache/skywalking-banyandb/test/cases/topn"
 )
 
 func TestIntegrationQuery(t *testing.T) {
@@ -61,10 +62,13 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	Expect(err).NotTo(HaveOccurred())
 	now = timestamp.NowMilli()
 	interval := 500 * time.Millisecond
+	// stream
 	cases_stream_data.Write(conn, "data.json", now, interval)
+	// measure
 	cases_measure_data.Write(conn, "service_traffic", "sw_metric", "service_traffic_data.json", now, interval)
 	cases_measure_data.Write(conn, "service_instance_traffic", "sw_metric", "service_instance_traffic_data.json", now, interval)
 	cases_measure_data.Write(conn, "service_cpm_minute", "sw_metric", "service_cpm_minute_data.json", now, interval)
+	cases_measure_data.Write(conn, "service_cpm_minute", "sw_metric", "service_cpm_minute_data1.json", now.Add(time.Second), interval)
 	Expect(conn.Close()).To(Succeed())
 	return []byte(addr)
 }, func(address []byte) {
@@ -82,6 +86,10 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		Connection: connection,
 		BaseTime:   now,
 	}
+	cases_topn.SharedContext = helpers.SharedContext{
+		Connection: connection,
+		BaseTime:   now,
+	}
 	Expect(err).NotTo(HaveOccurred())
 })
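Both suites now write service_cpm_minute_data1.json at now.Add(time.Second), one second after the original batch, so every minute bucket holds two data points per entity; that interleaving is what gives the TopN buckets multiple candidates to rank (compare the paired 13:34:18.389Z and 13:34:19.389Z timestamps in want/all.yaml above).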