Posted to notifications@skywalking.apache.org by ha...@apache.org on 2022/11/29 14:51:53 UTC

[skywalking-banyandb] branch debug created (now 95833d5)

This is an automated email from the ASF dual-hosted git repository.

hanahmily pushed a change to branch debug
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git


      at 95833d5  Add more debug infos

This branch includes the following new commits:

     new 95833d5  Add more debug infos

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[skywalking-banyandb] 01/01: Add more debug infos

Posted by ha...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hanahmily pushed a commit to branch debug
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git

commit 95833d57d38d9364de90e90b0078c97eb2feba0a
Author: Gao Hongtao <ha...@gmail.com>
AuthorDate: Tue Nov 22 01:26:35 2022 +0000

    Add more debug infos
    
    Refactor logs
    
    Correct block start time
    
    Update block and segment prefix templates
    
    Increase the length of segment/block id
    
    Correct the overlapping of blocks/segments
    
    Add series literal to debug info
    
    Compute log output only if the debug log is enabled
    
    Bump up OAP
    
    Signed-off-by: Gao Hongtao <ha...@gmail.com>
---
 .gitignore                                         |   3 +
 api/proto/banyandb/measure/v1/write.proto          |   3 +-
 api/proto/banyandb/stream/v1/write.proto           |   3 +-
 banyand/internal/cmd/standalone.go                 |   4 +-
 banyand/k8s.yml                                    | 165 +++++++++++++
 banyand/kv/badger.go                               |   4 +-
 banyand/liaison/grpc/discovery.go                  |  27 ++-
 banyand/liaison/grpc/measure.go                    |  33 ++-
 banyand/liaison/grpc/stream.go                     |  37 ++-
 banyand/measure/measure_query.go                   |   4 +-
 banyand/measure/measure_topn.go                    |  58 ++---
 banyand/measure/measure_write.go                   |  30 ++-
 banyand/measure/metadata.go                        |   2 +-
 banyand/metadata/schema/etcd.go                    |   3 +-
 banyand/okteto.yml                                 |  34 +++
 banyand/query/processor.go                         |  30 ++-
 banyand/query/processor_topn.go                    |  23 +-
 banyand/stream/metadata.go                         |   2 +-
 banyand/stream/stream_write.go                     |  29 ++-
 banyand/tsdb/block.go                              |  10 +-
 banyand/tsdb/block_ctrl.go                         | 120 ++++-----
 banyand/tsdb/bucket/bucket.go                      |   4 +-
 banyand/tsdb/bucket/queue.go                       |  24 +-
 banyand/tsdb/index/writer.go                       |   2 +-
 banyand/tsdb/indexdb.go                            |   2 +-
 banyand/tsdb/scope.go                              |  14 +-
 banyand/tsdb/segment.go                            |   8 +-
 banyand/tsdb/segment_ctrl.go                       |  86 ++++---
 banyand/tsdb/series.go                             |  80 +++---
 banyand/tsdb/series_seek.go                        |  43 ++--
 banyand/tsdb/series_seek_sort.go                   |  20 +-
 banyand/tsdb/series_write.go                       |   2 +-
 banyand/tsdb/seriesdb.go                           | 268 ++++++++++++++++-----
 banyand/tsdb/seriesdb_test.go                      |  57 ++++-
 banyand/tsdb/shard.go                              |  34 +++
 banyand/tsdb/shard_test.go                         | 158 ++++++------
 banyand/tsdb/tsdb.go                               |  45 ++--
 banyand/tsdb/tsdb_test.go                          |   4 +-
 dist/LICENSE                                       |  93 +++----
 .../license-github.com-cenkalti-backoff-v4.txt     |  20 ++
 ...ric.txt => license-github.com-go-logr-logr.txt} |   4 +-
 ...rib.txt => license-github.com-go-logr-stdr.txt} |   0
 ...xt => license-github.com-golang-jwt-jwt-v4.txt} |   1 +
 ...icense-github.com-inconshreveable-mousetrap.txt | 208 +++++++++++++++-
 ...etry.io-otel-exporters-otlp-internal-retry.txt} |   0
 ...tel-exporters-otlp-otlptrace-otlptracegrpc.txt} |   0
 ...telemetry.io-otel-exporters-otlp-otlptrace.txt} |   0
 ...license-go.opentelemetry.io-otel-sdk-metric.txt | 201 ----------------
 docs/api-reference.md                              |   2 +
 go.mod                                             |  93 +++----
 go.sum                                             | 218 ++++++++++-------
 pkg/grpchelper/client.go                           |   8 +-
 pkg/index/index.go                                 |   4 +
 pkg/index/inverted/inverted.go                     |  77 +++---
 pkg/index/inverted/inverted_test.go                |  32 ++-
 pkg/index/iterator.go                              |   9 +-
 pkg/index/lsm/search.go                            |   7 +-
 pkg/logger/logger.go                               |  49 +++-
 pkg/logger/setting.go                              |  31 ++-
 pkg/partition/entity.go                            |  33 ++-
 pkg/pb/v1/metadata.go                              |  19 --
 .../http_health.go => pb/v1/tsdb/interval.go}      |  43 ++--
 pkg/pb/v1/write.go                                 |  32 +--
 pkg/query/logical/common.go                        |  10 +-
 pkg/query/logical/measure/measure_plan.go          |   2 +-
 .../logical/measure/measure_plan_aggregation.go    |   3 +-
 pkg/query/logical/measure/measure_plan_groupby.go  |   3 +-
 .../measure/measure_plan_indexscan_local.go        |  16 +-
 pkg/query/logical/measure/measure_plan_top.go      |   4 +-
 .../logical/stream/stream_plan_indexscan_global.go |   2 +-
 .../logical/stream/stream_plan_indexscan_local.go  |  11 +-
 pkg/query/logical/stream/stream_plan_tag_filter.go |   2 +
 pkg/schema/metadata.go                             |   4 +-
 pkg/test/helpers/grpc_health.go                    |   4 +-
 pkg/test/helpers/http_health.go                    |   4 +-
 .../index_rule_bindings/service_traffic.json       |   3 +-
 pkg/test/measure/testdata/index_rules/layer.json   |  13 +
 .../testdata/index_rules/searchable_name.json      |   2 +-
 pkg/timestamp/scheduler.go                         |   8 +-
 .../cases/measure/data/input/tag_filter_int.yaml   |  52 ++--
 .../data/testdata/service_traffic_data.json        |   2 +-
 .../cases/measure/data/want/tag_filter_int.yaml    |  61 ++---
 test/cases/measure/measure.go                      |   1 +
 test/cases/stream/stream.go                        |   2 +-
 test/e2e-v2/script/env                             |   6 +-
 85 files changed, 1784 insertions(+), 1090 deletions(-)
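
A recurring change in this patch is guarding debug log construction behind an
Enabled() check, so that expensive field serialization (for example rendering a
protobuf request to JSON) is skipped entirely when the debug level is off. The
repository's pkg/logger wraps a zerolog-style API; the standalone sketch below
uses zerolog directly to illustrate the same guard pattern. It is a minimal
illustration only, not code from this patch: expensiveDump and its payload are
hypothetical stand-ins for any costly payload rendering.

    package main

    import (
    	"os"

    	"github.com/rs/zerolog"
    )

    // expensiveDump stands in for any costly serialization (e.g. rendering a
    // protobuf message to JSON) that should only happen when the log line
    // will actually be emitted.
    func expensiveDump() string {
    	return `{"shard_id":1,"series_hash":"..."}`
    }

    func main() {
    	// Logger configured at info level, so debug events are filtered out.
    	log := zerolog.New(os.Stdout).With().Timestamp().Logger().Level(zerolog.InfoLevel)

    	// Guarded form used throughout this patch: Enabled() reports false
    	// when the event would be discarded, so expensiveDump() never runs.
    	if e := log.Debug(); e.Enabled() {
    		e.Str("written", expensiveDump()).Msg("write measure")
    	}

    	// Unguarded form for comparison: the field arguments are still
    	// evaluated (expensiveDump() still runs) even though the message
    	// itself is dropped at info level.
    	log.Debug().Str("written", expensiveDump()).Msg("write measure")
    }
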

diff --git a/.gitignore b/.gitignore
index 14ba169..eaad4b5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,6 @@ target
 
 # snky cache
 .dccache
+
+# okteto
+.stignore
diff --git a/api/proto/banyandb/measure/v1/write.proto b/api/proto/banyandb/measure/v1/write.proto
index 6562a97..49a7a00 100644
--- a/api/proto/banyandb/measure/v1/write.proto
+++ b/api/proto/banyandb/measure/v1/write.proto
@@ -51,5 +51,6 @@ message WriteResponse {}
 message InternalWriteRequest {
   uint32 shard_id = 1;
   bytes series_hash = 2;
-  WriteRequest request = 3;
+  repeated model.v1.TagValue entity_values = 3;
+  WriteRequest request = 4;
 }
diff --git a/api/proto/banyandb/stream/v1/write.proto b/api/proto/banyandb/stream/v1/write.proto
index 396323b..e177eb1 100644
--- a/api/proto/banyandb/stream/v1/write.proto
+++ b/api/proto/banyandb/stream/v1/write.proto
@@ -50,5 +50,6 @@ message WriteResponse {}
 message InternalWriteRequest {
   uint32 shard_id = 1;
   bytes series_hash = 2;
-  WriteRequest request = 3;
+  repeated model.v1.TagValue entity_values = 3;
+  WriteRequest request = 4;
 }
diff --git a/banyand/internal/cmd/standalone.go b/banyand/internal/cmd/standalone.go
index 440c8c9..020fb51 100644
--- a/banyand/internal/cmd/standalone.go
+++ b/banyand/internal/cmd/standalone.go
@@ -115,7 +115,9 @@ func newStandaloneCmd() *cobra.Command {
 	}
 
 	standaloneCmd.Flags().StringVarP(&logging.Env, "logging.env", "", "prod", "the logging")
-	standaloneCmd.Flags().StringVarP(&logging.Level, "logging.level", "", "info", "the level of logging")
+	standaloneCmd.Flags().StringVarP(&logging.Level, "logging.level", "", "info", "the root level of logging")
+	standaloneCmd.Flags().StringArrayVarP(&logging.Modules, "logging.modules", "", nil, "the specific module")
+	standaloneCmd.Flags().StringArrayVarP(&logging.Levels, "logging.levels", "", nil, "the level logging of logging")
 	standaloneCmd.Flags().AddFlagSet(g.RegisterFlags().FlagSet)
 	return standaloneCmd
 }
diff --git a/banyand/k8s.yml b/banyand/k8s.yml
new file mode 100644
index 0000000..0d78bb6
--- /dev/null
+++ b/banyand/k8s.yml
@@ -0,0 +1,165 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: banyand-metadata
+spec:
+  resources:
+    requests:
+      storage: 5Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteOnce
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: banyand-measure
+spec:
+  resources:
+    requests:
+      storage: 100Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteOnce
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: banyand-stream
+spec:
+  resources:
+    requests:
+      storage: 100Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteOnce
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: banyand
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: banyand
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: banyand
+    spec:
+      initContainers:
+      - image: busybox
+        command: ["/bin/sh"]
+        args: ["-c", "rm -rf /tmp/measure/* && rm -rf /tmp/stream/*"]
+        name: cleanup
+        volumeMounts:
+        - name:  measure
+          mountPath: /tmp/measure
+        - name:  stream
+          mountPath: /tmp/stream
+      containers:
+      - name: banyand
+        image: apache/skywalking-banyandb:v0.0.0-dev
+        args:
+        - "standalone"
+        - "--logging.level=warn"
+        - "--logging.modules=measure.measure-default.service_cpm_minute"
+        - "--logging.levels=debug"
+        - "--logging.modules=query.measure.measure-default.service_cpm_minute"
+        - "--logging.levels=debug"
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /api/healthz
+            port: 17913
+            scheme: HTTP
+          initialDelaySeconds: 20
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 10
+        resources:
+          limits:
+            memory: "29G"
+            cpu: "7"
+        ports:
+        - containerPort: 17912
+        - containerPort: 17913
+        - containerPort: 2121
+        - containerPort: 6060
+        volumeMounts:
+        - name:  metadata
+          mountPath: /tmp/metadata
+        - name:  measure
+          mountPath: /tmp/measure
+        - name:  stream
+          mountPath: /tmp/stream
+      - image: busybox
+        command: ["/bin/sh"]
+        args: ["-c", "while true; do ls /tmp; sleep 300s;done"]
+        name: debug-entry 
+        # resources:
+        #   limits:
+        #     memory: "10"
+        #     cpu: "100mi"
+        volumeMounts:
+        - name:  metadata
+          mountPath: /tmp/metadata
+        - name:  measure
+          mountPath: /tmp/measure
+        - name:  stream
+          mountPath: /tmp/stream
+      volumes:
+        - name: metadata
+          persistentVolumeClaim:
+            claimName: banyand-metadata
+        - name: measure
+          persistentVolumeClaim:
+            claimName: banyand-measure
+        - name: stream
+          persistentVolumeClaim:
+            claimName: banyand-stream
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: banyand
+spec:
+  selector:
+    app: banyand
+  ports:
+  - port: 17912
+    targetPort: 17912
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: api
+spec:
+  selector:
+    app: banyand
+  ports:
+  - port: 17913
+    targetPort: 17913
+  type: LoadBalancer
diff --git a/banyand/kv/badger.go b/banyand/kv/badger.go
index 9ef467b..9508a16 100644
--- a/banyand/kv/badger.go
+++ b/banyand/kv/badger.go
@@ -164,10 +164,10 @@ func (b *badgerDB) Scan(prefix, seekKey []byte, opt ScanOpts, f ScanFunc) error
 	for it.Seek(seekKey); it.Valid(); it.Next() {
 		k := y.ParseKey(it.Key())
 		if len(k) < len(seekKey) {
-			break
+			continue
 		}
 		if !bytes.Equal(prefix, k[0:len(prefix)]) {
-			break
+			continue
 		}
 		err := f(b.shardID, k, func() ([]byte, error) {
 			return y.Copy(it.Value().Value), nil
diff --git a/banyand/liaison/grpc/discovery.go b/banyand/liaison/grpc/discovery.go
index 98cba84..4c20e0e 100644
--- a/banyand/liaison/grpc/discovery.go
+++ b/banyand/liaison/grpc/discovery.go
@@ -56,16 +56,16 @@ func (ds *discoveryService) SetLogger(log *logger.Logger) {
 	ds.entityRepo.log = log
 }
 
-func (ds *discoveryService) navigate(metadata *commonv1.Metadata, tagFamilies []*modelv1.TagFamilyForWrite) (tsdb.Entity, common.ShardID, error) {
+func (ds *discoveryService) navigate(metadata *commonv1.Metadata, tagFamilies []*modelv1.TagFamilyForWrite) (tsdb.Entity, tsdb.EntityValues, common.ShardID, error) {
 	shardNum, existed := ds.shardRepo.shardNum(getID(&commonv1.Metadata{
 		Name: metadata.Group,
 	}))
 	if !existed {
-		return nil, common.ShardID(0), errors.Wrapf(ErrNotExist, "finding the shard num by: %v", metadata)
+		return nil, nil, common.ShardID(0), errors.Wrapf(ErrNotExist, "finding the shard num by: %v", metadata)
 	}
 	locator, existed := ds.entityRepo.getLocator(getID(metadata))
 	if !existed {
-		return nil, common.ShardID(0), errors.Wrapf(ErrNotExist, "finding the locator by: %v", metadata)
+		return nil, nil, common.ShardID(0), errors.Wrapf(ErrNotExist, "finding the locator by: %v", metadata)
 	}
 	return locator.Locate(metadata.Name, tagFamilies, shardNum)
 }
@@ -88,10 +88,13 @@ func (s *shardRepo) Rev(message bus.Message) (resp bus.Message) {
 		return
 	}
 	s.setShardNum(e)
-	s.log.Debug().
-		Str("action", databasev1.Action_name[int32(e.Action)]).
-		Uint64("shardID", e.Shard.Id).
-		Msg("received a shard e")
+
+	if le := s.log.Debug(); le.Enabled() {
+		le.
+			Str("action", databasev1.Action_name[int32(e.Action)]).
+			Uint64("shardID", e.Shard.Id).
+			Msg("received a shard e")
+	}
 	return
 }
 
@@ -136,10 +139,12 @@ func (s *entityRepo) Rev(message bus.Message) (resp bus.Message) {
 		return
 	}
 	id := getID(e.GetSubject())
-	s.log.Debug().
-		Str("action", databasev1.Action_name[int32(e.Action)]).
-		Interface("subject", id).
-		Msg("received an entity event")
+	if le := s.log.Debug(); le.Enabled() {
+		le.
+			Str("action", databasev1.Action_name[int32(e.Action)]).
+			Interface("subject", id).
+			Msg("received an entity event")
+	}
 	s.RWMutex.Lock()
 	defer s.RWMutex.Unlock()
 	switch e.Action {
diff --git a/banyand/liaison/grpc/measure.go b/banyand/liaison/grpc/measure.go
index 8d23d87..404f11f 100644
--- a/banyand/liaison/grpc/measure.go
+++ b/banyand/liaison/grpc/measure.go
@@ -58,28 +58,32 @@ func (ms *measureService) Write(measure measurev1.MeasureService_WriteServer) er
 			return err
 		}
 		if errTime := timestamp.CheckPb(writeRequest.DataPoint.Timestamp); errTime != nil {
-			sampled.Error().Err(errTime).RawJSON("written", logger.Proto(writeRequest)).Msg("the data point time is invalid")
+			sampled.Error().Err(errTime).Stringer("written", writeRequest).Msg("the data point time is invalid")
 			if errResp := reply(); errResp != nil {
 				return errResp
 			}
 			continue
 		}
-		entity, shardID, err := ms.navigate(writeRequest.GetMetadata(), writeRequest.GetDataPoint().GetTagFamilies())
+		entity, tagValues, shardID, err := ms.navigate(writeRequest.GetMetadata(), writeRequest.GetDataPoint().GetTagFamilies())
 		if err != nil {
-			sampled.Error().Err(err).Msg("failed to navigate to the write target")
+			sampled.Error().Err(err).RawJSON("written", logger.Proto(writeRequest)).Msg("failed to navigate to the write target")
 			if errResp := reply(); errResp != nil {
 				return errResp
 			}
 			continue
 		}
-		message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), &measurev1.InternalWriteRequest{
+		iwr := &measurev1.InternalWriteRequest{
 			Request:    writeRequest,
 			ShardId:    uint32(shardID),
 			SeriesHash: tsdb.HashEntity(entity),
-		})
+		}
+		if ms.log.Debug().Enabled() {
+			iwr.EntityValues = tagValues.Encode()
+		}
+		message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), iwr)
 		_, errWritePub := ms.pipeline.Publish(data.TopicMeasureWrite, message)
 		if errWritePub != nil {
-			sampled.Error().Err(errWritePub).Msg("failed to send a message")
+			sampled.Error().Err(errWritePub).RawJSON("written", logger.Proto(writeRequest)).Msg("failed to send a message")
 			if errResp := reply(); errResp != nil {
 				return errResp
 			}
@@ -91,17 +95,22 @@ func (ms *measureService) Write(measure measurev1.MeasureService_WriteServer) er
 	}
 }
 
-func (ms *measureService) Query(_ context.Context, entityCriteria *measurev1.QueryRequest) (*measurev1.QueryResponse, error) {
-	if err := timestamp.CheckTimeRange(entityCriteria.GetTimeRange()); err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "%v is invalid :%s", entityCriteria.GetTimeRange(), err)
+var emptyMeasureQueryResponse = &measurev1.QueryResponse{DataPoints: make([]*measurev1.DataPoint, 0)}
+
+func (ms *measureService) Query(_ context.Context, req *measurev1.QueryRequest) (*measurev1.QueryResponse, error) {
+	if err := timestamp.CheckTimeRange(req.GetTimeRange()); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "%v is invalid :%s", req.GetTimeRange(), err)
 	}
-	message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), entityCriteria)
+	message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), req)
 	feat, errQuery := ms.pipeline.Publish(data.TopicMeasureQuery, message)
 	if errQuery != nil {
 		return nil, errQuery
 	}
 	msg, errFeat := feat.Get()
 	if errFeat != nil {
+		if errFeat == io.EOF {
+			return emptyMeasureQueryResponse, nil
+		}
 		return nil, errFeat
 	}
 	data := msg.Data()
@@ -111,7 +120,7 @@ func (ms *measureService) Query(_ context.Context, entityCriteria *measurev1.Que
 	case common.Error:
 		return nil, errors.WithMessage(ErrQueryMsg, d.Msg())
 	}
-	return nil, ErrQueryMsg
+	return nil, nil
 }
 
 func (ms *measureService) TopN(_ context.Context, topNRequest *measurev1.TopNRequest) (*measurev1.TopNResponse, error) {
@@ -135,5 +144,5 @@ func (ms *measureService) TopN(_ context.Context, topNRequest *measurev1.TopNReq
 	case common.Error:
 		return nil, errors.WithMessage(ErrQueryMsg, d.Msg())
 	}
-	return nil, ErrQueryMsg
+	return nil, nil
 }
diff --git a/banyand/liaison/grpc/stream.go b/banyand/liaison/grpc/stream.go
index 5a3ab2d..bf5ef5a 100644
--- a/banyand/liaison/grpc/stream.go
+++ b/banyand/liaison/grpc/stream.go
@@ -32,6 +32,7 @@ import (
 	streamv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/stream/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/bus"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
 )
 
@@ -63,22 +64,31 @@ func (s *streamService) Write(stream streamv1.StreamService_WriteServer) error {
 			}
 			continue
 		}
-		entity, shardID, err := s.navigate(writeEntity.GetMetadata(), writeEntity.GetElement().GetTagFamilies())
+		entity, tagValues, shardID, err := s.navigate(writeEntity.GetMetadata(), writeEntity.GetElement().GetTagFamilies())
 		if err != nil {
-			sampled.Error().Err(err).Msg("failed to navigate to the write target")
+			sampled.Error().Err(err).RawJSON("written", logger.Proto(writeEntity)).Msg("failed to navigate to the write target")
 			if errResp := reply(); errResp != nil {
 				return errResp
 			}
 			continue
 		}
-		message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), &streamv1.InternalWriteRequest{
+		iwr := &streamv1.InternalWriteRequest{
 			Request:    writeEntity,
 			ShardId:    uint32(shardID),
 			SeriesHash: tsdb.HashEntity(entity),
+		}
+		if s.log.Debug().Enabled() {
+			iwr.EntityValues = tagValues.Encode()
+		}
+		message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), &streamv1.InternalWriteRequest{
+			Request:      writeEntity,
+			ShardId:      uint32(shardID),
+			SeriesHash:   tsdb.HashEntity(entity),
+			EntityValues: tagValues.Encode(),
 		})
 		_, errWritePub := s.pipeline.Publish(data.TopicStreamWrite, message)
 		if errWritePub != nil {
-			sampled.Error().Err(errWritePub).Msg("failed to send a message")
+			sampled.Error().Err(errWritePub).RawJSON("written", logger.Proto(writeEntity)).Msg("failed to send a message")
 			if errResp := reply(); errResp != nil {
 				return errResp
 			}
@@ -90,17 +100,22 @@ func (s *streamService) Write(stream streamv1.StreamService_WriteServer) error {
 	}
 }
 
-func (s *streamService) Query(_ context.Context, entityCriteria *streamv1.QueryRequest) (*streamv1.QueryResponse, error) {
-	timeRange := entityCriteria.GetTimeRange()
+var emptyStreamQueryResponse = &streamv1.QueryResponse{Elements: make([]*streamv1.Element, 0)}
+
+func (s *streamService) Query(_ context.Context, req *streamv1.QueryRequest) (*streamv1.QueryResponse, error) {
+	timeRange := req.GetTimeRange()
 	if timeRange == nil {
-		entityCriteria.TimeRange = timestamp.DefaultTimeRange
+		req.TimeRange = timestamp.DefaultTimeRange
 	}
-	if err := timestamp.CheckTimeRange(entityCriteria.GetTimeRange()); err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "%v is invalid :%s", entityCriteria.GetTimeRange(), err)
+	if err := timestamp.CheckTimeRange(req.GetTimeRange()); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "%v is invalid :%s", req.GetTimeRange(), err)
 	}
-	message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), entityCriteria)
+	message := bus.NewMessage(bus.MessageID(time.Now().UnixNano()), req)
 	feat, errQuery := s.pipeline.Publish(data.TopicStreamQuery, message)
 	if errQuery != nil {
+		if errQuery == io.EOF {
+			return emptyStreamQueryResponse, nil
+		}
 		return nil, errQuery
 	}
 	msg, errFeat := feat.Get()
@@ -114,5 +129,5 @@ func (s *streamService) Query(_ context.Context, entityCriteria *streamv1.QueryR
 	case common.Error:
 		return nil, errors.WithMessage(ErrQueryMsg, d.Msg())
 	}
-	return nil, ErrQueryMsg
+	return nil, nil
 }
diff --git a/banyand/measure/measure_query.go b/banyand/measure/measure_query.go
index 2b09c90..1220e73 100644
--- a/banyand/measure/measure_query.go
+++ b/banyand/measure/measure_query.go
@@ -116,6 +116,7 @@ func (s *measure) ParseTagFamily(family string, item tsdb.Item) (*modelv1.TagFam
 	fid := familyIdentity(family, pbv1.TagFlag)
 	familyRawBytes, err := item.Family(fid)
 	if err != nil {
+		item.PrintContext(s.l.Named("tag-family"), fid, 10)
 		return nil, errors.Wrapf(err, "measure %s.%s parse family %s", s.name, s.group, family)
 	}
 	if len(familyRawBytes) < 1 {
@@ -161,7 +162,8 @@ func (s *measure) ParseField(name string, item tsdb.Item) (*measurev1.DataPoint_
 	fid := familyIdentity(name, pbv1.EncoderFieldFlag(fieldSpec, s.interval))
 	bytes, err := item.Family(fid)
 	if err != nil {
-		return nil, err
+		item.PrintContext(s.l.Named("field"), fid, 10)
+		return nil, errors.Wrapf(err, "measure %s.%s parse field %s", s.name, s.group, name)
 	}
 	if len(bytes) < 1 {
 		item.PrintContext(s.l.Named("field"), fid, 10)
diff --git a/banyand/measure/measure_topn.go b/banyand/measure/measure_topn.go
index 6a969f1..2454d72 100644
--- a/banyand/measure/measure_topn.go
+++ b/banyand/measure/measure_topn.go
@@ -37,7 +37,6 @@ import (
 	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
-	"github.com/apache/skywalking-banyandb/pkg/convert"
 	"github.com/apache/skywalking-banyandb/pkg/flow"
 	"github.com/apache/skywalking-banyandb/pkg/flow/streaming"
 	"github.com/apache/skywalking-banyandb/pkg/flow/streaming/sources"
@@ -134,10 +133,11 @@ func (t *topNStreamingProcessor) writeStreamRecord(record flow.StreamRecord) err
 	eventTime := t.downSampleTimeBucket(record.TimestampMillis())
 	timeBucket := eventTime.Format(timeBucketFormat)
 	var err error
-	t.l.Debug().
-		Str("TopN", t.topNSchema.GetMetadata().GetName()).
-		Int("rankNums", len(tuples)).
-		Msg("Write a tuple")
+	if e := t.l.Debug(); e.Enabled() {
+		e.Str("TopN", t.topNSchema.GetMetadata().GetName()).
+			Int("rankNums", len(tuples)).
+			Msg("Write a tuple")
+	}
 	for rankNum, tuple := range tuples {
 		fieldValue := tuple.V1.(int64)
 		data := tuple.V2.(flow.StreamRecord).Data().(flow.Data)
@@ -154,7 +154,7 @@ func (t *topNStreamingProcessor) writeData(eventTime time.Time, timeBucket strin
 			return errors.New("fail to extract tag values from topN result")
 		}
 	}
-	entity, shardID, err := t.locate(tagValues, rankNum)
+	entity, entityValues, shardID, err := t.locate(tagValues, rankNum)
 	if err != nil {
 		return err
 	}
@@ -162,7 +162,7 @@ func (t *topNStreamingProcessor) writeData(eventTime time.Time, timeBucket strin
 	if err != nil {
 		return err
 	}
-	series, err := shard.Series().GetByHashKey(tsdb.HashEntity(entity))
+	series, err := shard.Series().Get(tsdb.HashEntity(entity), entityValues)
 	if err != nil {
 		return err
 	}
@@ -221,13 +221,15 @@ func (t *topNStreamingProcessor) writeData(eventTime time.Time, timeBucket strin
 			return nil, errWrite
 		}
 		_, errWrite = writer.Write()
-		t.l.Debug().
-			Time("ts", eventTime).
-			Int("ts_nano", eventTime.Nanosecond()).
-			Uint64("series_id", uint64(series.ID())).
-			Uint64("item_id", uint64(writer.ItemID().ID)).
-			Int("shard_id", int(shardID)).
-			Msg("write measure")
+		if e := t.l.Debug(); e.Enabled() {
+			e.Time("ts", eventTime).
+				Int("ts_nano", eventTime.Nanosecond()).
+				Uint64("series_id", uint64(series.ID())).
+				Stringer("series", series).
+				Uint64("item_id", uint64(writer.ItemID().ID)).
+				Int("shard_id", int(shardID)).
+				Msg("write measure")
+		}
 		return writer, errWrite
 	}
 	_, err = writeFn()
@@ -242,33 +244,33 @@ func (t *topNStreamingProcessor) downSampleTimeBucket(eventTimeMillis int64) tim
 	return time.UnixMilli(eventTimeMillis - eventTimeMillis%t.interval.Milliseconds())
 }
 
-func (t *topNStreamingProcessor) locate(tagValues []*modelv1.TagValue, rankNum int) (tsdb.Entity, common.ShardID, error) {
+func (t *topNStreamingProcessor) locate(tagValues []*modelv1.TagValue, rankNum int) (tsdb.Entity, tsdb.EntityValues, common.ShardID, error) {
 	if len(t.topNSchema.GetGroupByTagNames()) != len(tagValues) {
-		return nil, 0, errors.New("no enough tag values for the entity")
+		return nil, nil, 0, errors.New("no enough tag values for the entity")
 	}
 	// entity prefix
 	// 1) source measure Name + topN aggregation Name
 	// 2) sort direction
 	// 3) rank number
-	entity := make(tsdb.Entity, 1+1+1+len(t.topNSchema.GetGroupByTagNames()))
+	entity := make(tsdb.EntityValues, 1+1+1+len(t.topNSchema.GetGroupByTagNames()))
 	// entity prefix
-	entity[0] = []byte(formatMeasureCompanionPrefix(t.topNSchema.GetSourceMeasure().GetName(),
+	entity[0] = tsdb.StrValue(formatMeasureCompanionPrefix(t.topNSchema.GetSourceMeasure().GetName(),
 		t.topNSchema.GetMetadata().GetName()))
-	entity[1] = convert.Int64ToBytes(int64(t.sortDirection.Number()))
-	entity[2] = convert.Int64ToBytes(int64(rankNum))
+	entity[1] = tsdb.Int64Value(int64(t.sortDirection.Number()))
+	entity[2] = tsdb.Int64Value(int64(rankNum))
 	// measureID as sharding key
 	for idx, tagVal := range tagValues {
-		var innerErr error
-		entity[idx+3], innerErr = pbv1.MarshalIndexFieldValue(tagVal)
-		if innerErr != nil {
-			return nil, 0, innerErr
-		}
+		entity[idx+3] = tagVal
+	}
+	e, err := entity.ToEntity()
+	if err != nil {
+		return nil, nil, 0, err
 	}
-	id, err := partition.ShardID(entity.Marshal(), t.shardNum)
+	id, err := partition.ShardID(e.Marshal(), t.shardNum)
 	if err != nil {
-		return nil, 0, err
+		return nil, nil, 0, err
 	}
-	return entity, common.ShardID(id), nil
+	return e, entity, common.ShardID(id), nil
 }
 
 func (t *topNStreamingProcessor) start() *topNStreamingProcessor {
diff --git a/banyand/measure/measure_write.go b/banyand/measure/measure_write.go
index 0e4b4fe..0c4d100 100644
--- a/banyand/measure/measure_write.go
+++ b/banyand/measure/measure_write.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/apache/skywalking-banyandb/api/common"
+	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
 	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
@@ -38,7 +39,7 @@ import (
 
 var ErrMalformedElement = errors.New("element is malformed")
 
-func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *measurev1.DataPointValue) error {
+func (s *measure) write(md *commonv1.Metadata, shardID common.ShardID, entity []byte, entityValues tsdb.EntityValues, value *measurev1.DataPointValue) error {
 	t := value.GetTimestamp().AsTime().Local()
 	if err := timestamp.Check(t); err != nil {
 		return errors.WithMessage(err, "writing stream")
@@ -55,7 +56,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 	if err != nil {
 		return err
 	}
-	series, err := shard.Series().GetByHashKey(seriesHashKey)
+	series, err := shard.Series().Get(entity, entityValues)
 	if err != nil {
 		return err
 	}
@@ -85,6 +86,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 			fieldSpec := sm.GetFields()[fi]
 			fType, isNull := pbv1.FieldValueTypeConv(fieldValue)
 			if isNull {
+				s.l.Warn().RawJSON("written", logger.Proto(value)).Msg("ignore null field")
 				continue
 			}
 			if fType != fieldSpec.GetFieldType() {
@@ -92,6 +94,7 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 			}
 			data := encodeFieldValue(fieldValue)
 			if data == nil {
+				s.l.Warn().RawJSON("written", logger.Proto(value)).Msg("ignore unknown field")
 				continue
 			}
 			builder.Family(familyIdentity(sm.GetFields()[fi].GetName(), pbv1.EncoderFieldFlag(fieldSpec, s.interval)), data)
@@ -101,14 +104,16 @@ func (s *measure) write(shardID common.ShardID, seriesHashKey []byte, value *mea
 			return nil, errWrite
 		}
 		_, errWrite = writer.Write()
-		s.l.Debug().
-			Time("ts", t).
-			Int("ts_nano", t.Nanosecond()).
-			Interface("data", value).
-			Uint64("series_id", uint64(series.ID())).
-			Uint64("item_id", uint64(writer.ItemID().ID)).
-			Int("shard_id", int(shardID)).
-			Msg("write measure")
+		if e := s.l.Named(md.Group, md.Name).Debug(); e.Enabled() {
+			e.Time("ts", t).
+				Int("ts_nano", t.Nanosecond()).
+				RawJSON("data", logger.Proto(value)).
+				Uint64("series_id", uint64(series.ID())).
+				Stringer("series", series).
+				Uint64("item_id", uint64(writer.ItemID().ID)).
+				Int("shard_id", int(shardID)).
+				Msg("write measure")
+		}
 		return writer, errWrite
 	}
 	writer, err := writeFn()
@@ -156,9 +161,10 @@ func (w *writeCallback) Rev(message bus.Message) (resp bus.Message) {
 		w.l.Warn().Msg("cannot find measure definition")
 		return
 	}
-	err := stm.write(common.ShardID(writeEvent.GetShardId()), writeEvent.GetSeriesHash(), writeEvent.GetRequest().GetDataPoint())
+	err := stm.write(writeEvent.GetRequest().GetMetadata(), common.ShardID(writeEvent.GetShardId()),
+		writeEvent.SeriesHash, tsdb.DecodeEntityValues(writeEvent.GetEntityValues()), writeEvent.GetRequest().GetDataPoint())
 	if err != nil {
-		w.l.Error().Err(err).Msg("fail to write entity")
+		w.l.Error().Err(err).RawJSON("written", logger.Proto(writeEvent)).Msg("fail to write entity")
 	}
 	return
 }
diff --git a/banyand/measure/metadata.go b/banyand/measure/metadata.go
index 4bddaa6..549c838 100644
--- a/banyand/measure/metadata.go
+++ b/banyand/measure/metadata.go
@@ -31,7 +31,7 @@ import (
 	"github.com/apache/skywalking-banyandb/banyand/metadata/schema"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
-	pb_v1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
+	pb_v1 "github.com/apache/skywalking-banyandb/pkg/pb/v1/tsdb"
 	resourceSchema "github.com/apache/skywalking-banyandb/pkg/schema"
 )
 
diff --git a/banyand/metadata/schema/etcd.go b/banyand/metadata/schema/etcd.go
index 83ad69d..8fc654d 100644
--- a/banyand/metadata/schema/etcd.go
+++ b/banyand/metadata/schema/etcd.go
@@ -144,8 +144,9 @@ func (e *etcdSchemaRegistry) StoppingNotify() <-chan struct{} {
 }
 
 func (e *etcdSchemaRegistry) Close() error {
+	_ = e.client.Close()
 	e.server.Close()
-	return e.client.Close()
+	return nil
 }
 
 func NewEtcdSchemaRegistry(options ...RegistryOption) (Registry, error) {
diff --git a/banyand/okteto.yml b/banyand/okteto.yml
new file mode 100644
index 0000000..936fdd0
--- /dev/null
+++ b/banyand/okteto.yml
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+build:
+  banyand:
+    image: apache/skywalking-banyandb:v0.0.0-dev
+    context: ..
+
+deploy:
+  - kubectl apply -f k8s.yml
+
+dev:
+  banyand:
+    image: okteto/golang:1
+    command: bash
+    sync:
+      - ..:/usr/src/app
+    volumes:
+      - /go
+      - /root/.cache
+    forward:
+      - 2345:2345
\ No newline at end of file
diff --git a/banyand/query/processor.go b/banyand/query/processor.go
index d3668c9..43100f2 100644
--- a/banyand/query/processor.go
+++ b/banyand/query/processor.go
@@ -40,7 +40,7 @@ import (
 )
 
 const (
-	moduleName = "query-processor"
+	moduleName = "query"
 )
 
 var (
@@ -72,8 +72,10 @@ func (p *streamQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 		resp = bus.NewMessage(bus.MessageID(now), common.NewError("invalid event data type"))
 		return
 	}
-	queryJSON := logger.Proto(queryCriteria)
-	p.log.Debug().RawJSON("criteria", queryJSON).Msg("received a query request")
+	sl := p.log.Named("stream", queryCriteria.Metadata.Group, queryCriteria.Metadata.Name)
+	if e := sl.Debug(); e.Enabled() {
+		e.RawJSON("criteria", logger.Proto(queryCriteria)).Msg("received a query request")
+	}
 
 	meta := queryCriteria.GetMetadata()
 	ec, err := p.streamService.Stream(meta)
@@ -100,11 +102,13 @@ func (p *streamQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 		return
 	}
 
-	p.log.Debug().Str("plan", plan.String()).Msg("query plan")
+	if e := sl.Debug(); e.Enabled() {
+		e.Str("plan", plan.String()).Msg("query plan")
+	}
 
 	entities, err := plan.(executor.StreamExecutable).Execute(ec)
 	if err != nil {
-		p.log.Error().Err(err).RawJSON("req", queryJSON).Msg("fail to execute the query plan")
+		sl.Error().Err(err).RawJSON("req", logger.Proto(queryCriteria)).Msg("fail to execute the query plan")
 		resp = bus.NewMessage(bus.MessageID(now), common.NewError("execute the query plan for stream %s: %v", meta.GetName(), err))
 		return
 	}
@@ -126,8 +130,10 @@ func (p *measureQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 		resp = bus.NewMessage(bus.MessageID(now), common.NewError("invalid event data type"))
 		return
 	}
-	queryJSON := logger.Proto(queryCriteria)
-	p.log.Info().RawJSON("req", queryJSON).Msg("received a query event")
+	ml := p.log.Named("measure", queryCriteria.Metadata.Group, queryCriteria.Metadata.Name)
+	if e := ml.Debug(); e.Enabled() {
+		e.RawJSON("req", logger.Proto(queryCriteria)).Msg("received a query event")
+	}
 
 	meta := queryCriteria.GetMetadata()
 	ec, err := p.measureService.Measure(meta)
@@ -154,16 +160,19 @@ func (p *measureQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 		return
 	}
 
-	p.queryService.log.Debug().Str("plan", plan.String()).Msg("query plan")
+	if e := ml.Debug(); e.Enabled() {
+		e.Str("plan", plan.String()).Msg("query plan")
+	}
 
 	mIterator, err := plan.(executor.MeasureExecutable).Execute(ec)
 	if err != nil {
+		ml.Error().Err(err).RawJSON("req", logger.Proto(queryCriteria)).Msg("fail to close the query plan")
 		resp = bus.NewMessage(bus.MessageID(now), common.NewError("fail to execute the query plan for measure %s: %v", meta.GetName(), err))
 		return
 	}
 	defer func() {
 		if err = mIterator.Close(); err != nil {
-			p.queryService.log.Error().Err(err).RawJSON("req", queryJSON).Msg("fail to close the query plan")
+			ml.Error().Err(err).RawJSON("req", logger.Proto(queryCriteria)).Msg("fail to close the query plan")
 		}
 	}()
 	result := make([]*measurev1.DataPoint, 0)
@@ -173,6 +182,9 @@ func (p *measureQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 			result = append(result, current[0])
 		}
 	}
+	if e := ml.Debug(); e.Enabled() {
+		e.RawJSON("ret", logger.Proto(&measurev1.QueryResponse{DataPoints: result})).Msg("got a measure")
+	}
 	resp = bus.NewMessage(bus.MessageID(now), result)
 	return
 }
diff --git a/banyand/query/processor_topn.go b/banyand/query/processor_topn.go
index 805937e..bfb3d2c 100644
--- a/banyand/query/processor_topn.go
+++ b/banyand/query/processor_topn.go
@@ -37,6 +37,7 @@ import (
 	"github.com/apache/skywalking-banyandb/pkg/convert"
 	"github.com/apache/skywalking-banyandb/pkg/flow"
 	"github.com/apache/skywalking-banyandb/pkg/flow/streaming"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
 	"github.com/apache/skywalking-banyandb/pkg/query/aggregation"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
@@ -57,7 +58,9 @@ func (t *topNQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 		t.log.Warn().Msg("invalid requested sort direction")
 		return
 	}
-	t.log.Debug().Msg("received a topN query event")
+	if e := t.log.Debug(); e.Enabled() {
+		e.Stringer("req", request).Msg("received a topN query event")
+	}
 	topNMetadata := request.GetMetadata()
 	topNSchema, err := t.metaService.TopNAggregationRegistry().GetTopNAggregation(context.TODO(), topNMetadata)
 	if err != nil {
@@ -96,7 +99,11 @@ func (t *topNQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 	}
 	for _, shard := range shards {
 		// TODO: support condition
-		sl, innerErr := shard.Series().List(tsdb.NewPath(entity))
+		sl, innerErr := shard.Series().List(context.WithValue(
+			context.Background(),
+			logger.ContextKey,
+			t.log,
+		), tsdb.NewPath(entity))
 		if innerErr != nil {
 			t.log.Error().Err(innerErr).
 				Str("topN", topNMetadata.GetName()).
@@ -111,6 +118,9 @@ func (t *topNQueryProcessor) Rev(message bus.Message) (resp bus.Message) {
 					Msg("fail to scan series")
 				return
 			}
+			if len(iters) < 1 {
+				continue
+			}
 			for _, iter := range iters {
 				for iter.Next() {
 					tuple, parseErr := parseTopNFamily(iter.Val(), sourceMeasure.GetInterval())
@@ -205,14 +215,17 @@ func (t *topNQueryProcessor) scanSeries(series tsdb.Series, request *measurev1.T
 		request.GetTimeRange().GetBegin().AsTime(),
 		request.GetTimeRange().GetEnd().AsTime()),
 	)
+	if err != nil {
+		if errors.Is(err, tsdb.ErrEmptySeriesSpan) {
+			return nil, nil
+		}
+		return nil, err
+	}
 	defer func(seriesSpan tsdb.SeriesSpan) {
 		if seriesSpan != nil {
 			_ = seriesSpan.Close()
 		}
 	}(seriesSpan)
-	if err != nil {
-		return nil, err
-	}
 	seeker, err := seriesSpan.SeekerBuilder().OrderByTime(modelv1.Sort_SORT_ASC).Build()
 	if err != nil {
 		return nil, err
diff --git a/banyand/stream/metadata.go b/banyand/stream/metadata.go
index 980c347..f4fba35 100644
--- a/banyand/stream/metadata.go
+++ b/banyand/stream/metadata.go
@@ -32,7 +32,7 @@ import (
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/encoding"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
-	pb_v1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
+	pb_v1 "github.com/apache/skywalking-banyandb/pkg/pb/v1/tsdb"
 	resourceSchema "github.com/apache/skywalking-banyandb/pkg/schema"
 )
 
diff --git a/banyand/stream/stream_write.go b/banyand/stream/stream_write.go
index ea4098c..4f84ae0 100644
--- a/banyand/stream/stream_write.go
+++ b/banyand/stream/stream_write.go
@@ -51,7 +51,7 @@ func init() {
 	)
 }
 
-func (s *stream) write(shardID common.ShardID, seriesHashKey []byte, value *streamv1.ElementValue) error {
+func (s *stream) write(shardID common.ShardID, entity []byte, entityValues tsdb.EntityValues, value *streamv1.ElementValue) error {
 	tp := value.GetTimestamp().AsTime()
 	if err := timestamp.Check(tp); err != nil {
 		return errors.WithMessage(err, "writing stream")
@@ -68,7 +68,7 @@ func (s *stream) write(shardID common.ShardID, seriesHashKey []byte, value *stre
 	if err != nil {
 		return err
 	}
-	series, err := shard.Series().GetByHashKey(seriesHashKey)
+	series, err := shard.Series().Get(entity, entityValues)
 	if err != nil {
 		return err
 	}
@@ -101,15 +101,17 @@ func (s *stream) write(shardID common.ShardID, seriesHashKey []byte, value *stre
 		}
 		_, errWrite = writer.Write()
 		writtenBytes.WithLabelValues(s.group).Add(float64(size))
-		s.l.Debug().
-			Time("ts", t).
-			Int("ts_nano", t.Nanosecond()).
-			Interface("data", value).
-			Uint64("series_id", uint64(series.ID())).
-			Uint64("item_id", uint64(writer.ItemID().ID)).
-			Int("shard_id", int(shardID)).
-			Str("stream", sm.Metadata.GetName()).
-			Msg("write stream")
+		if e := s.l.Debug(); e.Enabled() {
+			e.Time("ts", t).
+				Int("ts_nano", t.Nanosecond()).
+				RawJSON("data", logger.Proto(value)).
+				Uint64("series_id", uint64(series.ID())).
+				Stringer("series", series).
+				Uint64("item_id", uint64(writer.ItemID().ID)).
+				Int("shard_id", int(shardID)).
+				Str("stream", sm.Metadata.GetName()).
+				Msg("write stream")
+		}
 		return writer, errWrite
 	}
 	writer, err := writeFn()
@@ -154,9 +156,10 @@ func (w *writeCallback) Rev(message bus.Message) (resp bus.Message) {
 		w.l.Warn().Msg("cannot find stream definition")
 		return
 	}
-	err := stm.write(common.ShardID(writeEvent.GetShardId()), writeEvent.GetSeriesHash(), writeEvent.GetRequest().GetElement())
+	err := stm.write(common.ShardID(writeEvent.GetShardId()), writeEvent.SeriesHash,
+		tsdb.DecodeEntityValues(writeEvent.GetEntityValues()), writeEvent.GetRequest().GetElement())
 	if err != nil {
-		w.l.Error().Err(err).Msg("fail to write entity")
+		w.l.Error().Err(err).RawJSON("written", logger.Proto(writeEvent)).Msg("fail to write entity")
 	}
 	return
 }
diff --git a/banyand/tsdb/block.go b/banyand/tsdb/block.go
index bdfde12..4ab5fc6 100644
--- a/banyand/tsdb/block.go
+++ b/banyand/tsdb/block.go
@@ -75,14 +75,14 @@ type block struct {
 	clock         timestamp.Clock
 	timestamp.TimeRange
 	bucket.Reporter
-	segID          uint16
-	blockID        uint16
+	segID          SectionID
+	blockID        SectionID
 	segSuffix      string
 	encodingMethod EncodingMethod
 }
 
 type blockOpts struct {
-	segID     uint16
+	segID     SectionID
 	segSuffix string
 	blockSize IntervalRule
 	timeRange timestamp.TimeRange
@@ -326,7 +326,7 @@ type BlockDelegate interface {
 	lsmIndexReader() index.Searcher
 	invertedIndexReader() index.Searcher
 	primaryIndexReader() index.FieldIterable
-	identity() (segID uint16, blockID uint16)
+	identity() (segID SectionID, blockID SectionID)
 	startTime() time.Time
 	String() string
 }
@@ -361,7 +361,7 @@ func (d *bDelegate) startTime() time.Time {
 	return d.delegate.Start
 }
 
-func (d *bDelegate) identity() (segID uint16, blockID uint16) {
+func (d *bDelegate) identity() (segID SectionID, blockID SectionID) {
 	return d.delegate.segID, d.delegate.blockID
 }
 
diff --git a/banyand/tsdb/block_ctrl.go b/banyand/tsdb/block_ctrl.go
index 9df42a9..2df4160 100644
--- a/banyand/tsdb/block_ctrl.go
+++ b/banyand/tsdb/block_ctrl.go
@@ -19,6 +19,7 @@ package tsdb
 
 import (
 	"context"
+	"fmt"
 	"sort"
 	"sync"
 	"time"
@@ -34,7 +35,7 @@ import (
 type blockController struct {
 	sync.RWMutex
 	segCtx       context.Context
-	segID        uint16
+	segID        SectionID
 	segSuffix    string
 	location     string
 	segTimeRange timestamp.TimeRange
@@ -47,7 +48,7 @@ type blockController struct {
 	l *logger.Logger
 }
 
-func newBlockController(segCtx context.Context, segID uint16, segSuffix, location string, segTimeRange timestamp.TimeRange,
+func newBlockController(segCtx context.Context, segID SectionID, segSuffix, location string, segTimeRange timestamp.TimeRange,
 	blockSize IntervalRule, l *logger.Logger, blockQueue bucket.Queue, scheduler *timestamp.Scheduler,
 ) *blockController {
 	clock, _ := timestamp.GetClock(segCtx)
@@ -66,7 +67,7 @@ func newBlockController(segCtx context.Context, segID uint16, segSuffix, locatio
 }
 
 func (bc *blockController) Current() (bucket.Reporter, error) {
-	now := bc.clock.Now()
+	now := bc.Standard(bc.clock.Now())
 	ns := uint64(now.UnixNano())
 	if b := func() *block {
 		bc.RLock()
@@ -115,7 +116,7 @@ func (bc *blockController) OnMove(prev bucket.Reporter, next bucket.Reporter) {
 			SegID:   bc.segID,
 			BlockID: b.blockID,
 		}, nil); err != nil {
-			bc.l.Debug().Err(err).Msg("failed to push a expired head block to the queue")
+			bc.l.Error().Err(err).Msg("failed to push a expired head block to the queue")
 			ctxClosing, cancelClosing := context.WithTimeout(context.Background(), defaultEnqueueTimeout)
 			defer cancelClosing()
 			b.close(ctxClosing)
@@ -127,12 +128,22 @@ func (bc *blockController) OnMove(prev bucket.Reporter, next bucket.Reporter) {
 	event.Msg("move to the next block")
 }
 
+func (bc *blockController) Standard(t time.Time) time.Time {
+	switch bc.blockSize.Unit {
+	case HOUR:
+		return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+	case DAY:
+		return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+	}
+	panic("invalid interval unit")
+}
+
 func (bc *blockController) Format(tm time.Time) string {
 	switch bc.blockSize.Unit {
 	case HOUR:
-		return tm.Format(blockHourFormat)
+		return tm.Format(hourFormat)
 	case DAY:
-		return tm.Format(blockDayFormat)
+		return tm.Format(dayFormat)
 	}
 	panic("invalid interval unit")
 }
@@ -140,9 +151,9 @@ func (bc *blockController) Format(tm time.Time) string {
 func (bc *blockController) Parse(value string) (time.Time, error) {
 	switch bc.blockSize.Unit {
 	case HOUR:
-		return time.ParseInLocation(blockHourFormat, value, time.Local)
+		return time.ParseInLocation(hourFormat, value, time.Local)
 	case DAY:
-		return time.ParseInLocation(blockDayFormat, value, time.Local)
+		return time.ParseInLocation(dayFormat, value, time.Local)
 	}
 	panic("invalid interval unit")
 }
@@ -165,7 +176,7 @@ func (bc *blockController) span(ctx context.Context, timeRange timestamp.TimeRan
 	return dd, nil
 }
 
-func (bc *blockController) get(ctx context.Context, blockID uint16) (BlockDelegate, error) {
+func (bc *blockController) get(ctx context.Context, blockID SectionID) (BlockDelegate, error) {
 	b := bc.getBlock(blockID)
 	if b != nil {
 		return b.delegate(ctx)
@@ -173,7 +184,7 @@ func (bc *blockController) get(ctx context.Context, blockID uint16) (BlockDelega
 	return nil, nil
 }
 
-func (bc *blockController) getBlock(blockID uint16) *block {
+func (bc *blockController) getBlock(blockID SectionID) *block {
 	bb := bc.search(func(b *block) bool {
 		return b.blockID == blockID
 	})
@@ -203,7 +214,7 @@ func (bc *blockController) search(matcher func(*block) bool) (bb []*block) {
 	return bb
 }
 
-func (bc *blockController) closeBlock(ctx context.Context, blockID uint16) error {
+func (bc *blockController) closeBlock(ctx context.Context, blockID SectionID) error {
 	bc.RLock()
 	b := bc.getBlock(blockID)
 	bc.RUnlock()
@@ -213,55 +224,49 @@ func (bc *blockController) closeBlock(ctx context.Context, blockID uint16) error
 	return b.close(ctx)
 }
 
-func (bc *blockController) startTime(suffix string) (time.Time, error) {
-	t, err := bc.Parse(suffix)
-	if err != nil {
-		return time.Time{}, err
-	}
-	startTime := bc.segTimeRange.Start
-	switch bc.blockSize.Unit {
-	case HOUR:
-		return time.Date(startTime.Year(), startTime.Month(),
-			startTime.Day(), t.Hour(), 0, 0, 0, startTime.Location()), nil
-	case DAY:
-		return time.Date(startTime.Year(), startTime.Month(),
-			t.Day(), t.Hour(), 0, 0, 0, startTime.Location()), nil
-	}
-	panic("invalid interval unit")
-}
-
 func (bc *blockController) open() error {
-	return WalkDir(
-		bc.location,
-		blockPathPrefix,
-		func(suffix, absolutePath string) error {
-			bc.Lock()
-			defer bc.Unlock()
-			_, err := bc.load(suffix, absolutePath)
-			return err
-		})
+	bc.Lock()
+	defer bc.Unlock()
+	return loadSections(bc.location, bc, bc.blockSize, func(start, end time.Time) error {
+		_, err := bc.load(start, end, bc.location)
+		return err
+	})
 }
 
-func (bc *blockController) create(startTime time.Time) (*block, error) {
-	if startTime.Before(bc.segTimeRange.Start) {
-		startTime = bc.segTimeRange.Start
+func (bc *blockController) create(start time.Time) (*block, error) {
+	start = bc.Standard(start)
+	if start.Before(bc.segTimeRange.Start) {
+		start = bc.segTimeRange.Start
 	}
-	if !startTime.Before(bc.segTimeRange.End) {
+	if !start.Before(bc.segTimeRange.End) {
 		return nil, bucket.ErrNoMoreBucket
 	}
 	bc.Lock()
 	defer bc.Unlock()
-	suffix := bc.Format(startTime)
-	for _, b := range bc.lst {
-		if b.suffix == suffix {
-			return b, nil
+	var next *block
+	for _, s := range bc.lst {
+		if s.Start.Equal(start) {
+			return s, nil
 		}
+		if next == nil && s.Start.After(start) {
+			next = s
+		}
+	}
+	stdEnd := bc.blockSize.NextTime(start)
+	var end time.Time
+	if next != nil && next.Start.Before(stdEnd) {
+		end = next.Start
+	} else {
+		end = stdEnd
 	}
-	segPath, err := mkdir(blockTemplate, bc.location, suffix)
+	if end.After(bc.segTimeRange.End) {
+		end = bc.segTimeRange.End
+	}
+	_, err := mkdir(blockTemplate, bc.location, bc.Format(start))
 	if err != nil {
 		return nil, err
 	}
-	b, err := bc.load(suffix, segPath)
+	b, err := bc.load(start, end, bc.location)
 	if err != nil {
 		return nil, err
 	}
@@ -271,15 +276,8 @@ func (bc *blockController) create(startTime time.Time) (*block, error) {
 	return b, nil
 }
 
-func (bc *blockController) load(suffix, path string) (b *block, err error) {
-	starTime, err := bc.startTime(suffix)
-	if err != nil {
-		return nil, err
-	}
-	endTime := bc.blockSize.NextTime(starTime)
-	if endTime.After(bc.segTimeRange.End) {
-		endTime = bc.segTimeRange.End
-	}
+func (bc *blockController) load(startTime, endTime time.Time, root string) (b *block, err error) {
+	suffix := bc.Format(startTime)
 	if b, err = newBlock(
 		common.SetPosition(bc.segCtx, func(p common.Position) common.Position {
 			p.Block = suffix
@@ -288,8 +286,8 @@ func (bc *blockController) load(suffix, path string) (b *block, err error) {
 		blockOpts{
 			segID:     bc.segID,
 			segSuffix: bc.segSuffix,
-			path:      path,
-			timeRange: timestamp.NewSectionTimeRange(starTime, endTime),
+			path:      fmt.Sprintf(blockTemplate, root, suffix),
+			timeRange: timestamp.NewSectionTimeRange(startTime, endTime),
 			suffix:    suffix,
 			blockSize: bc.blockSize,
 			queue:     bc.blockQueue,
@@ -321,7 +319,9 @@ func (bc *blockController) close(ctx context.Context) (err error) {
 func (bc *blockController) remove(ctx context.Context, deadline time.Time) (err error) {
 	for _, b := range bc.blocks() {
 		if b.End.Before(deadline) {
-			bc.l.Debug().Stringer("block", b).Msg("start to remove data in a block")
+			if e := bc.l.Debug(); e.Enabled() {
+				e.Stringer("block", b).Msg("start to remove data in a block")
+			}
 			bc.Lock()
 			if errDel := b.delete(ctx); errDel != nil {
 				err = multierr.Append(err, errDel)
@@ -338,7 +338,7 @@ func (bc *blockController) remove(ctx context.Context, deadline time.Time) (err
 	return err
 }
 
-func (bc *blockController) removeBlock(blockID uint16) {
+func (bc *blockController) removeBlock(blockID SectionID) {
 	for i, b := range bc.lst {
 		if b.blockID == blockID {
 			bc.lst = append(bc.lst[:i], bc.lst[i+1:]...)
diff --git a/banyand/tsdb/bucket/bucket.go b/banyand/tsdb/bucket/bucket.go
index ccafd94..297a1dd 100644
--- a/banyand/tsdb/bucket/bucket.go
+++ b/banyand/tsdb/bucket/bucket.go
@@ -114,7 +114,9 @@ func (tr *timeBasedReporter) Report() (Channel, error) {
 				Capacity: int(tr.End.UnixNano() - tr.Start.UnixNano()),
 				Volume:   int(now.UnixNano() - tr.Start.UnixNano()),
 			}
-			l.Debug().Int("volume", status.Volume).Int("capacity", status.Capacity).Int("progress%", status.Volume*100/status.Capacity).Msg("reporting a status")
+			if e := l.Debug(); e.Enabled() {
+				e.Int("volume", status.Volume).Int("capacity", status.Capacity).Int("progress%", status.Volume*100/status.Capacity).Msg("reporting a status")
+			}
 			select {
 			case ch <- status:
 			default:
diff --git a/banyand/tsdb/bucket/queue.go b/banyand/tsdb/bucket/queue.go
index 31d3185..5128406 100644
--- a/banyand/tsdb/bucket/queue.go
+++ b/banyand/tsdb/bucket/queue.go
@@ -107,12 +107,16 @@ func (q *lruQueue) Touch(id fmt.Stringer) bool {
 	defer q.lock.Unlock()
 
 	if q.frequent.Contains(id) {
-		q.l.Debug().Stringer("id", id).Msg("get from frequent")
+		if e := q.l.Debug(); e.Enabled() {
+			e.Stringer("id", id).Msg("get from frequent")
+		}
 		return true
 	}
 
 	if q.recent.Contains(id) {
-		q.l.Debug().Stringer("id", id).Msg("promote from recent to frequent")
+		if e := q.l.Debug(); e.Enabled() {
+			e.Stringer("id", id).Msg("promote from recent to frequent")
+		}
 		q.recent.Remove(id)
 		q.frequent.Add(id, nil)
 		return true
@@ -125,20 +129,26 @@ func (q *lruQueue) Push(ctx context.Context, id fmt.Stringer, fn OnAddRecentFn)
 	defer q.lock.Unlock()
 
 	if q.frequent.Contains(id) {
-		q.l.Debug().Stringer("id", id).Msg("push to frequent")
+		if e := q.l.Debug(); e.Enabled() {
+			e.Stringer("id", id).Msg("push to frequent")
+		}
 		q.frequent.Add(id, nil)
 		return nil
 	}
 
 	if q.recent.Contains(id) {
-		q.l.Debug().Stringer("id", id).Msg("promote from recent to frequent")
+		if e := q.l.Debug(); e.Enabled() {
+			e.Stringer("id", id).Msg("promote from recent to frequent")
+		}
 		q.recent.Remove(id)
 		q.frequent.Add(id, nil)
 		return nil
 	}
 
 	if q.recentEvict.Contains(id) {
-		q.l.Debug().Stringer("id", id).Msg("restore from recentEvict")
+		if e := q.l.Debug(); e.Enabled() {
+			e.Stringer("id", id).Msg("restore from recentEvict")
+		}
 		if err := q.ensureSpace(ctx, true); err != nil {
 			return err
 		}
@@ -246,7 +256,9 @@ func (q *lruQueue) removeOldest(ctx context.Context, lst simplelru.LRUCache) err
 }
 
 func (q *lruQueue) cleanEvict(now time.Time, l *logger.Logger) bool {
-	l.Debug().Time("now", now).Msg("block queue wakes")
+	if e := l.Debug(); e.Enabled() {
+		e.Time("now", now).Msg("block queue wakes")
+	}
 	if q.evictLen() < 1 {
 		return true
 	}
diff --git a/banyand/tsdb/index/writer.go b/banyand/tsdb/index/writer.go
index a410bd8..1424be0 100644
--- a/banyand/tsdb/index/writer.go
+++ b/banyand/tsdb/index/writer.go
@@ -241,7 +241,7 @@ func getIndexValue(ruleIndex *partition.IndexRuleLocator, value Value) (val [][]
 	if tag.GetInt() != nil {
 		existInt = true
 	}
-	fv, err := pbv1.ParseIndexFieldValue(tag)
+	fv, err := pbv1.ParseTagValue(tag)
 	if errors.Is(err, pbv1.ErrNullValue) {
 		return nil, existInt, nil
 	}
diff --git a/banyand/tsdb/indexdb.go b/banyand/tsdb/indexdb.go
index c4281d0..4add96f 100644
--- a/banyand/tsdb/indexdb.go
+++ b/banyand/tsdb/indexdb.go
@@ -115,7 +115,7 @@ func (i *indexWriterBuilder) GlobalItemID(itemID GlobalItemID) IndexWriterBuilde
 }
 
 func (i *indexWriterBuilder) Build() (IndexWriter, error) {
-	seg, err := i.segCtrl.create(i.segCtrl.Format(i.ts), false)
+	seg, err := i.segCtrl.create(i.ts, false)
 	if err != nil {
 		return nil, err
 	}
diff --git a/banyand/tsdb/scope.go b/banyand/tsdb/scope.go
index f690a93..f65764f 100644
--- a/banyand/tsdb/scope.go
+++ b/banyand/tsdb/scope.go
@@ -18,6 +18,8 @@
 package tsdb
 
 import (
+	"context"
+
 	"github.com/apache/skywalking-banyandb/api/common"
 	"github.com/apache/skywalking-banyandb/banyand/observability"
 )
@@ -79,18 +81,14 @@ func (sdd *scopedSeriesDatabase) Close() error {
 	return nil
 }
 
-func (sdd *scopedSeriesDatabase) GetByHashKey(key []byte) (Series, error) {
-	return sdd.delegated.GetByHashKey(key)
+func (sdd *scopedSeriesDatabase) Get(key []byte, entityValues EntityValues) (Series, error) {
+	return sdd.delegated.Get(key, entityValues)
 }
 
 func (sdd *scopedSeriesDatabase) GetByID(id common.SeriesID) (Series, error) {
 	return sdd.delegated.GetByID(id)
 }
 
-func (sdd *scopedSeriesDatabase) Get(entity Entity) (Series, error) {
-	return sdd.delegated.Get(entity.Prepend(sdd.scope))
-}
-
-func (sdd *scopedSeriesDatabase) List(path Path) (SeriesList, error) {
-	return sdd.delegated.List(path.Prepend(sdd.scope))
+func (sdd *scopedSeriesDatabase) List(ctx context.Context, path Path) (SeriesList, error) {
+	return sdd.delegated.List(ctx, path.Prepend(sdd.scope))
 }
diff --git a/banyand/tsdb/segment.go b/banyand/tsdb/segment.go
index 5167398..d4d346e 100644
--- a/banyand/tsdb/segment.go
+++ b/banyand/tsdb/segment.go
@@ -35,7 +35,7 @@ import (
 var ErrEndOfSegment = errors.New("reached the end of the segment")
 
 type segment struct {
-	id     uint16
+	id     SectionID
 	path   string
 	suffix string
 
@@ -48,7 +48,7 @@ type segment struct {
 	closeOnce           sync.Once
 }
 
-func openSegment(ctx context.Context, startTime time.Time, path, suffix string,
+func openSegment(ctx context.Context, startTime, endTime time.Time, path, suffix string,
 	segmentSize, blockSize IntervalRule, blockQueue bucket.Queue, scheduler *timestamp.Scheduler,
 ) (s *segment, err error) {
 	suffixInteger, err := strconv.Atoi(suffix)
@@ -56,7 +56,7 @@ func openSegment(ctx context.Context, startTime time.Time, path, suffix string,
 		return nil, err
 	}
 	id := GenerateInternalID(segmentSize.Unit, suffixInteger)
-	timeRange := timestamp.NewSectionTimeRange(startTime, segmentSize.NextTime(startTime))
+	timeRange := timestamp.NewSectionTimeRange(startTime, endTime)
 	s = &segment{
 		id:        id,
 		path:      path,
@@ -124,7 +124,7 @@ func (s *segment) close(ctx context.Context) (err error) {
 	return nil
 }
 
-func (s *segment) closeBlock(ctx context.Context, id uint16) error {
+func (s *segment) closeBlock(ctx context.Context, id SectionID) error {
 	return s.blockController.closeBlock(ctx, id)
 }
 
diff --git a/banyand/tsdb/segment_ctrl.go b/banyand/tsdb/segment_ctrl.go
index 9f0f508..4d6cd30 100644
--- a/banyand/tsdb/segment_ctrl.go
+++ b/banyand/tsdb/segment_ctrl.go
@@ -19,6 +19,7 @@ package tsdb
 
 import (
 	"context"
+	"fmt"
 	"sort"
 	"sync"
 	"time"
@@ -72,13 +73,13 @@ func newSegmentController(shardCtx context.Context, location string, segmentSize
 				l.Warn().Int("segID", parseSuffix(bsID.SegID)).Msg("segment is absent")
 				return nil
 			}
-			l.Info().Uint16("blockID", bsID.BlockID).Int("segID", parseSuffix(bsID.SegID)).Msg("closing the block")
+			l.Info().Int("blockID", parseSuffix(bsID.BlockID)).Msg("closing the block")
 			return seg.closeBlock(ctx, bsID.BlockID)
 		})
 	return sc, err
 }
 
-func (sc *segmentController) get(segID uint16) *segment {
+func (sc *segmentController) get(segID SectionID) *segment {
 	lst := sc.segments()
 	last := len(lst) - 1
 	for i := range lst {
@@ -111,7 +112,7 @@ func (sc *segmentController) segments() (ss []*segment) {
 }
 
 func (sc *segmentController) Current() (bucket.Reporter, error) {
-	now := sc.clock.Now()
+	now := sc.Standard(sc.clock.Now())
 	ns := uint64(now.UnixNano())
 	if b := func() bucket.Reporter {
 		sc.RLock()
@@ -125,7 +126,7 @@ func (sc *segmentController) Current() (bucket.Reporter, error) {
 	}(); b != nil {
 		return b, nil
 	}
-	return sc.create(sc.Format(now), true)
+	return sc.create(now, true)
 }
 
 func (sc *segmentController) Next() (bucket.Reporter, error) {
@@ -134,8 +135,7 @@ func (sc *segmentController) Next() (bucket.Reporter, error) {
 		return nil, err
 	}
 	seg := c.(*segment)
-	reporter, err := sc.create(sc.Format(
-		sc.segmentSize.NextTime(seg.Start)), true)
+	reporter, err := sc.create(sc.segmentSize.NextTime(seg.Start), true)
 	if errors.Is(err, ErrEndOfSegment) {
 		return nil, bucket.ErrNoMoreBucket
 	}
@@ -153,12 +153,22 @@ func (sc *segmentController) OnMove(prev bucket.Reporter, next bucket.Reporter)
 	event.Msg("move to the next segment")
 }
 
+func (sc *segmentController) Standard(t time.Time) time.Time {
+	switch sc.segmentSize.Unit {
+	case HOUR:
+		return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+	case DAY:
+		return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+	}
+	panic("invalid interval unit")
+}
+
 func (sc *segmentController) Format(tm time.Time) string {
 	switch sc.segmentSize.Unit {
 	case HOUR:
-		return tm.Format(segHourFormat)
+		return tm.Format(hourFormat)
 	case DAY:
-		return tm.Format(segDayFormat)
+		return tm.Format(dayFormat)
 	}
 	panic("invalid interval unit")
 }
@@ -166,41 +176,50 @@ func (sc *segmentController) Format(tm time.Time) string {
 func (sc *segmentController) Parse(value string) (time.Time, error) {
 	switch sc.segmentSize.Unit {
 	case HOUR:
-		return time.ParseInLocation(segHourFormat, value, time.Local)
+		return time.ParseInLocation(hourFormat, value, time.Local)
 	case DAY:
-		return time.ParseInLocation(segDayFormat, value, time.Local)
+		return time.ParseInLocation(dayFormat, value, time.Local)
 	}
 	panic("invalid interval unit")
 }
 
 func (sc *segmentController) open() error {
-	return WalkDir(
-		sc.location,
-		segPathPrefix,
-		func(suffix, absolutePath string) error {
-			sc.Lock()
-			defer sc.Unlock()
-			_, err := sc.load(suffix, absolutePath, false)
-			if errors.Is(err, ErrEndOfSegment) {
-				return nil
-			}
-			return err
-		})
+	sc.Lock()
+	defer sc.Unlock()
+	return loadSections(sc.location, sc, sc.segmentSize, func(start, end time.Time) error {
+		_, err := sc.load(start, end, sc.location, false)
+		if errors.Is(err, ErrEndOfSegment) {
+			return nil
+		}
+		return err
+	})
 }
 
-func (sc *segmentController) create(suffix string, createBlockIfEmpty bool) (*segment, error) {
+func (sc *segmentController) create(start time.Time, createBlockIfEmpty bool) (*segment, error) {
 	sc.Lock()
 	defer sc.Unlock()
+	start = sc.Standard(start)
+	var next *segment
 	for _, s := range sc.lst {
-		if s.suffix == suffix {
+		if s.Contains(uint64(start.UnixNano())) {
 			return s, nil
 		}
+		if next == nil && s.Start.After(start) {
+			next = s
+		}
 	}
-	segPath, err := mkdir(segTemplate, sc.location, suffix)
+	stdEnd := sc.segmentSize.NextTime(start)
+	var end time.Time
+	if next != nil && next.Start.Before(stdEnd) {
+		end = next.Start
+	} else {
+		end = stdEnd
+	}
+	_, err := mkdir(segTemplate, sc.location, sc.Format(start))
 	if err != nil {
 		return nil, err
 	}
-	return sc.load(suffix, segPath, createBlockIfEmpty)
+	return sc.load(start, end, sc.location, createBlockIfEmpty)
 }
 
 func (sc *segmentController) sortLst() {
@@ -209,15 +228,12 @@ func (sc *segmentController) sortLst() {
 	})
 }
 
-func (sc *segmentController) load(suffix, path string, createBlockIfEmpty bool) (seg *segment, err error) {
-	startTime, err := sc.Parse(suffix)
-	if err != nil {
-		return nil, err
-	}
+func (sc *segmentController) load(start, end time.Time, root string, createBlockIfEmpty bool) (seg *segment, err error) {
+	suffix := sc.Format(start)
 	seg, err = openSegment(common.SetPosition(sc.shardCtx, func(p common.Position) common.Position {
 		p.Segment = suffix
 		return p
-	}), startTime, path, suffix, sc.segmentSize, sc.blockSize, sc.blockQueue, sc.scheduler)
+	}), start, end, fmt.Sprintf(segTemplate, root, suffix), suffix, sc.segmentSize, sc.blockSize, sc.blockQueue, sc.scheduler)
 	if err != nil {
 		return nil, err
 	}
@@ -230,7 +246,9 @@ func (sc *segmentController) remove(ctx context.Context, deadline time.Time) (er
 	sc.l.Info().Time("deadline", deadline).Msg("start to remove before deadline")
 	for _, s := range sc.segments() {
 		if s.End.Before(deadline) || s.Contains(uint64(deadline.UnixNano())) {
-			sc.l.Debug().Stringer("segment", s).Msg("start to remove data in a segment")
+			if e := sc.l.Debug(); e.Enabled() {
+				e.Stringer("segment", s).Msg("start to remove data in a segment")
+			}
 			err = multierr.Append(err, s.blockController.remove(ctx, deadline))
 			if s.End.Before(deadline) {
 				sc.Lock()
@@ -246,7 +264,7 @@ func (sc *segmentController) remove(ctx context.Context, deadline time.Time) (er
 	return err
 }
 
-func (sc *segmentController) removeSeg(segID uint16) {
+func (sc *segmentController) removeSeg(segID SectionID) {
 	for i, b := range sc.lst {
 		if b.id == segID {
 			sc.lst = append(sc.lst[:i], sc.lst[i+1:]...)
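
The create path above no longer trusts a preformatted suffix: the requested start is first snapped to the unit boundary by Standard, and the new segment's end is clipped to the start of the next existing segment so neighbouring sections cannot overlap. Below is a minimal, self-contained sketch of that boundary arithmetic; standard and sectionRange are hypothetical stand-ins, and a plain time.Duration replaces IntervalRule.

package main

import (
	"fmt"
	"time"
)

// standard truncates t to the top of its hour, mirroring the HOUR branch of
// Standard(); the DAY branch would zero the clock instead.
func standard(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}

// sectionRange returns [start, end) for a new section: end is the next unit
// boundary unless an existing section already starts inside that window.
func sectionRange(requested time.Time, unit time.Duration, nextExisting *time.Time) (time.Time, time.Time) {
	start := standard(requested)
	end := start.Add(unit)
	if nextExisting != nil && nextExisting.Before(end) {
		end = *nextExisting
	}
	return start, end
}

func main() {
	next := time.Date(1970, 1, 1, 12, 45, 0, 0, time.UTC)
	start, end := sectionRange(time.Date(1970, 1, 1, 12, 40, 0, 0, time.UTC), time.Hour, &next)
	fmt.Println(start, end) // 12:00 .. 12:45, clipped to the neighbouring section
}
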
diff --git a/banyand/tsdb/series.go b/banyand/tsdb/series.go
index 4d44f30..386ff91 100644
--- a/banyand/tsdb/series.go
+++ b/banyand/tsdb/series.go
@@ -40,8 +40,8 @@ var (
 
 type GlobalItemID struct {
 	ShardID  common.ShardID
-	segID    uint16
-	blockID  uint16
+	segID    SectionID
+	blockID  SectionID
 	SeriesID common.SeriesID
 	ID       common.ItemID
 }
@@ -49,24 +49,22 @@ type GlobalItemID struct {
 func (i *GlobalItemID) Marshal() []byte {
 	return bytes.Join([][]byte{
 		convert.Uint32ToBytes(uint32(i.ShardID)),
-		convert.Uint16ToBytes(i.segID),
-		convert.Uint16ToBytes(i.blockID),
+		sectionIDToBytes(i.segID),
+		sectionIDToBytes(i.blockID),
 		convert.Uint64ToBytes(uint64(i.SeriesID)),
 		convert.Uint64ToBytes(uint64(i.ID)),
 	}, nil)
 }
 
 func (i *GlobalItemID) UnMarshal(data []byte) error {
-	if len(data) != 4+2+2+8+8 {
+	if len(data) != 4+4+4+8+8 {
 		return ErrItemIDMalformed
 	}
 	var offset int
 	i.ShardID = common.ShardID(convert.BytesToUint32(data[offset : offset+4]))
 	offset += 4
-	i.segID = convert.BytesToUint16(data[offset : offset+2])
-	offset += 2
-	i.blockID = convert.BytesToUint16(data[offset : offset+2])
-	offset += 2
+	i.segID, offset = readSectionID(data, offset)
+	i.blockID, offset = readSectionID(data, offset)
 	i.SeriesID = common.SeriesID(convert.BytesToUint64(data[offset : offset+8]))
 	offset += 8
 	i.ID = common.ItemID(convert.BytesToUint64(data[offset:]))
@@ -78,6 +76,7 @@ type Series interface {
 	Span(ctx context.Context, timeRange timestamp.TimeRange) (SeriesSpan, error)
 	Create(ctx context.Context, t time.Time) (SeriesSpan, error)
 	Get(ctx context.Context, id GlobalItemID) (Item, io.Closer, error)
+	String() string
 }
 
 type SeriesSpan interface {
@@ -89,10 +88,11 @@ type SeriesSpan interface {
 var _ Series = (*series)(nil)
 
 type series struct {
-	id      common.SeriesID
-	blockDB blockDatabase
-	shardID common.ShardID
-	l       *logger.Logger
+	id        common.SeriesID
+	idLiteral string
+	blockDB   blockDatabase
+	shardID   common.ShardID
+	l         *logger.Logger
 }
 
 func (s *series) Get(ctx context.Context, id GlobalItemID) (Item, io.Closer, error) {
@@ -115,6 +115,10 @@ func (s *series) ID() common.SeriesID {
 	return s.id
 }
 
+func (s *series) String() string {
+	return s.idLiteral
+}
+
 func (s *series) Span(ctx context.Context, timeRange timestamp.TimeRange) (SeriesSpan, error) {
 	blocks, err := s.blockDB.span(ctx, timeRange)
 	if err != nil {
@@ -123,10 +127,15 @@ func (s *series) Span(ctx context.Context, timeRange timestamp.TimeRange) (Serie
 	if len(blocks) < 1 {
 		return nil, ErrEmptySeriesSpan
 	}
-	s.l.Debug().
-		Times("time_range", []time.Time{timeRange.Start, timeRange.End}).
-		Msg("select series span")
-	return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, s.l), timeRange, blocks, s.id, s.shardID), nil
+	l := logger.FetchOrDefault(ctx, "series", s.l)
+	if e := l.Debug(); e.Enabled() {
+		e.Times("time_range", []time.Time{timeRange.Start, timeRange.End}).
+			Uint64("series_id", uint64(s.id)).
+			Str("series", s.idLiteral).
+			Msg("select series span")
+	}
+	return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, l), timeRange, blocks,
+		s.id, s.idLiteral, s.shardID), nil
 }
 
 func (s *series) Create(ctx context.Context, t time.Time) (SeriesSpan, error) {
@@ -136,27 +145,36 @@ func (s *series) Create(ctx context.Context, t time.Time) (SeriesSpan, error) {
 		return nil, err
 	}
 	if len(blocks) > 0 {
-		s.l.Debug().
-			Time("time", t).
-			Msg("load a series span")
-		return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, s.l), tr, blocks, s.id, s.shardID), nil
+		if e := s.l.Debug(); e.Enabled() {
+			e.Time("time", t).
+				Uint64("series_id", uint64(s.id)).
+				Str("series", s.idLiteral).
+				Msg("load a series span")
+		}
+		return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, s.l), tr, blocks,
+			s.id, s.idLiteral, s.shardID), nil
 	}
 	b, err := s.blockDB.create(ctx, t)
 	if err != nil {
 		return nil, err
 	}
 	blocks = append(blocks, b)
-	s.l.Debug().
-		Time("time", t).
-		Msg("create a series span")
-	return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, s.l), tr, blocks, s.id, s.shardID), nil
+	if e := s.l.Debug(); e.Enabled() {
+		e.Time("time", t).
+			Uint64("series_id", uint64(s.id)).
+			Str("series", s.idLiteral).
+			Msg("create a series span")
+	}
+	return newSeriesSpan(context.WithValue(context.Background(), logger.ContextKey, s.l), tr, blocks,
+		s.id, s.idLiteral, s.shardID), nil
 }
 
-func newSeries(ctx context.Context, id common.SeriesID, blockDB blockDatabase) *series {
+func newSeries(ctx context.Context, id common.SeriesID, idLiteral string, blockDB blockDatabase) *series {
 	s := &series{
-		id:      id,
-		blockDB: blockDB,
-		shardID: blockDB.shardID(),
+		id:        id,
+		idLiteral: idLiteral,
+		blockDB:   blockDB,
+		shardID:   blockDB.shardID(),
 	}
 	parentLogger := ctx.Value(logger.ContextKey)
 	if pl, ok := parentLogger.(*logger.Logger); ok {
@@ -172,6 +190,7 @@ var _ SeriesSpan = (*seriesSpan)(nil)
 type seriesSpan struct {
 	blocks    []BlockDelegate
 	seriesID  common.SeriesID
+	series    string
 	shardID   common.ShardID
 	timeRange timestamp.TimeRange
 	l         *logger.Logger
@@ -192,10 +211,11 @@ func (s *seriesSpan) SeekerBuilder() SeekerBuilder {
 	return newSeekerBuilder(s)
 }
 
-func newSeriesSpan(ctx context.Context, timeRange timestamp.TimeRange, blocks []BlockDelegate, id common.SeriesID, shardID common.ShardID) *seriesSpan {
+func newSeriesSpan(ctx context.Context, timeRange timestamp.TimeRange, blocks []BlockDelegate, id common.SeriesID, series string, shardID common.ShardID) *seriesSpan {
 	s := &seriesSpan{
 		blocks:    blocks,
 		seriesID:  id,
+		series:    series,
 		shardID:   shardID,
 		timeRange: timeRange,
 	}
diff --git a/banyand/tsdb/series_seek.go b/banyand/tsdb/series_seek.go
index 18db6e2..0ff2196 100644
--- a/banyand/tsdb/series_seek.go
+++ b/banyand/tsdb/series_seek.go
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/dgraph-io/badger/v3/y"
+	"github.com/rs/zerolog"
 
 	"github.com/apache/skywalking-banyandb/api/common"
 	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
@@ -143,49 +144,45 @@ func (i *item) PrintContext(l *logger.Logger, family []byte, n int) {
 	defer pre.Close()
 	defer next.Close()
 	j := 0
-	currentTS := uint64(i.itemID)
-
-	each := func(iter kv.Iterator) {
+	each := func(iter kv.Iterator, logEvent *zerolog.Event) *zerolog.Event {
 		if !bytes.Equal(key, iter.Key()) {
-			return
+			return logEvent
 		}
 		j++
 
-		ts := y.ParseTs(iter.RawKey())
-
-		logEvent := l.Info().Int("i", j).
+		logEvent = logEvent.Int("i", j).
 			Time("ts", time.Unix(0, int64(y.ParseTs(iter.RawKey()))))
+		locArr := zerolog.Arr()
+		rangeArr := zerolog.Arr()
+		decodedNumArr := zerolog.Arr()
 		if err := decoder.Decode(family, iter.Val()); err != nil {
-			logEvent = logEvent.Str("loc", "mem")
-			if ts == currentTS {
-				logEvent = logEvent.Bool("at", true)
-			}
+			locArr.Str("mem")
 		} else {
+			locArr.Str("table")
 			start, end := decoder.Range()
-			logEvent = logEvent.Time("start", time.Unix(0, int64(start))).
-				Time("end", time.Unix(0, int64(end))).Int("num", decoder.Len()).Str("loc", "table")
-			if start <= currentTS && currentTS <= end {
-				if dd, err := decoder.Get(currentTS); err == nil && len(dd) > 0 {
-					logEvent = logEvent.Bool("at", true)
-				}
-			}
+			rangeArr.Time(time.Unix(0, int64(start)))
+			rangeArr.Time(time.Unix(0, int64(end)))
+			decodedNumArr.Int(decoder.Len())
 		}
-		logEvent.Send()
+		logEvent = logEvent.Array("loc", locArr).Array("range", rangeArr).Array("decodedNum", decodedNumArr)
+		return logEvent
 	}
 
 	s := hex.EncodeToString(key)
 	if len(s) > 7 {
 		s = s[:7]
 	}
-	l.Info().Str("prefix", s).Time("ts", time.Unix(0, int64(i.itemID))).Msg("print previous lines")
+	event := l.Info().Str("prefix", s).Time("ts", time.Unix(0, int64(i.itemID)))
 	for ; pre.Valid() && j < n; pre.Next() {
-		each(pre)
+		event = each(pre, event)
 	}
+	event.Msg("print previous lines")
 	j = 0
-	l.Info().Str("prefix", s).Time("ts", time.Unix(0, int64(i.itemID))).Msg("print next lines")
+	event = l.Info().Str("prefix", s).Time("ts", time.Unix(0, int64(i.itemID)))
 	for ; next.Valid() && j < n; next.Next() {
-		each(next)
+		event = each(next, event)
 	}
+	event.Msg("print next lines")
 }
 
 func (i *item) Val() ([]byte, error) {
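
PrintContext now batches what used to be one log line per iterated entry into a single event, accumulating per-entry values in zerolog arrays. A small, self-contained sketch of that accumulate-then-emit pattern (the sample values are made up):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	l := zerolog.New(os.Stdout)
	// Collect values from a loop into arrays and emit one log line,
	// instead of logging every iteration separately.
	locs := zerolog.Arr()
	nums := zerolog.Arr()
	for i, loc := range []string{"mem", "table", "table"} {
		locs.Str(loc)
		nums.Int(i)
	}
	l.Info().Array("loc", locs).Array("i", nums).Msg("print context")
}
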
diff --git a/banyand/tsdb/series_seek_sort.go b/banyand/tsdb/series_seek_sort.go
index 910499e..2137899 100644
--- a/banyand/tsdb/series_seek_sort.go
+++ b/banyand/tsdb/series_seek_sort.go
@@ -144,12 +144,14 @@ func (s *seekerBuilder) buildSeriesByTime() ([]Iterator, error) {
 			}
 		}
 	}
-	s.seriesSpan.l.Debug().
-		Str("order", modelv1.Sort_name[int32(s.order)]).
-		Times("blocks", bTimes).
-		Uint64("series_id", uint64(s.seriesSpan.seriesID)).
-		Int("shard_id", int(s.seriesSpan.shardID)).
-		Msg("seek series by time")
+	if e := s.seriesSpan.l.Debug(); e.Enabled() {
+		e.Str("order", modelv1.Sort_name[int32(s.order)]).
+			Times("blocks", bTimes).
+			Uint64("series_id", uint64(s.seriesSpan.seriesID)).
+			Str("series", s.seriesSpan.series).
+			Int("shard_id", int(s.seriesSpan.shardID)).
+			Msg("seek series by time")
+	}
 	return []Iterator{newMergedIterator(delegated)}, nil
 }
 
@@ -172,7 +174,6 @@ func (s *searcherIterator) Next() bool {
 			v := s.fieldIterator.Val()
 			s.cur = v.Value.Iterator()
 			s.curKey = v.Term
-			s.l.Trace().Uint64("series_id", uint64(s.seriesID)).Hex("term", s.curKey).Msg("got a new field")
 		} else {
 			return false
 		}
@@ -181,11 +182,12 @@ func (s *searcherIterator) Next() bool {
 
 		for _, filter := range s.filters {
 			if !filter(s.Val()) {
-				s.l.Trace().Uint64("series_id", uint64(s.seriesID)).Uint64("item_id", uint64(s.Val().ID())).Msg("ignore the item")
 				return s.Next()
 			}
 		}
-		s.l.Trace().Uint64("series_id", uint64(s.seriesID)).Uint64("item_id", uint64(s.Val().ID())).Msg("got an item")
+		if e := s.l.Debug(); e.Enabled() {
+			e.Uint64("series_id", uint64(s.seriesID)).Uint64("item_id", uint64(s.Val().ID())).Msg("got an item")
+		}
 		return true
 	}
 	s.cur = nil
diff --git a/banyand/tsdb/series_write.go b/banyand/tsdb/series_write.go
index 794165f..661d1f7 100644
--- a/banyand/tsdb/series_write.go
+++ b/banyand/tsdb/series_write.go
@@ -92,7 +92,7 @@ var ErrDuplicatedFamily = errors.New("duplicated family")
 
 func (w *writerBuilder) Build() (Writer, error) {
 	if w.block == nil {
-		return nil, errors.WithStack(ErrNoTime)
+		return nil, errors.WithMessagef(ErrNoTime, "ts:%v", w.ts)
 	}
 	if len(w.values) < 1 {
 		return nil, errors.WithStack(ErrNoVal)
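
Build now annotates ErrNoTime with the offending timestamp instead of only attaching a stack, so callers can still match the sentinel while the error text carries the context. A minimal sketch of that wrapping with github.com/pkg/errors (v0.9+, so the standard errors.Is can unwrap it); errNoTime and build are illustrative stand-ins:

package main

import (
	stderrors "errors"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

var errNoTime = stderrors.New("no time specified")

func build(ts time.Time) error {
	// Prefix the sentinel with the timestamp; the cause stays matchable.
	return errors.WithMessagef(errNoTime, "ts:%v", ts)
}

func main() {
	err := build(time.Unix(0, 0).UTC())
	fmt.Println(err)                          // ts:1970-01-01 00:00:00 +0000 UTC: no time specified
	fmt.Println(stderrors.Is(err, errNoTime)) // true
}
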
diff --git a/banyand/tsdb/seriesdb.go b/banyand/tsdb/seriesdb.go
index 6c4ae55..c9ab074 100644
--- a/banyand/tsdb/seriesdb.go
+++ b/banyand/tsdb/seriesdb.go
@@ -20,25 +20,33 @@ package tsdb
 import (
 	"bytes"
 	"context"
+	"errors"
 	"io"
 	"math"
 	"sort"
+	"strings"
 	"sync"
 	"time"
 
 	"go.uber.org/multierr"
+	"google.golang.org/protobuf/proto"
 
 	"github.com/apache/skywalking-banyandb/api/common"
+	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/kv"
 	"github.com/apache/skywalking-banyandb/banyand/observability"
 	"github.com/apache/skywalking-banyandb/pkg/convert"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
+	pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
 )
 
 var (
-	maxIntBytes  = convert.Uint64ToBytes(math.MaxUint64)
-	zeroIntBytes = convert.Uint64ToBytes(0)
+	entityPrefix    = []byte("entity_")
+	entityPrefixLen = len(entityPrefix)
+	seriesPrefix    = []byte("series_")
+	maxIntBytes     = convert.Uint64ToBytes(math.MaxUint64)
+	zeroIntBytes    = convert.Uint64ToBytes(0)
 )
 
 var AnyEntry = Entry(nil)
@@ -75,6 +83,83 @@ func NewEntity(len int) Entity {
 	return e
 }
 
+type EntityValue *modelv1.TagValue
+
+func EntityValueToEntry(ev EntityValue) (Entry, error) {
+	return pbv1.MarshalTagValue(ev)
+}
+
+type EntityValues []EntityValue
+
+func (evs EntityValues) Prepend(scope EntityValue) EntityValues {
+	return append(EntityValues{scope}, evs...)
+}
+
+func (evs EntityValues) Encode() (result []*modelv1.TagValue) {
+	for _, v := range evs {
+		result = append(result, v)
+	}
+	return
+}
+
+func (evs EntityValues) ToEntity() (result Entity, err error) {
+	for _, v := range evs {
+		entry, errMarshal := EntityValueToEntry(v)
+		if errMarshal != nil {
+			return nil, errMarshal
+		}
+		result = append(result, entry)
+	}
+	return
+}
+
+func (evs EntityValues) String() string {
+	var strBuilder strings.Builder
+	vv := evs.Encode()
+	for i := 0; i < len(vv); i++ {
+		strBuilder.WriteString(vv[i].String())
+		if i < len(vv)-1 {
+			strBuilder.WriteString(".")
+		}
+	}
+	return strBuilder.String()
+}
+
+func DecodeEntityValues(tvv []*modelv1.TagValue) (result EntityValues) {
+	for _, tv := range tvv {
+		result = append(result, tv)
+	}
+	return
+}
+
+func StrValue(v string) EntityValue {
+	return &modelv1.TagValue{Value: &modelv1.TagValue_Str{Str: &modelv1.Str{Value: v}}}
+}
+
+func Int64Value(v int64) EntityValue {
+	return &modelv1.TagValue{Value: &modelv1.TagValue_Int{Int: &modelv1.Int{Value: v}}}
+}
+
+func MarshalEntityValues(evs EntityValues) ([]byte, error) {
+	data := &modelv1.TagFamilyForWrite{}
+	for _, v := range evs {
+		data.Tags = append(data.Tags, v)
+	}
+	return proto.Marshal(data)
+}
+
+func UnmarshalEntityValues(evs []byte) (result EntityValues, err error) {
+	data := &modelv1.TagFamilyForWrite{}
+	if err = proto.Unmarshal(evs, data); err != nil {
+		return nil, err
+	}
+	result = make(EntityValues, 0, len(data.Tags))
+	for _, tv := range data.Tags {
+		result = append(result, tv)
+	}
+	return
+}
+
 type Path struct {
 	prefix   []byte
 	seekKey  []byte
@@ -124,26 +209,26 @@ func (p *Path) extractPrefix() {
 
 func (p Path) Prepend(entry Entry) Path {
 	e := Hash(entry)
-	prependFunc := func(src []byte, entry []byte) []byte {
-		dst := make([]byte, len(src)+len(entry))
-		copy(dst, entry)
-		copy(dst[len(entry):], src)
-		return dst
-	}
-	p.template = prependFunc(p.template, e)
+	p.template = prepend(p.template, e)
 	p.offset += len(e)
 	p.extractPrefix()
-	p.mask = prependFunc(p.mask, maxIntBytes)
+	p.mask = prepend(p.mask, maxIntBytes)
 	return p
 }
 
+func prepend(src []byte, entry []byte) []byte {
+	dst := make([]byte, len(src)+len(entry))
+	copy(dst, entry)
+	copy(dst[len(entry):], src)
+	return dst
+}
+
 type SeriesDatabase interface {
 	observability.Observable
 	io.Closer
 	GetByID(id common.SeriesID) (Series, error)
-	Get(entity Entity) (Series, error)
-	GetByHashKey(key []byte) (Series, error)
-	List(path Path) (SeriesList, error)
+	Get(key []byte, entityValues EntityValues) (Series, error)
+	List(ctx context.Context, path Path) (SeriesList, error)
 }
 
 type blockDatabase interface {
@@ -167,26 +252,25 @@ type seriesDB struct {
 	sID            common.ShardID
 }
 
-func (s *seriesDB) GetByHashKey(key []byte) (Series, error) {
-	seriesID, err := s.seriesMetadata.Get(key)
-	if err != nil && err != kv.ErrKeyNotFound {
-		return nil, err
-	}
-	if err == nil {
-		return newSeries(s.context(), bytesToSeriesID(seriesID), s), nil
-	}
-	s.Lock()
-	defer s.Unlock()
-	seriesID = Hash(key)
-	err = s.seriesMetadata.Put(key, seriesID)
-	if err != nil {
-		return nil, err
-	}
-	return newSeries(s.context(), bytesToSeriesID(seriesID), s), nil
-}
-
 func (s *seriesDB) GetByID(id common.SeriesID) (Series, error) {
-	return newSeries(s.context(), id, s), nil
+	var series string
+	if e := s.l.Debug(); e.Enabled() {
+		var buf bytes.Buffer
+		buf.Write(seriesPrefix)
+		buf.Write(id.Marshal())
+		data, err := s.seriesMetadata.Get(buf.Bytes())
+		if err != nil {
+			e.Err(err).Msg("failed to get series id's literal")
+			return newSeries(s.context(), id, "unknown", s), nil
+		}
+		entityValues, err := UnmarshalEntityValues(data)
+		if err != nil {
+			e.Err(err).Msg("malformed series id's literal")
+			return newSeries(s.context(), id, "malformed", s), nil
+		}
+		series = entityValues.String()
+	}
+	return newSeries(s.context(), id, series, s), nil
 }
 
 func (s *seriesDB) block(ctx context.Context, id GlobalItemID) (BlockDelegate, error) {
@@ -194,6 +278,7 @@ func (s *seriesDB) block(ctx context.Context, id GlobalItemID) (BlockDelegate, e
 	if seg == nil {
 		return nil, nil
 	}
+
 	return seg.blockController.get(ctx, id.blockID)
 }
 
@@ -201,47 +286,122 @@ func (s *seriesDB) shardID() common.ShardID {
 	return s.sID
 }
 
-func (s *seriesDB) Get(entity Entity) (Series, error) {
-	key := HashEntity(entity)
-	return s.GetByHashKey(key)
+func (s *seriesDB) Get(key []byte, entityValues EntityValues) (Series, error) {
+	entityKey := prepend(key, entityPrefix)
+	data, err := s.seriesMetadata.Get(entityKey)
+	if errors.Is(err, kv.ErrKeyNotFound) {
+		s.Lock()
+		defer s.Unlock()
+		seriesID := bytesToSeriesID(Hash(key))
+		encodedData, entityValuesBytes, errDecode := encode(seriesID, entityValues)
+		if errDecode != nil {
+			return nil, errDecode
+		}
+		errDecode = s.seriesMetadata.Put(entityKey, encodedData)
+		if errDecode != nil {
+			return nil, errDecode
+		}
+
+		var series string
+		if e := s.l.Debug(); e.Enabled() {
+			// TODO: only store the following info when debug is enabled
+			errDecode = s.seriesMetadata.Put(prepend(seriesID.Marshal(), seriesPrefix), entityValuesBytes)
+			if errDecode != nil {
+				return nil, errDecode
+			}
+			series = entityValues.String()
+			e.Str("series", series).
+				Uint64("series_id", uint64(seriesID)).
+				Msg("create a new series")
+		}
+		return newSeries(s.context(), seriesID, series, s), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	seriesID, entityValues, err := decode(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return newSeries(s.context(), seriesID, entityValues.String(), s), nil
+}
+
+func encode(seriesID common.SeriesID, evv EntityValues) ([]byte, []byte, error) {
+	data, err := MarshalEntityValues(evv)
+	if err != nil {
+		return nil, nil, err
+	}
+	var buf bytes.Buffer
+	buf.Write(convert.Uint64ToBytes(uint64(seriesID)))
+	buf.Write(data)
+	return buf.Bytes(), data, nil
+}
+
+func decode(value []byte) (common.SeriesID, EntityValues, error) {
+	seriesID := convert.BytesToUint64(value[:8])
+	entityValues, err := UnmarshalEntityValues(value[8:])
+	if err != nil {
+		return 0, nil, err
+	}
+	return common.SeriesID(seriesID), entityValues, nil
 }
 
-func (s *seriesDB) List(path Path) (SeriesList, error) {
+func (s *seriesDB) List(ctx context.Context, path Path) (SeriesList, error) {
+	prefix := prepend(path.prefix, entityPrefix)
+	l := logger.FetchOrDefault(ctx, "series_database", s.l)
 	if path.isFull {
-		id, err := s.seriesMetadata.Get(path.prefix)
+		data, err := s.seriesMetadata.Get(prefix)
 		if err != nil && err != kv.ErrKeyNotFound {
 			return nil, err
 		}
 		if err == nil {
-			seriesID := bytesToSeriesID(id)
-			s.l.Debug().
-				Hex("path", path.prefix).
-				Uint64("series_id", uint64(seriesID)).
-				Msg("got a series with a full path")
-			return []Series{newSeries(s.context(), seriesID, s)}, nil
+			seriesID, entityValue, err := decode(data)
+			if err != nil {
+				return nil, err
+			}
+			var series string
+			if e := l.Debug(); e.Enabled() {
+				series = entityValue.String()
+				e.Int("prefix_len", path.offset/8).
+					Str("series", series).
+					Uint64("series_id", uint64(seriesID)).
+					Msg("got a series with a full path")
+			}
+			return []Series{newSeries(s.context(), seriesID, series, s)}, nil
+		}
+		if e := l.Debug(); e.Enabled() {
+			e.Hex("path", path.prefix).Msg("doesn't get any series")
 		}
-		s.l.Debug().Hex("path", path.prefix).Msg("doesn't get any series")
 		return nil, nil
 	}
 	result := make([]Series, 0)
 	var err error
-	errScan := s.seriesMetadata.Scan(path.prefix, path.seekKey, kv.DefaultScanOpts, func(_ int, key []byte, getVal func() ([]byte, error)) error {
+	errScan := s.seriesMetadata.Scan(prefix, prepend(path.seekKey, entityPrefix), kv.DefaultScanOpts, func(_ int, key []byte, getVal func() ([]byte, error)) error {
+		key = key[entityPrefixLen:]
 		comparableKey := make([]byte, len(key))
 		for i, b := range key {
 			comparableKey[i] = path.mask[i] & b
 		}
 		if bytes.Equal(path.template, comparableKey) {
-			id, errGetVal := getVal()
+			data, errGetVal := getVal()
 			if errGetVal != nil {
 				err = multierr.Append(err, errGetVal)
 				return nil
 			}
-			seriesID := bytesToSeriesID(id)
-			s.l.Debug().
-				Hex("path", path.prefix).
-				Uint64("series_id", uint64(seriesID)).
-				Msg("got a series")
-			result = append(result, newSeries(s.context(), seriesID, s))
+			seriesID, entityValue, errDecode := decode(data)
+			if errDecode != nil {
+				err = multierr.Append(err, errDecode)
+				return nil
+			}
+			series := entityValue.String()
+			if e := l.Debug(); e.Enabled() {
+				e.Int("prefix_len", path.offset/8).
+					Str("series", series).
+					Uint64("series_id", uint64(seriesID)).
+					Msg("match a series")
+			}
+			result = append(result, newSeries(s.context(), seriesID, series, s))
 		}
 		return nil
 	})
@@ -286,7 +446,7 @@ func (s *seriesDB) create(ctx context.Context, ts time.Time) (BlockDelegate, err
 		}
 		return block.delegate(ctx)
 	}
-	seg, err := s.segCtrl.create(s.segCtrl.Format(timeRange.Start), false)
+	seg, err := s.segCtrl.create(timeRange.Start, false)
 	if err != nil {
 		return nil, err
 	}
@@ -340,7 +500,7 @@ func newSeriesDataBase(ctx context.Context, shardID common.ShardID, path string,
 
 // HashEntity runs hash function (e.g. with xxhash algorithm) on each segment of the Entity,
 // and concatenates all uint64 in byte array. So the return length of the byte array will be
-// 8 (every uint64 has 8 bytes) * length of the input entity.
+// 8 (every uint64 has 8 bytes) * length of the input.
 func HashEntity(entity Entity) []byte {
 	result := make([]byte, 0, len(entity)*8)
 	for _, entry := range entity {
diff --git a/banyand/tsdb/seriesdb_test.go b/banyand/tsdb/seriesdb_test.go
index 3c4623c..96d9f82 100644
--- a/banyand/tsdb/seriesdb_test.go
+++ b/banyand/tsdb/seriesdb_test.go
@@ -312,6 +312,31 @@ func TestNewPath(t *testing.T) {
 				offset: 24,
 			},
 		},
+		{
+			name: "prepend a scope to any",
+			entity: Entity{
+				AnyEntry,
+			},
+			scope: Entry("segment"),
+			want: Path{
+				prefix: bytes.Join([][]byte{
+					Hash([]byte("segment")),
+				}, nil),
+				seekKey: bytes.Join([][]byte{
+					Hash([]byte("segment")),
+					zeroIntBytes,
+				}, nil),
+				template: bytes.Join([][]byte{
+					Hash([]byte("segment")),
+					zeroIntBytes,
+				}, nil),
+				mask: bytes.Join([][]byte{
+					maxIntBytes,
+					zeroIntBytes,
+				}, nil),
+				offset: 8,
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -324,7 +349,7 @@ func TestNewPath(t *testing.T) {
 	}
 }
 
-func Test_SeriesDatabase_Get(t *testing.T) {
+func Test_SeriesDatabase_Get_GetByID(t *testing.T) {
 	tests := []struct {
 		name     string
 		entities []Entity
@@ -365,9 +390,20 @@ func Test_SeriesDatabase_Get(t *testing.T) {
 			s, err := newSeriesDataBase(context.WithValue(context.Background(), logger.ContextKey, logger.GetLogger("test")), 0, dir, nil)
 			tester.NoError(err)
 			for _, entity := range tt.entities {
-				series, err := s.Get(entity)
+				evv := toEntityValues(entity)
+				series, err := s.Get(HashEntity(entity), evv)
 				tester.NoError(err)
 				tester.Greater(uint(series.ID()), uint(0))
+				literal := series.String()
+				if literal != "" {
+					tester.Equal(evv.String(), literal)
+				}
+				series, err = s.GetByID(series.ID())
+				tester.NoError(err)
+				literal = series.String()
+				if literal != "" {
+					tester.Equal(evv.String(), literal)
+				}
 			}
 		})
 	}
@@ -456,7 +492,7 @@ func Test_SeriesDatabase_List(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			series, err := s.List(tt.path)
+			series, err := s.List(context.Background(), tt.path)
 			if tt.wantErr {
 				tester.Error(err)
 				return
@@ -512,15 +548,26 @@ func setUpEntities(t *assert.Assertions, db SeriesDatabase) []*entityWithID {
 	}
 	for _, d := range data {
 		d.id = common.SeriesID(convert.BytesToUint64(Hash(HashEntity(d.entity))))
-		series, err := db.Get(d.entity)
+		series, err := db.Get(HashEntity(d.entity), toEntityValues(d.entity))
 		t.NoError(err)
 		t.Greater(uint(series.ID()), uint(0))
 	}
 	return data
 }
 
+func toEntityValues(entity Entity) (result EntityValues) {
+	for i, e := range entity {
+		if len(e) == 8 && i == len(entity)-1 {
+			result = append(result, Int64Value(int64(convert.BytesToUint64(e))))
+		} else {
+			result = append(result, StrValue(string(e)))
+		}
+	}
+	return
+}
+
 func newMockSeries(id common.SeriesID, blockDB *seriesDB) *series {
-	return newSeries(context.TODO(), id, blockDB)
+	return newSeries(context.TODO(), id, "", blockDB)
 }
 
 func transform(list SeriesList) (seriesIDs []common.SeriesID) {
diff --git a/banyand/tsdb/shard.go b/banyand/tsdb/shard.go
index 8541ef8..23dabb3 100644
--- a/banyand/tsdb/shard.go
+++ b/banyand/tsdb/shard.go
@@ -234,3 +234,37 @@ func (ir IntervalRule) EstimatedDuration() time.Duration {
 	}
 	panic("invalid interval unit")
 }
+
+type parser interface {
+	Parse(value string) (time.Time, error)
+}
+
+func loadSections(root string, parser parser, intervalRule IntervalRule, loadFn func(start, end time.Time) error) error {
+	var startTimeLst []time.Time
+	if err := WalkDir(
+		root,
+		segPathPrefix,
+		func(suffix string) error {
+			startTime, err := parser.Parse(suffix)
+			if err != nil {
+				return err
+			}
+			startTimeLst = append(startTimeLst, startTime)
+			return nil
+		}); err != nil {
+		return err
+	}
+	sort.Slice(startTimeLst, func(i, j int) bool { return startTimeLst[i].Before(startTimeLst[j]) })
+	for i, start := range startTimeLst {
+		var end time.Time
+		if i < len(startTimeLst)-1 {
+			end = startTimeLst[i+1]
+		} else {
+			end = intervalRule.NextTime(start)
+		}
+		if err := loadFn(start, end); err != nil {
+			return err
+		}
+	}
+	return nil
+}
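
loadSections walks the on-disk section directories and infers each section's end time from the start of the section that follows it, falling back to the interval rule for the last one. A compact sketch of that inference over a sorted list of start times; sectionEnds is a hypothetical helper and a plain time.Duration stands in for IntervalRule:

package main

import (
	"fmt"
	"sort"
	"time"
)

// sectionEnds pairs every start time with an end time: the next start in
// chronological order, or start+interval for the final section.
func sectionEnds(starts []time.Time, interval time.Duration) [][2]time.Time {
	sort.Slice(starts, func(i, j int) bool { return starts[i].Before(starts[j]) })
	ranges := make([][2]time.Time, 0, len(starts))
	for i, start := range starts {
		end := start.Add(interval)
		if i < len(starts)-1 {
			end = starts[i+1]
		}
		ranges = append(ranges, [2]time.Time{start, end})
	}
	return ranges
}

func main() {
	day := 24 * time.Hour
	starts := []time.Time{
		time.Date(1970, 1, 3, 0, 0, 0, 0, time.UTC),
		time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
	}
	for _, r := range sectionEnds(starts, day) {
		fmt.Println(r[0].Format("20060102"), "->", r[1].Format("20060102"))
	}
	// Output: 19700101 -> 19700103, then 19700103 -> 19700104
}
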
diff --git a/banyand/tsdb/shard_test.go b/banyand/tsdb/shard_test.go
index b836935..1d8709d 100644
--- a/banyand/tsdb/shard_test.go
+++ b/banyand/tsdb/shard_test.go
@@ -89,7 +89,7 @@ var _ = Describe("Shard", func() {
 				3,
 			)
 			Expect(err).NotTo(HaveOccurred())
-			started("BlockID-19700101-00-1", "SegID-19700101-1")
+			started("BlockID-19700101-1970010100-1", "SegID-19700101-1")
 			By("01/01 00:00 1st block is opened")
 			t1 := clock.Now()
 			Eventually(func() []tsdb.BlockState {
@@ -98,13 +98,13 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/01 11:00 2nd block is opened")
-			forward(11, "BlockID-19700101-00-1", "SegID-19700101-1")
+			forward(11, "BlockID-19700101-1970010100-1", "SegID-19700101-1")
 			t2 := clock.Now().Add(1 * time.Hour)
 			Eventually(func() []tsdb.BlockState {
 				return shard.State().Blocks
@@ -112,14 +112,14 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
@@ -128,18 +128,18 @@ var _ = Describe("Shard", func() {
 				return shard.State().OpenBlocks
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{}))
 			By("01/01 13:00 moves to the 2nd block")
-			forward(2, "BlockID-19700101-00-1", "SegID-19700101-1")
-			started("BlockID-19700101-12-1")
+			forward(2, "BlockID-19700101-1970010100-1", "SegID-19700101-1")
+			started("BlockID-19700101-1970010112-1")
 			Eventually(func() []tsdb.BlockID {
 				return shard.State().OpenBlocks
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 				},
 			}))
 			By("01/01 23:00 3rd block is opened")
-			forward(10, "BlockID-19700101-12-1", "SegID-19700101-1")
+			forward(10, "BlockID-19700101-1970010112-1", "SegID-19700101-1")
 			t3 := clock.Now().Add(1 * time.Hour)
 			Eventually(func() []tsdb.BlockState {
 				return shard.State().Blocks
@@ -147,28 +147,28 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/02 01:00 moves to 3rd block")
-			forward(2, "BlockID-19700101-12-1", "SegID-19700101-1")
-			started("BlockID-19700102-00-1", "SegID-19700102-1")
+			forward(2, "BlockID-19700101-1970010112-1", "SegID-19700101-1")
+			started("BlockID-19700102-1970010200-1", "SegID-19700102-1")
 			Eventually(func() []tsdb.BlockID {
 				if clock.TriggerTimer() {
 					GinkgoWriter.Println("01/02 01:00 has been triggered")
@@ -177,15 +177,15 @@ var _ = Describe("Shard", func() {
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 				},
 			}))
 			By("01/02 11:00 4th block is opened")
-			forward(10, "BlockID-19700102-00-1", "SegID-19700102-1")
+			forward(10, "BlockID-19700102-1970010200-1", "SegID-19700102-1")
 			t4 := clock.Now().Add(1 * time.Hour)
 			Eventually(func() []tsdb.BlockState {
 				if clock.TriggerTimer() {
@@ -196,53 +196,53 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/02 13:00 moves to 4th block")
-			forward(2, "BlockID-19700102-00-1", "SegID-19700102-1")
-			started("BlockID-19700102-12-1")
+			forward(2, "BlockID-19700102-1970010200-1", "SegID-19700102-1")
+			started("BlockID-19700102-1970010212-1")
 			Eventually(func() []tsdb.BlockID {
 				return shard.State().OpenBlocks
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 				},
 			}))
 			By("01/02 23:00 5th block is opened")
-			forward(10, "BlockID-19700102-12-1", "SegID-19700102-1")
+			forward(10, "BlockID-19700102-1970010212-1", "SegID-19700102-1")
 			t5 := clock.Now().Add(1 * time.Hour)
 			Eventually(func() []tsdb.BlockState {
 				return shard.State().Blocks
@@ -250,56 +250,56 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700103),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010300),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t5, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/03 01:00 close 1st block by adding 5th block")
-			forward(2, "BlockID-19700102-12-1", "SegID-19700102-1")
-			started("BlockID-19700103-00-1", "SegID-19700103-1")
+			forward(2, "BlockID-19700102-1970010212-1", "SegID-19700102-1")
+			started("BlockID-19700103-1970010300-1", "SegID-19700103-1")
 			Eventually(func() []tsdb.BlockID {
 				return shard.State().OpenBlocks
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 				},
 			}))
 			Eventually(func() []tsdb.BlockState {
@@ -308,7 +308,7 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 					Closed:    true,
@@ -316,28 +316,28 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700103),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010300),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t5, 12*time.Hour, true, false),
 				},
@@ -358,15 +358,15 @@ var _ = Describe("Shard", func() {
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 				},
 			}))
 			Eventually(func() []tsdb.BlockState {
@@ -375,14 +375,14 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 					Closed:    true,
@@ -390,21 +390,21 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700103),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010300),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t5, 12*time.Hour, true, false),
 				},
@@ -429,23 +429,23 @@ var _ = Describe("Shard", func() {
 				15,
 			)
 			Expect(err).NotTo(HaveOccurred())
-			started("BlockID-19700101-00-1", "SegID-19700101-1", "retention")
+			started("BlockID-19700101-1970010100-1", "SegID-19700101-1", "retention")
 			By("01/01 00:00 1st block is opened")
 			t1 := clock.Now()
 			By("01/01 11:00 2nd block is opened")
-			forward(11, "BlockID-19700101-00-1", "SegID-19700101-1", "retention")
+			forward(11, "BlockID-19700101-1970010100-1", "SegID-19700101-1", "retention")
 			t2 := clock.Now().Add(1 * time.Hour)
 			By("01/01 13:00 moves to the 2nd block")
-			forward(2, "BlockID-19700101-00-1", "SegID-19700101-1", "retention")
-			started("BlockID-19700101-12-1", "retention")
+			forward(2, "BlockID-19700101-1970010100-1", "SegID-19700101-1", "retention")
+			started("BlockID-19700101-1970010112-1", "retention")
 			By("01/01 23:00 3rd block is opened")
-			forward(10, "BlockID-19700101-12-1", "SegID-19700101-1", "retention")
+			forward(10, "BlockID-19700101-1970010112-1", "SegID-19700101-1", "retention")
 			t3 := clock.Now().Add(1 * time.Hour)
 			By("01/02 01:00 moves to 3rd block")
-			forward(2, "BlockID-19700101-12-1", "SegID-19700101-1", "retention")
-			started("BlockID-19700102-00-1", "SegID-19700102-1", "retention")
+			forward(2, "BlockID-19700101-1970010112-1", "SegID-19700101-1", "retention")
+			started("BlockID-19700102-1970010200-1", "SegID-19700102-1", "retention")
 			By("01/02 11:00 4th block is opened")
-			forward(10, "BlockID-19700102-00-1", "SegID-19700102-1", "retention")
+			forward(10, "BlockID-19700102-1970010200-1", "SegID-19700102-1", "retention")
 			t4 := clock.Now().Add(1 * time.Hour)
 
 			Eventually(func() []tsdb.BlockState {
@@ -454,41 +454,41 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010100),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010112),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/02 13:00 moves to 4th block")
-			forward(2, "BlockID-19700102-00-1", "SegID-19700102-1", "retention")
-			started("BlockID-19700102-12-1", "retention")
+			forward(2, "BlockID-19700102-1970010200-1", "SegID-19700102-1", "retention")
+			started("BlockID-19700102-1970010212-1", "retention")
 			By("01/02 23:00 5th block is opened")
-			forward(10, "BlockID-19700102-12-1", "SegID-19700102-1", "retention")
+			forward(10, "BlockID-19700102-1970010212-1", "SegID-19700102-1", "retention")
 			t5 := clock.Now().Add(1 * time.Hour)
 			By("01/03 01:00 close 1st block by adding 5th block")
-			forward(2, "BlockID-19700102-12-1", "SegID-19700102-1", "retention")
-			started("BlockID-19700103-00-1", "SegID-19700103-1", "retention")
+			forward(2, "BlockID-19700102-1970010212-1", "SegID-19700102-1", "retention")
+			started("BlockID-19700103-1970010300-1", "SegID-19700103-1", "retention")
 			Eventually(func() []tsdb.BlockState {
 				started("retention")
 				return shard.State().Blocks
@@ -496,21 +496,21 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t3, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t4, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700103),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010300),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t5, 12*time.Hour, true, false),
 				},
@@ -520,11 +520,11 @@ var _ = Describe("Shard", func() {
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockID{
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o0),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010200),
 				},
 				{
 					SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700102),
-					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 12),
+					BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010212),
 				},
 			}))
 		})
@@ -549,7 +549,7 @@ var _ = Describe("Shard", func() {
 				3,
 			)
 			Expect(err).NotTo(HaveOccurred())
-			started("BlockID-19700101-01-1", "SegID-19700101-1", "retention")
+			started("BlockID-19700101-1970010101-1", "SegID-19700101-1", "retention")
 			By("01/01 00:01 1st block is opened")
 			t1 := clock.Now()
 			Eventually(func() []tsdb.BlockState {
@@ -558,31 +558,31 @@ var _ = Describe("Shard", func() {
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o1),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010101),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 			}))
 			By("01/01 12:00 2nd block is opened")
-			forward(11, "BlockID-19700101-01-1", "SegID-19700101-1")
+			forward(11, "BlockID-19700101-1970010101-1", "SegID-19700101-1")
 			t2 := clock.Now().Add(1 * time.Hour)
 			By("01/01 14:00 moves to the 2nd block")
-			forward(2, "BlockID-19700101-01-1", "SegID-19700101-1")
-			started("BlockID-19700101-13-1")
+			forward(2, "BlockID-19700101-1970010101-1", "SegID-19700101-1")
+			started("BlockID-19700101-1970010113-1")
 			Eventually(func() []tsdb.BlockState {
 				return shard.State().Blocks
 			}, flags.EventuallyTimeout).Should(Equal([]tsdb.BlockState{
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 0o1),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010101),
 					},
 					TimeRange: timestamp.NewTimeRangeDuration(t1, 12*time.Hour, true, false),
 				},
 				{
 					ID: tsdb.BlockID{
 						SegID:   tsdb.GenerateInternalID(tsdb.DAY, 19700101),
-						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 13),
+						BlockID: tsdb.GenerateInternalID(tsdb.HOUR, 1970010113),
 					},
 					// The last block only takes 11 hours to align the segment's size
 					TimeRange: timestamp.NewTimeRangeDuration(t2, 11*time.Hour, true, false),
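A brief aside on the 11-hour duration in the last expectation, since the in-test comment is terse: with a one-day segment (19700101) and 12-hour blocks, the second block starts at 13:00, so only 11 hours remain before the segment boundary (13:00 + 11h = 00:00 of 01/02); the block is shortened to end at that boundary instead of spilling into the next day's segment.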
diff --git a/banyand/tsdb/tsdb.go b/banyand/tsdb/tsdb.go
index c4428d8..770c5bd 100644
--- a/banyand/tsdb/tsdb.go
+++ b/banyand/tsdb/tsdb.go
@@ -25,12 +25,12 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/pkg/errors"
 	"go.uber.org/multierr"
 
 	"github.com/apache/skywalking-banyandb/api/common"
+	"github.com/apache/skywalking-banyandb/pkg/convert"
 	"github.com/apache/skywalking-banyandb/pkg/encoding"
 	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
@@ -48,10 +48,8 @@ const (
 	blockTemplate       = rootPrefix + blockPathPrefix + "-%s"
 	globalIndexTemplate = rootPrefix + "index"
 
-	segHourFormat   = "2006010215"
-	segDayFormat    = "20060102"
-	blockHourFormat = "15"
-	blockDayFormat  = "0102"
+	hourFormat = "2006010215"
+	dayFormat  = "20060102"
 
 	dirPerm = 0o700
 )
@@ -104,21 +102,33 @@ type EncodingMethod struct {
 	DecoderPool encoding.SeriesDecoderPool
 }
 
-type BlockID struct {
-	SegID   uint16
-	BlockID uint16
-}
+type (
+	SectionID uint32
+	BlockID   struct {
+		SegID   SectionID
+		BlockID SectionID
+	}
+)
 
 func (b BlockID) String() string {
 	return fmt.Sprintf("BlockID-%d-%d", parseSuffix(b.SegID), parseSuffix(b.BlockID))
 }
 
-func GenerateInternalID(unit IntervalUnit, suffix int) uint16 {
-	return uint16(unit)<<12 | ((uint16(suffix) << 4) >> 4)
+func GenerateInternalID(unit IntervalUnit, suffix int) SectionID {
+	return SectionID(unit)<<31 | ((SectionID(suffix) << 1) >> 1)
+}
+
+func parseSuffix(id SectionID) int {
+	return int((id << 1) >> 1)
+}
+
+func sectionIDToBytes(id SectionID) []byte {
+	return convert.Uint32ToBytes(uint32(id))
 }
 
-func parseSuffix(id uint16) int {
-	return int((id << 4) >> 4)
+func readSectionID(data []byte, offset int) (SectionID, int) {
+	end := offset + 4
+	return SectionID(convert.BytesToUint32(data[offset:end])), end
 }
 
 type BlockState struct {
@@ -185,9 +195,6 @@ func OpenDatabase(ctx context.Context, opts DatabaseOpts) (Database, error) {
 	if opts.TTL.Num == 0 {
 		return nil, errors.Wrap(ErrOpenDatabase, "ttl is absent")
 	}
-	if opts.SegmentInterval.EstimatedDuration() > 24*time.Hour {
-		return nil, errors.Wrap(ErrOpenDatabase, "segment interval should not be greater than 24 hours")
-	}
 	db := &database{
 		location:    opts.Location,
 		shardNum:    opts.ShardNum,
@@ -236,7 +243,7 @@ func loadDatabase(ctx context.Context, db *database) (Database, error) {
 	// TODO: open the manifest file
 	db.Lock()
 	defer db.Unlock()
-	err := WalkDir(db.location, shardPathPrefix, func(suffix, _ string) error {
+	err := WalkDir(db.location, shardPathPrefix, func(suffix string) error {
 		shardID, err := strconv.Atoi(suffix)
 		if err != nil {
 			return err
@@ -276,7 +283,7 @@ func loadDatabase(ctx context.Context, db *database) (Database, error) {
 	return db, nil
 }
 
-type WalkFn func(suffix, absolutePath string) error
+type WalkFn func(suffix string) error
 
 func WalkDir(root, prefix string, walkFn WalkFn) error {
 	files, err := os.ReadDir(root)
@@ -288,7 +295,7 @@ func WalkDir(root, prefix string, walkFn WalkFn) error {
 			continue
 		}
 		segs := strings.Split(f.Name(), "-")
-		errWalk := walkFn(segs[len(segs)-1], fmt.Sprintf(rootPrefix, root)+f.Name())
+		errWalk := walkFn(segs[len(segs)-1])
 		if errWalk != nil {
 			return errors.WithMessagef(errWalk, "failed to load: %s", f.Name())
 		}
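The widening of BlockID's fields in the hunk above is what allows the 10-digit hour suffixes (e.g. 1970010212) seen in the shard test: the old scheme packed a 4-bit interval unit and a 12-bit suffix into a uint16 (maximum suffix 4095), while the new 32-bit SectionID keeps the unit in the top bit and a 31-bit suffix. Below is a minimal, self-contained sketch of the round trip; the numeric value of HOUR is an assumption here, since IntervalUnit's constants are defined elsewhere in the package.

package main

import "fmt"

// Local stand-ins for the tsdb types; only the bit layout matters for this sketch.
type (
	IntervalUnit int
	SectionID    uint32
)

const HOUR IntervalUnit = 1 // assumed value; anything that fits in the top bit works

// Mirrors GenerateInternalID from the hunk above: unit in bit 31, suffix in bits 0..30.
func GenerateInternalID(unit IntervalUnit, suffix int) SectionID {
	return SectionID(unit)<<31 | ((SectionID(suffix) << 1) >> 1)
}

// Mirrors parseSuffix: clear the unit bit and return the 31-bit suffix.
func parseSuffix(id SectionID) int {
	return int((id << 1) >> 1)
}

func main() {
	id := GenerateInternalID(HOUR, 1970010212)
	fmt.Println(parseSuffix(id)) // 1970010212 — far beyond the old 12-bit limit of 4095
}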
diff --git a/banyand/tsdb/tsdb_test.go b/banyand/tsdb/tsdb_test.go
index 9a830df..443ae6d 100644
--- a/banyand/tsdb/tsdb_test.go
+++ b/banyand/tsdb/tsdb_test.go
@@ -96,9 +96,9 @@ func verifyDatabaseStructure(tester *assert.Assertions, tempDir string, now time
 	validateDirectory(tester, shardPath)
 	seriesPath := fmt.Sprintf(seriesTemplate, shardPath)
 	validateDirectory(tester, seriesPath)
-	segPath := fmt.Sprintf(segTemplate, shardPath, now.Format(segDayFormat))
+	segPath := fmt.Sprintf(segTemplate, shardPath, now.Format(dayFormat))
 	validateDirectory(tester, segPath)
-	validateDirectory(tester, fmt.Sprintf(blockTemplate, segPath, now.Format(blockHourFormat)))
+	validateDirectory(tester, fmt.Sprintf(blockTemplate, segPath, now.Format(hourFormat)))
 }
 
 func openDatabase(ctx context.Context, t *require.Assertions, path string) (db Database) {
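The consolidated hourFormat/dayFormat constants used above are Go reference-time layouts, and they produce the day and hour suffixes that appear in the segment and block names earlier in this patch. A small illustrative sketch (the timestamp is arbitrary):

package main

import (
	"fmt"
	"time"
)

const (
	dayFormat  = "20060102"   // yyyymmdd, used for segment suffixes
	hourFormat = "2006010215" // yyyymmddHH, used for block suffixes
)

func main() {
	t := time.Date(1970, 1, 2, 12, 0, 0, 0, time.UTC)
	fmt.Println(t.Format(dayFormat))  // 19700102
	fmt.Println(t.Format(hourFormat)) // 1970010212
}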
diff --git a/dist/LICENSE b/dist/LICENSE
index 9d60250..fc75cee 100644
--- a/dist/LICENSE
+++ b/dist/LICENSE
@@ -182,53 +182,53 @@ Apache-2.0 licenses
     github.com/blevesearch/segment v0.9.0 Apache-2.0
     github.com/blevesearch/vellum v1.0.7 Apache-2.0
     github.com/coreos/go-semver v0.3.0 Apache-2.0
-    github.com/coreos/go-systemd/v22 v22.3.2 Apache-2.0
+    github.com/coreos/go-systemd/v22 v22.5.0 Apache-2.0
     github.com/dgraph-io/ristretto v0.1.0 Apache-2.0
     github.com/envoyproxy/protoc-gen-validate v0.1.0 Apache-2.0
+    github.com/go-logr/logr v1.2.3 Apache-2.0
+    github.com/go-logr/stdr v1.2.2 Apache-2.0
     github.com/golang/glog v1.0.0 Apache-2.0
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da Apache-2.0
     github.com/golang/mock v1.6.0 Apache-2.0
-    github.com/google/btree v1.0.1 Apache-2.0
+    github.com/google/btree v1.1.2 Apache-2.0
     github.com/google/flatbuffers v1.12.1 Apache-2.0
     github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 Apache-2.0
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 Apache-2.0
-    github.com/inconshreveable/mousetrap v1.0.0 Apache-2.0
-    github.com/jonboulle/clockwork v0.2.2 Apache-2.0
-    github.com/matttproud/golang_protobuf_extensions v1.0.1 Apache-2.0
+    github.com/inconshreveable/mousetrap v1.0.1 Apache-2.0
+    github.com/jonboulle/clockwork v0.3.0 Apache-2.0
+    github.com/matttproud/golang_protobuf_extensions v1.0.4 Apache-2.0
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd Apache-2.0
     github.com/modern-go/reflect2 v1.0.2 Apache-2.0
     github.com/mschoch/smat v0.2.0 Apache-2.0
     github.com/oklog/run v1.1.0 Apache-2.0
-    github.com/prometheus/client_golang v1.12.2 Apache-2.0
-    github.com/prometheus/client_model v0.2.0 Apache-2.0
-    github.com/prometheus/common v0.32.1 Apache-2.0
-    github.com/prometheus/procfs v0.7.3 Apache-2.0
+    github.com/prometheus/client_golang v1.14.0 Apache-2.0
+    github.com/prometheus/client_model v0.3.0 Apache-2.0
+    github.com/prometheus/common v0.37.0 Apache-2.0
+    github.com/prometheus/procfs v0.8.0 Apache-2.0
     github.com/soheilhy/cmux v0.1.5 Apache-2.0
     github.com/spf13/afero v1.8.2 Apache-2.0
-    github.com/spf13/cobra v1.4.0 Apache-2.0
+    github.com/spf13/cobra v1.6.1 Apache-2.0
     github.com/zinclabs/bluge v1.1.5 Apache-2.0
     github.com/zinclabs/bluge_segment_api v1.0.0 Apache-2.0
     github.com/zinclabs/ice v1.1.3 Apache-2.0
-    go.etcd.io/etcd/api/v3 v3.5.4 Apache-2.0
-    go.etcd.io/etcd/client/pkg/v3 v3.5.4 Apache-2.0
-    go.etcd.io/etcd/client/v2 v2.305.4 Apache-2.0
-    go.etcd.io/etcd/client/v3 v3.5.4 Apache-2.0
-    go.etcd.io/etcd/pkg/v3 v3.5.4 Apache-2.0
-    go.etcd.io/etcd/raft/v3 v3.5.4 Apache-2.0
-    go.etcd.io/etcd/server/v3 v3.5.4 Apache-2.0
+    go.etcd.io/etcd/api/v3 v3.5.6 Apache-2.0
+    go.etcd.io/etcd/client/pkg/v3 v3.5.6 Apache-2.0
+    go.etcd.io/etcd/client/v2 v2.305.6 Apache-2.0
+    go.etcd.io/etcd/client/v3 v3.5.6 Apache-2.0
+    go.etcd.io/etcd/pkg/v3 v3.5.6 Apache-2.0
+    go.etcd.io/etcd/raft/v3 v3.5.6 Apache-2.0
+    go.etcd.io/etcd/server/v3 v3.5.6 Apache-2.0
     go.opencensus.io v0.23.0 Apache-2.0
-    go.opentelemetry.io/contrib v0.20.0 Apache-2.0
-    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/exporters/otlp v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/metric v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/sdk v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/sdk/export/metric v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/sdk/metric v0.20.0 Apache-2.0
-    go.opentelemetry.io/otel/trace v0.20.0 Apache-2.0
-    go.opentelemetry.io/proto/otlp v0.7.0 Apache-2.0
-    google.golang.org/genproto v0.0.0-20220615141314-f1464d18c36b Apache-2.0
-    google.golang.org/grpc v1.47.0 Apache-2.0
+    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 Apache-2.0
+    go.opentelemetry.io/otel v1.11.1 Apache-2.0
+    go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 Apache-2.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 Apache-2.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 Apache-2.0
+    go.opentelemetry.io/otel/sdk v1.11.1 Apache-2.0
+    go.opentelemetry.io/otel/trace v1.11.1 Apache-2.0
+    go.opentelemetry.io/proto/otlp v0.19.0 Apache-2.0
+    google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 Apache-2.0
+    google.golang.org/grpc v1.51.0 Apache-2.0
     gopkg.in/ini.v1 v1.66.4 Apache-2.0
     gopkg.in/yaml.v2 v2.4.0 Apache-2.0
 
@@ -242,7 +242,7 @@ Apache-2.0 and BSD-3-Clause licenses
 BSD-2-Clause licenses
 ========================================================================
 
-    github.com/gorilla/websocket v1.4.2 BSD-2-Clause
+    github.com/gorilla/websocket v1.5.0 BSD-2-Clause
     github.com/magiconair/properties v1.8.6 BSD-2-Clause
     github.com/pkg/errors v0.9.1 BSD-2-Clause
 
@@ -263,21 +263,21 @@ BSD-3-Clause licenses
     github.com/gogo/protobuf v1.3.2 BSD-3-Clause
     github.com/golang/protobuf v1.5.2 BSD-3-Clause
     github.com/golang/snappy v0.0.3 BSD-3-Clause
-    github.com/google/go-cmp v0.5.8 BSD-3-Clause
+    github.com/google/go-cmp v0.5.9 BSD-3-Clause
     github.com/google/uuid v1.3.0 BSD-3-Clause
     github.com/grpc-ecosystem/grpc-gateway v1.16.0 BSD-3-Clause
-    github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3 BSD-3-Clause
+    github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 BSD-3-Clause
     github.com/pmezard/go-difflib v1.0.0 BSD-3-Clause
     github.com/spf13/pflag v1.0.5 BSD-3-Clause
     github.com/xhit/go-str2duration/v2 v2.0.0 BSD-3-Clause
-    golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 BSD-3-Clause
+    golang.org/x/crypto v0.3.0 BSD-3-Clause
     golang.org/x/exp v0.0.0-20220602145555-4a0574d9293f BSD-3-Clause
     golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 BSD-3-Clause
-    golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 BSD-3-Clause
-    golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f BSD-3-Clause
+    golang.org/x/net v0.2.0 BSD-3-Clause
+    golang.org/x/sys v0.2.0 BSD-3-Clause
     golang.org/x/text v0.4.0 BSD-3-Clause
-    golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba BSD-3-Clause
-    google.golang.org/protobuf v1.28.0 BSD-3-Clause
+    golang.org/x/time v0.2.0 BSD-3-Clause
+    google.golang.org/protobuf v1.28.1 BSD-3-Clause
 
 ========================================================================
 BSD-3-Clause and Apache-2.0 and MIT licenses
@@ -300,13 +300,14 @@ MIT licenses
     github.com/beorn7/perks v1.0.1 MIT
     github.com/blevesearch/go-porterstemmer v1.0.3 MIT
     github.com/caio/go-tdigest v3.1.0+incompatible MIT
+    github.com/cenkalti/backoff/v4 v4.2.0 MIT
     github.com/cespare/xxhash v1.1.0 MIT
     github.com/cespare/xxhash/v2 v2.1.2 MIT
     github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc MIT
     github.com/dustin/go-humanize v1.0.0 MIT
-    github.com/form3tech-oss/jwt-go v3.2.3+incompatible MIT
     github.com/go-chi/chi/v5 v5.0.7 MIT
     github.com/go-resty/resty/v2 v2.7.0 MIT
+    github.com/golang-jwt/jwt/v4 v4.4.2 MIT
     github.com/json-iterator/go v1.1.12 MIT
     github.com/mitchellh/mapstructure v1.5.0 MIT
     github.com/onsi/ginkgo/v2 v2.1.4 MIT
@@ -314,20 +315,20 @@ MIT licenses
     github.com/pelletier/go-toml/v2 v2.0.1 MIT
     github.com/robfig/cron/v3 v3.0.1 MIT
     github.com/rs/zerolog v1.26.1 MIT
-    github.com/sirupsen/logrus v1.7.0 MIT
+    github.com/sirupsen/logrus v1.9.0 MIT
     github.com/spf13/cast v1.5.0 MIT
     github.com/spf13/jwalterweatherman v1.1.0 MIT
     github.com/spf13/viper v1.12.0 MIT
-    github.com/stretchr/objx v0.1.1 MIT
-    github.com/stretchr/testify v1.7.1 MIT
+    github.com/stretchr/objx v0.4.0 MIT
+    github.com/stretchr/testify v1.8.0 MIT
     github.com/subosito/gotenv v1.3.0 MIT
-    github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 MIT
-    github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 MIT
+    github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 MIT
+    github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 MIT
     github.com/zenizh/go-capturer v0.0.0-20211219060012-52ea6c8fed04 MIT
     go.etcd.io/bbolt v1.3.6 MIT
-    go.uber.org/atomic v1.9.0 MIT
+    go.uber.org/atomic v1.10.0 MIT
     go.uber.org/multierr v1.8.0 MIT
-    go.uber.org/zap v1.17.0 MIT
+    go.uber.org/zap v1.23.0 MIT
     gopkg.in/natefinch/lumberjack.v2 v2.0.0 MIT
 
 ========================================================================
@@ -341,7 +342,7 @@ MIT and Apache-2.0 licenses
 MIT and BSD-3-Clause licenses
 ========================================================================
 
-    sigs.k8s.io/yaml v1.2.0 MIT and BSD-3-Clause
+    sigs.k8s.io/yaml v1.3.0 MIT and BSD-3-Clause
 
 ========================================================================
 MPL-2.0 licenses
diff --git a/dist/licenses/license-github.com-cenkalti-backoff-v4.txt b/dist/licenses/license-github.com-cenkalti-backoff-v4.txt
new file mode 100644
index 0000000..89b8179
--- /dev/null
+++ b/dist/licenses/license-github.com-cenkalti-backoff-v4.txt
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/dist/licenses/license-go.opentelemetry.io-otel-metric.txt b/dist/licenses/license-github.com-go-logr-logr.txt
similarity index 99%
copy from dist/licenses/license-go.opentelemetry.io-otel-metric.txt
copy to dist/licenses/license-github.com-go-logr-logr.txt
index 261eeb9..8dada3e 100644
--- a/dist/licenses/license-go.opentelemetry.io-otel-metric.txt
+++ b/dist/licenses/license-github.com-go-logr-logr.txt
@@ -178,7 +178,7 @@
    APPENDIX: How to apply the Apache License to your work.
 
       To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
+      boilerplate notice, with the fields enclosed by brackets "{}"
       replaced with your own identifying information. (Don't include
       the brackets!)  The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+   Copyright {yyyy} {name of copyright owner}
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/dist/licenses/license-go.opentelemetry.io-contrib.txt b/dist/licenses/license-github.com-go-logr-stdr.txt
similarity index 100%
rename from dist/licenses/license-go.opentelemetry.io-contrib.txt
rename to dist/licenses/license-github.com-go-logr-stdr.txt
diff --git a/dist/licenses/license-github.com-form3tech-oss-jwt-go.txt b/dist/licenses/license-github.com-golang-jwt-jwt-v4.txt
similarity index 96%
rename from dist/licenses/license-github.com-form3tech-oss-jwt-go.txt
rename to dist/licenses/license-github.com-golang-jwt-jwt-v4.txt
index df83a9c..35dbc25 100644
--- a/dist/licenses/license-github.com-form3tech-oss-jwt-go.txt
+++ b/dist/licenses/license-github.com-golang-jwt-jwt-v4.txt
@@ -1,4 +1,5 @@
 Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
diff --git a/dist/licenses/license-github.com-inconshreveable-mousetrap.txt b/dist/licenses/license-github.com-inconshreveable-mousetrap.txt
index 5f0d1fb..5f920e9 100644
--- a/dist/licenses/license-github.com-inconshreveable-mousetrap.txt
+++ b/dist/licenses/license-github.com-inconshreveable-mousetrap.txt
@@ -1,13 +1,201 @@
-Copyright 2014 Alan Shreve
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
-   http://www.apache.org/licenses/LICENSE-2.0
+   1. Definitions.
 
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2022 Alan Shreve (@inconshreveable)
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp.txt b/dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-internal-retry.txt
similarity index 100%
rename from dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp.txt
rename to dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-internal-retry.txt
diff --git a/dist/licenses/license-go.opentelemetry.io-otel-metric.txt b/dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-otlptrace-otlptracegrpc.txt
similarity index 100%
rename from dist/licenses/license-go.opentelemetry.io-otel-metric.txt
rename to dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-otlptrace-otlptracegrpc.txt
diff --git a/dist/licenses/license-go.opentelemetry.io-otel-sdk-export-metric.txt b/dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-otlptrace.txt
similarity index 100%
rename from dist/licenses/license-go.opentelemetry.io-otel-sdk-export-metric.txt
rename to dist/licenses/license-go.opentelemetry.io-otel-exporters-otlp-otlptrace.txt
diff --git a/dist/licenses/license-go.opentelemetry.io-otel-sdk-metric.txt b/dist/licenses/license-go.opentelemetry.io-otel-sdk-metric.txt
deleted file mode 100644
index 261eeb9..0000000
--- a/dist/licenses/license-go.opentelemetry.io-otel-sdk-metric.txt
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/docs/api-reference.md b/docs/api-reference.md
index b372fe3..2c5839b 100644
--- a/docs/api-reference.md
+++ b/docs/api-reference.md
@@ -2536,6 +2536,7 @@ DataPointValue is the data point for writing. It only contains values.
 | ----- | ---- | ----- | ----------- |
 | shard_id | [uint32](#uint32) |  |  |
 | series_hash | [bytes](#bytes) |  |  |
+| entity_values | [banyandb.model.v1.TagValue](#banyandb-model-v1-TagValue) | repeated |  |
 | request | [WriteRequest](#banyandb-measure-v1-WriteRequest) |  |  |
 
 
@@ -2929,6 +2930,7 @@ QueryResponse is the response for a query to the Query module.
 | ----- | ---- | ----- | ----------- |
 | shard_id | [uint32](#uint32) |  |  |
 | series_hash | [bytes](#bytes) |  |  |
+| entity_values | [banyandb.model.v1.TagValue](#banyandb-model-v1-TagValue) | repeated |  |
 | request | [WriteRequest](#banyandb-stream-v1-WriteRequest) |  |  |
 
 
diff --git a/go.mod b/go.mod
index a690871..d8191c3 100644
--- a/go.mod
+++ b/go.mod
@@ -14,33 +14,33 @@ require (
 	github.com/go-chi/chi/v5 v5.0.7
 	github.com/go-resty/resty/v2 v2.7.0
 	github.com/golang/mock v1.6.0
-	github.com/google/go-cmp v0.5.8
+	github.com/google/go-cmp v0.5.9
 	github.com/google/uuid v1.3.0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0
 	github.com/hashicorp/golang-lru v0.5.4
 	github.com/klauspost/compress v1.15.6
 	github.com/oklog/run v1.1.0
 	github.com/onsi/ginkgo/v2 v2.1.4
 	github.com/onsi/gomega v1.20.0
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.12.2
+	github.com/prometheus/client_golang v1.14.0
 	github.com/rs/zerolog v1.26.1
-	github.com/spf13/cobra v1.4.0
+	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.12.0
-	github.com/stretchr/testify v1.7.1
+	github.com/stretchr/testify v1.8.0
 	github.com/xhit/go-str2duration/v2 v2.0.0
 	github.com/zenizh/go-capturer v0.0.0-20211219060012-52ea6c8fed04
-	go.etcd.io/etcd/client/v3 v3.5.4
-	go.etcd.io/etcd/server/v3 v3.5.4
+	go.etcd.io/etcd/client/v3 v3.5.6
+	go.etcd.io/etcd/server/v3 v3.5.6
 	go.uber.org/multierr v1.8.0
 	golang.org/x/exp v0.0.0-20220602145555-4a0574d9293f
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
-	google.golang.org/genproto v0.0.0-20220615141314-f1464d18c36b
-	google.golang.org/grpc v1.47.0
-	google.golang.org/protobuf v1.28.0
-	sigs.k8s.io/yaml v1.2.0
+	google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6
+	google.golang.org/grpc v1.51.0
+	google.golang.org/protobuf v1.28.1
+	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
@@ -55,30 +55,33 @@ require (
 	github.com/blugelabs/bluge_segment_api v0.2.0 // indirect
 	github.com/blugelabs/ice v1.0.0 // indirect
 	github.com/caio/go-tdigest v3.1.0+incompatible // indirect
+	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
-	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
 	github.com/golang/glog v1.0.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/golang/snappy v0.0.3 // indirect
-	github.com/google/btree v1.0.1 // indirect
+	github.com/google/btree v1.1.2 // indirect
 	github.com/google/flatbuffers v1.12.1 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/jonboulle/clockwork v0.2.2 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
+	github.com/jonboulle/clockwork v0.3.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -86,43 +89,41 @@ require (
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/sirupsen/logrus v1.7.0 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/soheilhy/cmux v0.1.5 // indirect
 	github.com/spf13/afero v1.8.2 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/objx v0.4.0 // indirect
 	github.com/subosito/gotenv v1.3.0 // indirect
-	github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
-	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
+	github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect
+	github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.4 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
-	go.etcd.io/etcd/client/v2 v2.305.4 // indirect
-	go.etcd.io/etcd/pkg/v3 v3.5.4 // indirect
-	go.etcd.io/etcd/raft/v3 v3.5.4 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.6 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
+	go.etcd.io/etcd/client/v2 v2.305.6 // indirect
+	go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect
+	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/contrib v0.20.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
-	go.opentelemetry.io/otel v0.20.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
-	go.opentelemetry.io/otel/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/trace v0.20.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
-	golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect
-	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect
+	go.opentelemetry.io/otel v1.11.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 // indirect
+	go.opentelemetry.io/otel/sdk v1.11.1 // indirect
+	go.opentelemetry.io/otel/trace v1.11.1 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
+	go.uber.org/zap v1.23.0 // indirect
+	golang.org/x/crypto v0.3.0 // indirect
+	golang.org/x/net v0.2.0 // indirect
+	golang.org/x/sys v0.2.0 // indirect
 	golang.org/x/text v0.4.0 // indirect
-	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
+	golang.org/x/time v0.2.0 // indirect
 	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 41f517d..263b97d 100644
--- a/go.sum
+++ b/go.sum
@@ -17,7 +17,7 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
+cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -63,7 +63,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f h1:y06x6vGnFYfXUoVMbrcP1Uzpj4JG01eB5vRps9G8agM=
 github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -85,6 +84,9 @@ github.com/blevesearch/vellum v1.0.7 h1:+vn8rfyCRHxKVRgDLeR0FAXej2+6mEb5Q15aQE/X
 github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE=
 github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
 github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
@@ -102,8 +104,9 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
@@ -119,12 +122,13 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -148,13 +152,12 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
@@ -170,9 +173,16 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
 github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -182,6 +192,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
@@ -223,8 +235,9 @@ github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
 github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -239,8 +252,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -263,8 +276,9 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
@@ -273,8 +287,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3 h1:BGNSrTRW4rwfhJiFwvwF4XQ0Y72Jj9YEgxVrtovbD5o=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3/go.mod h1:VHn7KgNsRriXa4mcgtkpR00OXyQY6g67JWMvn+R27A4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 h1:t7uX3JBHdVwAi3G7sSSdbsk8NfgA+LnUS88V/2EKaA0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0/go.mod h1:4OGVnY4qf2+gw+ssiHbW+pq4mo2yko94YxxMmXZ7jCA=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -300,12 +315,14 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
 github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg=
+github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -344,8 +361,9 @@ github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamh
 github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -396,27 +414,31 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
-github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -436,8 +458,10 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -453,8 +477,8 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -466,27 +490,31 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q
 github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
 github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
 github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/xhit/go-str2duration/v2 v2.0.0 h1:uFtk6FWB375bP7ewQl+/1wBcn840GPhnySOdcz/okPE=
 github.com/xhit/go-str2duration/v2 v2.0.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -505,20 +533,20 @@ github.com/zinclabs/ice v1.1.3/go.mod h1:wTwGEe30mQnSLaR1ezxu4E80GcwO6EyOww67KpJ
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
 go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc=
-go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
-go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg=
-go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.4 h1:Dcx3/MYyfKcPNLpR4VVQUP5KgYrBeJtktBwEKkw08Ao=
-go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
-go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4=
-go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
-go.etcd.io/etcd/pkg/v3 v3.5.4 h1:V5Dvl7S39ZDwjkKqJG2BfXgxZ3QREqqKifWQgIw5IM0=
-go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0=
-go.etcd.io/etcd/raft/v3 v3.5.4 h1:YGrnAgRfgXloBNuqa+oBI/aRZMcK/1GS6trJePJ/Gqc=
-go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w=
-go.etcd.io/etcd/server/v3 v3.5.4 h1:CMAZd0g8Bn5NRhynW6pKhc4FRg41/0QYy3d7aNm9874=
-go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c=
+go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A=
+go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+go.etcd.io/etcd/client/v2 v2.305.6 h1:fIDR0p4KMjw01MJMfUIDWdQbjo06PD6CeYM5z4EHLi0=
+go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0=
+go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E=
+go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
+go.etcd.io/etcd/pkg/v3 v3.5.6 h1:k1GZrGrfMHy5/cg2bxNGsmLTFisatyhDYCFLRuaavWg=
+go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k=
+go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50=
+go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
+go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds=
+go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -527,41 +555,43 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 h1:PRXhsszxTt5bbPriTjmaweWUsAnJYeWBhUMLRetUgBU=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4/go.mod h1:05eWWy6ZWzmpeImD3UowLTB3VjDMU1yxQ+ENuVWDM3c=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
+go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
+go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 h1:X2GndnMCsUPh6CiY2a+frAbNsXaPLbB0soHRYhAZ5Ig=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 h1:MEQNafcNCB0uQIti/oHgU7CZpUMYQ7qigBwMVKycHvc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 h1:LYyG/f1W/jzAix16jbksJfMQFpOH/Ma6T639pVPMgfI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1/go.mod h1:QrRRQiY3kzAoYPNLP0W/Ikg0gR6V3LMc+ODSxr7yyvg=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
+go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs=
+go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
+go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
+go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
+go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
 go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
+go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -573,9 +603,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -605,7 +635,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -663,8 +692,11 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
-golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -675,7 +707,9 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -741,12 +775,15 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -755,13 +792,15 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
+golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -782,7 +821,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -821,7 +859,6 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -897,8 +934,9 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20220615141314-f1464d18c36b h1:2LXbOcxY7BehyA9yu5hxYzaY67bLaJQhBX9O1zxxVis=
-google.golang.org/genproto v0.0.0-20220615141314-f1464d18c36b/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -917,10 +955,13 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
-google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -934,8 +975,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -973,5 +1014,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/pkg/grpchelper/client.go b/pkg/grpchelper/client.go
index e73e356..0c95ecf 100644
--- a/pkg/grpchelper/client.go
+++ b/pkg/grpchelper/client.go
@@ -48,7 +48,9 @@ func Conn(addr string, connTimeout time.Duration, opts ...grpc.DialOption) (*grp
 		return nil, err
 	}
 	connDuration := time.Since(connStart)
-	l.Debug().Dur("conn", connDuration).Msg("time elapsed")
+	if e := l.Debug(); e.Enabled() {
+		e.Dur("conn", connDuration).Msg("time elapsed")
+	}
 	return conn, nil
 }
 
@@ -71,6 +73,8 @@ func Request(ctx context.Context, rpcTimeout time.Duration, fn func(rpcCtx conte
 		return err
 	}
 	rpcDuration := time.Since(rpcStart)
-	l.Debug().Dur("rpc", rpcDuration).Msg("time elapsed")
+	if e := l.Debug(); e.Enabled() {
+		e.Dur("rpc", rpcDuration).Msg("time elapsed")
+	}
 	return nil
 }
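
The hunks above wrap debug logging in zerolog's Enabled() check so the duration fields are only formatted when the debug level is actually on. A minimal sketch of the pattern, assuming a bare zerolog logger (banyandb's logger.Logger embeds *zerolog.Logger, so the same calls apply there):

package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	l := zerolog.New(os.Stderr).Level(zerolog.InfoLevel)

	start := time.Now()
	// ... dial the connection / run the RPC ...
	elapsed := time.Since(start)

	// Debug is disabled at info level, so the guard skips building the event
	// and the Dur/Msg formatting costs nothing.
	if e := l.Debug(); e.Enabled() {
		e.Dur("conn", elapsed).Msg("time elapsed")
	}
}
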
diff --git a/pkg/index/index.go b/pkg/index/index.go
index 6d06790..44051fe 100644
--- a/pkg/index/index.go
+++ b/pkg/index/index.go
@@ -40,6 +40,10 @@ type FieldKey struct {
 	Analyzer    databasev1.IndexRule_Analyzer
 }
 
+func (f FieldKey) MarshalIndexRule() string {
+	return string(convert.Uint32ToBytes(f.IndexRuleID))
+}
+
 func (f FieldKey) Marshal() []byte {
 	return bytes.Join([][]byte{
 		f.SeriesID.Marshal(),
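
MarshalIndexRule above keys the bluge field by the 4-byte index rule ID alone; the series ID moves into a dedicated stored field (see the inverted.go hunks below). A minimal sketch of that encoding, with a hypothetical helper standing in for convert.Uint32ToBytes (big-endian byte order is an assumption here):

package main

import (
	"encoding/binary"
	"fmt"
)

// marshalIndexRule mirrors FieldKey.MarshalIndexRule: the bluge field name
// is just the bytes of the index rule ID.
func marshalIndexRule(indexRuleID uint32) string {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, indexRuleID) // byte order is an assumption
	return string(b)
}

func main() {
	fmt.Printf("%q\n", marshalIndexRule(6)) // "\x00\x00\x00\x06"
}
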
diff --git a/pkg/index/inverted/inverted.go b/pkg/index/inverted/inverted.go
index 31e57e9..0797a8f 100644
--- a/pkg/index/inverted/inverted.go
+++ b/pkg/index/inverted/inverted.go
@@ -20,7 +20,6 @@ package inverted
 import (
 	"bytes"
 	"context"
-	"errors"
 	"log"
 	"math"
 	"time"
@@ -45,8 +44,9 @@ import (
 )
 
 const (
-	docID     = "_id"
-	batchSize = 1024
+	docID         = "_id"
+	batchSize     = 1024
+	seriesIDField = "series_id"
 )
 
 var analyzers map[databasev1.IndexRule_Analyzer]*analysis.Analyzer
@@ -110,6 +110,9 @@ func (s *store) Close() error {
 }
 
 func (s *store) Write(fields []index.Field, itemID common.ItemID) error {
+	if len(fields) < 1 {
+		return nil
+	}
 	if !s.closer.AddRunning() {
 		return nil
 	}
@@ -134,14 +137,17 @@ func (s *store) Iterator(fieldKey index.FieldKey, termRange index.RangeOpts, ord
 	if err != nil {
 		return nil, err
 	}
-	fk := fieldKey.MarshalToStr()
-	query := bluge.NewTermRangeInclusiveQuery(
-		string(termRange.Lower),
-		string(termRange.Upper),
-		termRange.IncludesLower,
-		termRange.IncludesUpper,
-	).
-		SetField(fk)
+	fk := fieldKey.MarshalIndexRule()
+	query := bluge.NewBooleanQuery()
+	query.
+		AddMust(bluge.NewTermQuery(string(fieldKey.SeriesID.Marshal())).SetField(seriesIDField)).
+		AddMust(bluge.NewTermRangeInclusiveQuery(
+			string(termRange.Lower),
+			string(termRange.Upper),
+			termRange.IncludesLower,
+			termRange.IncludesUpper,
+		).
+			SetField(fk))
 	sortedKey := fk
 	if order == modelv1.Sort_SORT_DESC {
 		sortedKey = "-" + sortedKey
@@ -163,8 +169,11 @@ func (s *store) MatchTerms(field index.Field) (list posting.List, err error) {
 	if err != nil {
 		return nil, err
 	}
-	fk := field.Key.MarshalToStr()
-	query := bluge.NewTermQuery(string(field.Term)).SetField(fk)
+	fk := field.Key.MarshalIndexRule()
+	query := bluge.NewBooleanQuery()
+	query.
+		AddMust(bluge.NewTermQuery(string(field.Key.SeriesID.Marshal())).SetField(seriesIDField)).
+		AddMust(bluge.NewTermQuery(string(field.Term)).SetField(fk))
 	documentMatchIterator, err := reader.Search(context.Background(), bluge.NewAllMatches(query))
 	if err != nil {
 		return nil, err
@@ -186,23 +195,15 @@ func (s *store) Match(fieldKey index.FieldKey, matches []string) (posting.List,
 	if err != nil {
 		return nil, err
 	}
-	fk := fieldKey.MarshalToStr()
-	var query bluge.Query
-	getMatchQuery := func(match string) bluge.Query {
-		q := bluge.NewMatchQuery(match).SetField(fk)
+	fk := fieldKey.MarshalIndexRule()
+	query := bluge.NewBooleanQuery()
+	query.AddMust(bluge.NewTermQuery(string(fieldKey.SeriesID.Marshal())).SetField(seriesIDField))
+	for _, m := range matches {
+		q := bluge.NewMatchQuery(m).SetField(fk)
 		if fieldKey.Analyzer != databasev1.IndexRule_ANALYZER_UNSPECIFIED {
 			q.SetAnalyzer(analyzers[fieldKey.Analyzer])
 		}
-		return q
-	}
-	if len(matches) == 1 {
-		query = getMatchQuery(matches[0])
-	} else {
-		bq := bluge.NewBooleanQuery()
-		for _, m := range matches {
-			bq.AddMust(getMatchQuery(m))
-		}
-		query = bq
+		query.AddMust(q)
 	}
 	documentMatchIterator, err := reader.Search(context.Background(), bluge.NewAllMatches(query))
 	if err != nil {
@@ -245,8 +246,10 @@ func (s *store) run() {
 			batch.Reset()
 			size = 0
 		}
+		var docIDBuffer bytes.Buffer
 		for {
 			timer := time.NewTimer(time.Second)
+			docIDBuffer.Reset()
 			select {
 			case <-s.closer.CloseNotify():
 				return
@@ -259,9 +262,15 @@ func (s *store) run() {
 					flush()
 					close(d.onComplete)
 				case doc:
-					doc := bluge.NewDocument(string(convert.Uint64ToBytes(uint64(d.itemID))))
+					// TODO: generate a segment directly.
+					fk := d.fields[0].Key
+					docIDBuffer.Write(fk.SeriesID.Marshal())
+					docIDBuffer.Write(convert.Uint64ToBytes(uint64(d.itemID)))
+					doc := bluge.NewDocument(docIDBuffer.String())
+
 					for _, f := range d.fields {
-						field := bluge.NewKeywordFieldBytes(f.Key.MarshalToStr(), f.Term).StoreValue().Sortable()
+						doc.AddField(bluge.NewKeywordFieldBytes(seriesIDField, f.Key.SeriesID.Marshal()))
+						field := bluge.NewKeywordFieldBytes(f.Key.MarshalIndexRule(), f.Term).StoreValue().Sortable()
 						if f.Key.Analyzer != databasev1.IndexRule_ANALYZER_UNSPECIFIED {
 							field.WithAnalyzer(analyzers[f.Key.Analyzer])
 						}
@@ -271,7 +280,7 @@ func (s *store) run() {
 					if size >= batchSize {
 						flush()
 					} else {
-						batch.Insert(doc)
+						batch.Update(doc.ID(), doc)
 					}
 				}
 
@@ -345,7 +354,8 @@ func (bmi *blugeMatchIterator) nextTerm() bool {
 	var term []byte
 	bmi.err = match.VisitStoredFields(func(field string, value []byte) bool {
 		if field == docID {
-			id := convert.BytesToUint64(value)
+			// value = seriesID(8bytes)+itemID(8bytes)
+			id := convert.BytesToUint64(value[8:])
 			itemID = common.ItemID(id)
 			i++
 		}
@@ -356,8 +366,9 @@ func (bmi *blugeMatchIterator) nextTerm() bool {
 		return i < 2
 	})
 	if i != 2 {
-		bmi.err = errors.New("less fields")
-		return false
+		// ignore invalid data
+		// TODO: add metric to cumulate ignored docs
+		return true
 	}
 	if bmi.err != nil {
 		return false
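
Two changes interact in the hunks above: every query now ANDs a term query on the series_id stored field with the per-rule term/range/match query, and each document ID becomes seriesID(8 bytes)+itemID(8 bytes), which is why nextTerm reads the item ID from value[8:]. A minimal sketch of that ID layout, with stdlib helpers standing in for convert.Uint64ToBytes/BytesToUint64 (big-endian is an assumption):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeDocID mirrors the buffer built in run(): series ID first, item ID second.
func encodeDocID(seriesID, itemID uint64) []byte {
	b := make([]byte, 0, 16)
	b = binary.BigEndian.AppendUint64(b, seriesID)
	b = binary.BigEndian.AppendUint64(b, itemID)
	return b
}

// decodeItemID mirrors nextTerm: only the trailing 8 bytes carry the item ID.
func decodeItemID(docID []byte) uint64 {
	return binary.BigEndian.Uint64(docID[8:])
}

func main() {
	id := encodeDocID(1, 42)
	fmt.Println(len(id), decodeItemID(id)) // 16 42
}
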
diff --git a/pkg/index/inverted/inverted_test.go b/pkg/index/inverted/inverted_test.go
index d6cac53..ebcdce1 100644
--- a/pkg/index/inverted/inverted_test.go
+++ b/pkg/index/inverted/inverted_test.go
@@ -35,11 +35,20 @@ import (
 	"github.com/apache/skywalking-banyandb/pkg/test/flags"
 )
 
-var serviceName = index.FieldKey{
-	// http_method
-	IndexRuleID: 6,
-	Analyzer:    databasev1.IndexRule_ANALYZER_SIMPLE,
-}
+var (
+	serviceName = index.FieldKey{
+		// http_method
+		IndexRuleID: 6,
+		SeriesID:    common.SeriesID(0),
+		Analyzer:    databasev1.IndexRule_ANALYZER_SIMPLE,
+	}
+	serviceName1 = index.FieldKey{
+		// http_method
+		IndexRuleID: 6,
+		SeriesID:    common.SeriesID(1),
+		Analyzer:    databasev1.IndexRule_ANALYZER_SIMPLE,
+	}
+)
 
 func TestStore_Match(t *testing.T) {
 	tester := assert.New(t)
@@ -66,6 +75,19 @@ func TestStore_Match(t *testing.T) {
 		Term: []byte("org.apache.skywalking.examples.OrderService.order"),
 	}}, common.ItemID(3)))
 	s.(*store).flush()
+	tester.NoError(s.Write([]index.Field{{
+		Key:  serviceName1,
+		Term: []byte("test.1"),
+	}}, common.ItemID(1)))
+	tester.NoError(s.Write([]index.Field{{
+		Key:  serviceName1,
+		Term: []byte("test.2"),
+	}}, common.ItemID(2)))
+	tester.NoError(s.Write([]index.Field{{
+		Key:  serviceName1,
+		Term: []byte("test.3"),
+	}}, common.ItemID(3)))
+	s.(*store).flush()
 
 	tests := []struct {
 		matches []string
diff --git a/pkg/index/iterator.go b/pkg/index/iterator.go
index 8259d7d..5a350a2 100644
--- a/pkg/index/iterator.go
+++ b/pkg/index/iterator.go
@@ -290,10 +290,11 @@ func (di *delegateIterator) Valid() bool {
 		return false
 	}
 	if !bytes.Equal(di.curField.Key.Marshal(), di.fieldKeyBytes) {
-		di.l.Debug().
-			Uint64("series_id", uint64(di.fieldKey.SeriesID)).
-			Uint32("index_rule_id", di.fieldKey.IndexRuleID).
-			Msg("reached the limitation of the field(series_id+index_rule_id)")
+		if e := di.l.Debug(); e.Enabled() {
+			e.Uint64("series_id", uint64(di.fieldKey.SeriesID)).
+				Uint32("index_rule_id", di.fieldKey.IndexRuleID).
+				Msg("reached the limitation of the field(series_id+index_rule_id)")
+		}
 		di.Close()
 		return false
 	}
diff --git a/pkg/index/lsm/search.go b/pkg/index/lsm/search.go
index 251dfdd..3f6f52e 100644
--- a/pkg/index/lsm/search.go
+++ b/pkg/index/lsm/search.go
@@ -85,7 +85,12 @@ func (s *store) Iterator(fieldKey index.FieldKey, termRange index.RangeOpts, ord
 					break
 				}
 				itemID := convert.BytesToUint64(delegated.Val())
-				s.l.Debug().Uint64("item_id", itemID).Msg("add item id")
+				if e := s.l.Debug(); e.Enabled() {
+					e.Uint64("series_id", uint64(fieldKey.SeriesID)).
+						Uint64("index_rule_id", uint64(fieldKey.IndexRuleID)).
+						Uint64("item_id", itemID).
+						Msg("fetched item from the index")
+				}
 				pv.Value.Insert(common.ItemID(itemID))
 			}
 			return pv, nil
diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go
index 4fce4de..cd37bcd 100644
--- a/pkg/logger/logger.go
+++ b/pkg/logger/logger.go
@@ -34,24 +34,45 @@ type contextKey struct{}
 
 // Logging is the config info
 type Logging struct {
-	Env   string
-	Level string
+	Env     string
+	Level   string
+	Modules []string
+	Levels  []string
 }
 
 // Logger is wrapper for rs/zerolog logger with module, it is singleton.
 type Logger struct {
 	module string
 	*zerolog.Logger
+	modules map[string]zerolog.Level
 }
 
 func (l Logger) Module() string {
 	return l.module
 }
 
-func (l *Logger) Named(name string) *Logger {
-	module := strings.Join([]string{l.module, name}, ".")
-	subLogger := root.l.With().Str("module", module).Logger()
-	return &Logger{module: module, Logger: &subLogger}
+func (l *Logger) Named(name ...string) *Logger {
+	var mm []string
+	if l.module == rootName {
+		mm = name
+	} else {
+		mm = append([]string{l.module}, name...)
+	}
+	var moduleBuilder strings.Builder
+	var module string
+	level := l.GetLevel()
+	for i, m := range mm {
+		if i != 0 {
+			moduleBuilder.WriteString(".")
+		}
+		moduleBuilder.WriteString(strings.ToUpper(m))
+		module = moduleBuilder.String()
+		if ml, ok := l.modules[module]; ok {
+			level = ml
+		}
+	}
+	subLogger := root.l.With().Str("module", moduleBuilder.String()).Logger().Level(level)
+	return &Logger{module: module, modules: l.modules, Logger: &subLogger}
 }
 
 // Loggable indicates the implement supports logging
@@ -60,12 +81,18 @@ type Loggable interface {
 }
 
 func Fetch(ctx context.Context, name string) *Logger {
+	return FetchOrDefault(ctx, name, nil)
+}
+
+func FetchOrDefault(ctx context.Context, name string, defaultLogger *Logger) *Logger {
 	parentLogger := ctx.Value(ContextKey)
-	if parentLogger == nil {
-		return GetLogger(name)
+	if parentLogger != nil {
+		if pl, ok := parentLogger.(*Logger); ok {
+			return pl.Named(name)
+		}
 	}
-	if pl, ok := parentLogger.(*Logger); ok {
-		return pl.Named(name)
+	if defaultLogger == nil {
+		return GetLogger(name)
 	}
-	return GetLogger(name)
+	return defaultLogger
 }
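
Named now accepts several name segments, upper-cases them, and walks the per-module level table so the most specific configured module wins; FetchOrDefault falls back to a caller-supplied logger instead of the root one. A usage sketch against the pkg/logger API shown above (the Env value and module names are only illustrative):

package main

import "github.com/apache/skywalking-banyandb/pkg/logger"

func main() {
	// Root level is info; TSDB logs at debug and TSDB.BLOCK at warn.
	_ = logger.Init(logger.Logging{
		Env:     "dev", // assumed value; use whatever your deployment expects
		Level:   "info",
		Modules: []string{"tsdb", "tsdb.block"},
		Levels:  []string{"debug", "warn"},
	})

	// Module names are upper-cased internally; "TSDB.BLOCK" is the most
	// specific match, so this logger ends up at the warn level.
	l := logger.GetLogger("tsdb", "block")
	l.Warn().Msg("block controller started")
}
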
diff --git a/pkg/logger/setting.go b/pkg/logger/setting.go
index 40739b3..ebf923c 100644
--- a/pkg/logger/setting.go
+++ b/pkg/logger/setting.go
@@ -26,9 +26,12 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
 )
 
+const rootName = "ROOT"
+
 var root = rootLogger{}
 
 type rootLogger struct {
@@ -77,21 +80,33 @@ func GetLogger(scope ...string) *Logger {
 	if len(scope) < 1 {
 		return root.l
 	}
-	module := strings.Join(scope, ".")
-	subLogger := root.l.Logger.With().Str("module", module).Logger()
-	return &Logger{module: module, Logger: &subLogger}
+	l := root.l
+	for _, v := range scope {
+		l = l.Named(v)
+	}
+	return l
 }
 
 // Init initializes a rs/zerolog logger from user config
 func Init(cfg Logging) (err error) {
-	if err != root.set(cfg) {
-		return err
-	}
-	return nil
+	return root.set(cfg)
 }
 
 // getLogger initializes a root logger
 func getLogger(cfg Logging) (*Logger, error) {
+	modules := make(map[string]zerolog.Level)
+	if len(cfg.Modules) > 0 {
+		if len(cfg.Modules) != len(cfg.Levels) {
+			return nil, fmt.Errorf("modules %v don't match levels %v", cfg.Modules, cfg.Levels)
+		}
+		for i, v := range cfg.Modules {
+			lvl, err := zerolog.ParseLevel(cfg.Levels[i])
+			if err != nil {
+				return nil, errors.WithMessagef(err, "unknown module level %s", v)
+			}
+			modules[strings.ToUpper(v)] = lvl
+		}
+	}
 	lvl, err := zerolog.ParseLevel(cfg.Level)
 	if err != nil {
 		return nil, err
@@ -117,5 +132,5 @@ func getLogger(cfg Logging) (*Logger, error) {
 		w = os.Stdout
 	}
 	l := zerolog.New(w).Level(lvl).With().Timestamp().Logger()
-	return &Logger{module: "root", Logger: &l}, nil
+	return &Logger{module: rootName, Logger: &l, modules: modules}, nil
 }
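
A hedged configuration sketch for the new per-module levels (the module names
and levels are examples only); Modules and Levels must have the same length or
Init returns an error:

    import "github.com/apache/skywalking-banyandb/pkg/logger"

    func setupLogging() error {
        // Module keys are upper-cased on both sides, so "query.measure"
        // targets module "QUERY.MEASURE"; unlisted modules keep "info".
        return logger.Init(logger.Logging{
            Level:   "info",
            Modules: []string{"query", "query.measure"},
            Levels:  []string{"debug", "warn"},
        })
    }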
diff --git a/pkg/partition/entity.go b/pkg/partition/entity.go
index 4a8c698..24a358d 100644
--- a/pkg/partition/entity.go
+++ b/pkg/partition/entity.go
@@ -47,36 +47,33 @@ func NewEntityLocator(families []*databasev1.TagFamilySpec, entity *databasev1.E
 	return locator
 }
 
-func (e EntityLocator) Find(subject string, value []*modelv1.TagFamilyForWrite) (tsdb.Entity, error) {
-	entity := make(tsdb.Entity, len(e)+1)
-	entity[0] = []byte(subject)
+func (e EntityLocator) Find(subject string, value []*modelv1.TagFamilyForWrite) (tsdb.Entity, tsdb.EntityValues, error) {
+	entityValues := make(tsdb.EntityValues, len(e)+1)
+	entityValues[0] = tsdb.StrValue(subject)
 	for i, index := range e {
 		tag, err := GetTagByOffset(value, index.FamilyOffset, index.TagOffset)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
-		entry, errMarshal := pbv1.MarshalIndexFieldValue(tag)
-		if errors.Is(errMarshal, pbv1.ErrNullValue) {
-			continue
-		}
-		if errMarshal != nil {
-			return nil, errMarshal
-		}
-		entity[i+1] = entry
+		entityValues[i+1] = tag
+	}
+	entity, err := entityValues.ToEntity()
+	if err != nil {
+		return nil, nil, err
 	}
-	return entity, nil
+	return entity, entityValues, nil
 }
 
-func (e EntityLocator) Locate(subject string, value []*modelv1.TagFamilyForWrite, shardNum uint32) (tsdb.Entity, common.ShardID, error) {
-	entity, err := e.Find(subject, value)
+func (e EntityLocator) Locate(subject string, value []*modelv1.TagFamilyForWrite, shardNum uint32) (tsdb.Entity, tsdb.EntityValues, common.ShardID, error) {
+	entity, tagValues, err := e.Find(subject, value)
 	if err != nil {
-		return nil, 0, err
+		return nil, nil, 0, err
 	}
 	id, err := ShardID(entity.Marshal(), shardNum)
 	if err != nil {
-		return nil, 0, err
+		return nil, nil, 0, err
 	}
-	return entity, common.ShardID(id), nil
+	return entity, tagValues, common.ShardID(id), nil
 }
 
 func GetTagByOffset(value []*modelv1.TagFamilyForWrite, fIndex, tIndex int) (*modelv1.TagValue, error) {
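
A hedged sketch of consuming the widened Locate signature (the wrapper
function is hypothetical):

    import (
        modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
        "github.com/apache/skywalking-banyandb/banyand/tsdb"
        "github.com/apache/skywalking-banyandb/pkg/partition"
    )

    func entityValuesOf(loc partition.EntityLocator, subject string,
        families []*modelv1.TagFamilyForWrite, shardNum uint32) (tsdb.EntityValues, error) {
        // Locate now also returns the raw entity values, which callers can
        // forward to series creation and debug output.
        _, entityValues, _, err := loc.Locate(subject, families, shardNum)
        if err != nil {
            return nil, err
        }
        return entityValues, nil
    }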
diff --git a/pkg/pb/v1/metadata.go b/pkg/pb/v1/metadata.go
index 3af667d..b5eef8c 100644
--- a/pkg/pb/v1/metadata.go
+++ b/pkg/pb/v1/metadata.go
@@ -18,16 +18,10 @@
 package v1
 
 import (
-	"errors"
-
-	common_v1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
 	database_v1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
 	model_v1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
-	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 )
 
-var ErrInvalidUnit = errors.New("invalid interval rule's unit")
-
 func FindTagByName(families []*database_v1.TagFamilySpec, tagName string) (int, int, *database_v1.TagSpec) {
 	for fi, family := range families {
 		for ti, tag := range family.Tags {
@@ -82,16 +76,3 @@ func ParseMaxModRevision(indexRules []*database_v1.IndexRule) (maxRevisionForIdx
 	}
 	return
 }
-
-func ToIntervalRule(ir *common_v1.IntervalRule) (result tsdb.IntervalRule, err error) {
-	switch ir.Unit {
-	case common_v1.IntervalRule_UNIT_DAY:
-		result.Unit = tsdb.DAY
-	case common_v1.IntervalRule_UNIT_HOUR:
-		result.Unit = tsdb.HOUR
-	default:
-		return result, ErrInvalidUnit
-	}
-	result.Num = int(ir.Num)
-	return result, err
-}
diff --git a/pkg/test/helpers/http_health.go b/pkg/pb/v1/tsdb/interval.go
similarity index 50%
copy from pkg/test/helpers/http_health.go
copy to pkg/pb/v1/tsdb/interval.go
index b2c83a1..ace265d 100644
--- a/pkg/test/helpers/http_health.go
+++ b/pkg/pb/v1/tsdb/interval.go
@@ -6,7 +6,7 @@
 // not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//	http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing,
 // software distributed under the License is distributed on an
@@ -14,36 +14,27 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package helpers
 
-import (
-	"fmt"
-	"time"
+package tsdb
 
-	"github.com/go-resty/resty/v2"
+import (
+	"errors"
 
-	"github.com/apache/skywalking-banyandb/pkg/logger"
+	common_v1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 )
 
-func HTTPHealthCheck(addr string) func() error {
-	return func() error {
-		client := resty.New()
+var ErrInvalidUnit = errors.New("invalid interval rule's unit")
 
-		resp, err := client.R().
-			SetHeader("Accept", "application/json").
-			Get(fmt.Sprintf("http://%s/api/healthz", addr))
-		if err != nil {
-			time.Sleep(1 * time.Second)
-			return err
-		}
-		l := logger.GetLogger("http-health")
-		if resp.StatusCode() != 200 {
-			l.Warn().Str("responded_status", resp.Status()).Msg("service unhealthy")
-			time.Sleep(1 * time.Second)
-			return ErrServiceUnhealthy
-		}
-		l.Debug().Stringer("response", resp).Msg("connected")
-		time.Sleep(500 * time.Millisecond)
-		return nil
+func ToIntervalRule(ir *common_v1.IntervalRule) (result tsdb.IntervalRule, err error) {
+	switch ir.Unit {
+	case common_v1.IntervalRule_UNIT_DAY:
+		result.Unit = tsdb.DAY
+	case common_v1.IntervalRule_UNIT_HOUR:
+		result.Unit = tsdb.HOUR
+	default:
+		return result, ErrInvalidUnit
 	}
+	result.Num = int(ir.Num)
+	return result, err
 }
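
A hedged usage sketch of the relocated helper (the import aliases are
illustrative):

    import (
        commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
        pbtsdb "github.com/apache/skywalking-banyandb/pkg/pb/v1/tsdb"
    )

    func fourHourRule() error {
        // Still maps UNIT_DAY/UNIT_HOUR and returns ErrInvalidUnit otherwise.
        _, err := pbtsdb.ToIntervalRule(&commonv1.IntervalRule{
            Unit: commonv1.IntervalRule_UNIT_HOUR,
            Num:  4,
        })
        return err
    }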
diff --git a/pkg/pb/v1/write.go b/pkg/pb/v1/write.go
index 3d22f92..7434966 100644
--- a/pkg/pb/v1/write.go
+++ b/pkg/pb/v1/write.go
@@ -51,8 +51,8 @@ var (
 	ErrMalformedField              = errors.New("field is malformed")
 )
 
-func MarshalIndexFieldValue(tagValue *modelv1.TagValue) ([]byte, error) {
-	fv, err := ParseIndexFieldValue(tagValue)
+func MarshalTagValue(tagValue *modelv1.TagValue) ([]byte, error) {
+	fv, err := ParseTagValue(tagValue)
 	if err != nil {
 		return nil, err
 	}
@@ -63,47 +63,47 @@ func MarshalIndexFieldValue(tagValue *modelv1.TagValue) ([]byte, error) {
 	return fv.marshalArr(), nil
 }
 
-type FieldValue struct {
+type TagValue struct {
 	value    []byte
 	arr      [][]byte
 	splitter []byte
 }
 
-func newValue(value []byte) FieldValue {
-	return FieldValue{
+func newValue(value []byte) TagValue {
+	return TagValue{
 		value: value,
 	}
 }
 
-func newValueWithSplitter(splitter []byte) *FieldValue {
-	return &FieldValue{
+func newValueWithSplitter(splitter []byte) *TagValue {
+	return &TagValue{
 		splitter: splitter,
 	}
 }
 
-func appendValue(fv *FieldValue, value []byte) *FieldValue {
+func appendValue(fv *TagValue, value []byte) *TagValue {
 	if fv == nil {
-		fv = &FieldValue{}
+		fv = &TagValue{}
 	}
 	fv.arr = append(fv.arr, value)
 	return fv
 }
 
-func (fv FieldValue) GetValue() []byte {
+func (fv TagValue) GetValue() []byte {
 	if len(fv.value) < 1 {
 		return nil
 	}
 	return fv.value
 }
 
-func (fv FieldValue) GetArr() [][]byte {
+func (fv TagValue) GetArr() [][]byte {
 	if len(fv.arr) < 1 {
 		return nil
 	}
 	return fv.arr
 }
 
-func (fv *FieldValue) marshalArr() []byte {
+func (fv *TagValue) marshalArr() []byte {
 	switch len(fv.arr) {
 	case 0:
 		return nil
@@ -126,10 +126,10 @@ func (fv *FieldValue) marshalArr() []byte {
 	return buf.Bytes()
 }
 
-func ParseIndexFieldValue(tagValue *modelv1.TagValue) (FieldValue, error) {
+func ParseTagValue(tagValue *modelv1.TagValue) (TagValue, error) {
 	switch x := tagValue.GetValue().(type) {
 	case *modelv1.TagValue_Null:
-		return FieldValue{}, ErrNullValue
+		return TagValue{}, ErrNullValue
 	case *modelv1.TagValue_Str:
 		return newValue([]byte(x.Str.GetValue())), nil
 	case *modelv1.TagValue_Int:
@@ -141,7 +141,7 @@ func ParseIndexFieldValue(tagValue *modelv1.TagValue) (FieldValue, error) {
 		}
 		return *fv, nil
 	case *modelv1.TagValue_IntArray:
-		var fv *FieldValue
+		var fv *TagValue
 		for _, i := range x.IntArray.GetValue() {
 			fv = appendValue(fv, convert.Int64ToBytes(i))
 		}
@@ -151,7 +151,7 @@ func ParseIndexFieldValue(tagValue *modelv1.TagValue) (FieldValue, error) {
 	case *modelv1.TagValue_Id:
 		return newValue([]byte(x.Id.GetValue())), nil
 	}
-	return FieldValue{}, ErrUnsupportedTagForIndexField
+	return TagValue{}, ErrUnsupportedTagForIndexField
 }
 
 type StreamWriteRequestBuilder struct {
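
A hedged sketch of the renamed helpers; MarshalTagValue/ParseTagValue keep the
behavior of the former MarshalIndexFieldValue/ParseIndexFieldValue, including
ErrNullValue for null tags (the string tag below is made up):

    import (
        modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
        pbv1 "github.com/apache/skywalking-banyandb/pkg/pb/v1"
    )

    func marshalStrTag(s string) ([]byte, error) {
        return pbv1.MarshalTagValue(&modelv1.TagValue{
            Value: &modelv1.TagValue_Str{Str: &modelv1.Str{Value: s}},
        })
    }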
diff --git a/pkg/query/logical/common.go b/pkg/query/logical/common.go
index 2cd6850..f5f634e 100644
--- a/pkg/query/logical/common.go
+++ b/pkg/query/logical/common.go
@@ -27,6 +27,7 @@ import (
 
 	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/query/executor"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
 )
@@ -102,7 +103,7 @@ func ProjectItem(ec executor.ExecutionContext, item tsdb.Item, projectionFieldRe
 // with the help of Entity. The result is a list of element set, where the order of inner list is kept
 // as what the users specify in the seekerBuilder.
 // This method is used by the underlying tableScan and localIndexScan plans.
-func ExecuteForShard(series tsdb.SeriesList, timeRange timestamp.TimeRange,
+func ExecuteForShard(l *logger.Logger, series tsdb.SeriesList, timeRange timestamp.TimeRange,
 	builders ...SeekerBuilder,
 ) ([]tsdb.Iterator, []io.Closer, error) {
 	var itersInShard []tsdb.Iterator
@@ -111,11 +112,14 @@ func ExecuteForShard(series tsdb.SeriesList, timeRange timestamp.TimeRange,
 		itersInSeries, err := func() ([]tsdb.Iterator, error) {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 			defer cancel()
-			sp, errInner := seriesFound.Span(ctx, timeRange)
-			closers = append(closers, sp)
+			sp, errInner := seriesFound.Span(context.WithValue(ctx, logger.ContextKey, l), timeRange)
 			if errInner != nil {
+				if errors.Is(errInner, tsdb.ErrEmptySeriesSpan) {
+					return nil, nil
+				}
 				return nil, errInner
 			}
+			closers = append(closers, sp)
 			b := sp.SeekerBuilder()
 			for _, builder := range builders {
 				builder(b)
diff --git a/pkg/query/logical/measure/measure_plan.go b/pkg/query/logical/measure/measure_plan.go
index cae1fb9..8460711 100644
--- a/pkg/query/logical/measure/measure_plan.go
+++ b/pkg/query/logical/measure/measure_plan.go
@@ -59,7 +59,7 @@ func (l *limitPlan) Schema() logical.Schema {
 }
 
 func (l *limitPlan) String() string {
-	return fmt.Sprintf("Limit: %d, %d", l.offset, l.limit)
+	return fmt.Sprintf("%s Limit: %d, %d", l.Input.String(), l.offset, l.limit)
 }
 
 func (l *limitPlan) Children() []logical.Plan {
diff --git a/pkg/query/logical/measure/measure_plan_aggregation.go b/pkg/query/logical/measure/measure_plan_aggregation.go
index 5649ad2..ddbe355 100644
--- a/pkg/query/logical/measure/measure_plan_aggregation.go
+++ b/pkg/query/logical/measure/measure_plan_aggregation.go
@@ -91,7 +91,8 @@ type aggregationPlan struct {
 }
 
 func (g *aggregationPlan) String() string {
-	return fmt.Sprintf("aggregation: aggregation{type=%d,field=%s}",
+	return fmt.Sprintf("%s aggregation: aggregation{type=%d,field=%s}",
+		g.Input,
 		g.aggrType,
 		g.aggregationFieldRef.Field.Name)
 }
diff --git a/pkg/query/logical/measure/measure_plan_groupby.go b/pkg/query/logical/measure/measure_plan_groupby.go
index caec8f3..444da8b 100644
--- a/pkg/query/logical/measure/measure_plan_groupby.go
+++ b/pkg/query/logical/measure/measure_plan_groupby.go
@@ -85,7 +85,8 @@ func (g *groupBy) String() string {
 	} else {
 		method = "hash"
 	}
-	return fmt.Sprintf("GroupBy: groupBy=%s, method=%s",
+	return fmt.Sprintf("%s GroupBy: groupBy=%s, method=%s",
+		g.Input,
 		logical.FormatTagRefs(", ", g.groupByTagsRefs...), method)
 }
 
diff --git a/pkg/query/logical/measure/measure_plan_indexscan_local.go b/pkg/query/logical/measure/measure_plan_indexscan_local.go
index 3640595..52e1898 100644
--- a/pkg/query/logical/measure/measure_plan_indexscan_local.go
+++ b/pkg/query/logical/measure/measure_plan_indexscan_local.go
@@ -18,6 +18,7 @@
 package measure
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"time"
@@ -29,6 +30,7 @@ import (
 	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/index"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/query/executor"
 	"github.com/apache/skywalking-banyandb/pkg/query/logical"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
@@ -83,6 +85,7 @@ func (uis *unresolvedIndexScan) Analyze(s logical.Schema) (logical.Plan, error)
 		entities:             uis.entities,
 		groupByEntity:        uis.groupByEntity,
 		OrderBy:              orderBySubPlan,
+		l:                    logger.GetLogger("query", "measure", uis.metadata.Group, uis.metadata.Name, "local-index"),
 	}, nil
 }
 
@@ -98,6 +101,7 @@ type localIndexScan struct {
 	projectionFieldsRefs []*logical.FieldRef
 	entities             []tsdb.Entity
 	groupByEntity        bool
+	l                    *logger.Logger
 }
 
 func (i *localIndexScan) Execute(ec executor.MeasureExecutionContext) (executor.MIterator, error) {
@@ -108,7 +112,11 @@ func (i *localIndexScan) Execute(ec executor.MeasureExecutionContext) (executor.
 			return nil, err
 		}
 		for _, shard := range shards {
-			sl, err := shard.Series().List(tsdb.NewPath(e))
+			sl, err := shard.Series().List(context.WithValue(
+				context.Background(),
+				logger.ContextKey,
+				i.l,
+			), tsdb.NewPath(e))
 			if err != nil {
 				return nil, err
 			}
@@ -133,7 +141,7 @@ func (i *localIndexScan) Execute(ec executor.MeasureExecutionContext) (executor.
 			b.Filter(i.filter)
 		})
 	}
-	iters, closers, innerErr := logical.ExecuteForShard(seriesList, i.timeRange, builders...)
+	iters, closers, innerErr := logical.ExecuteForShard(i.l, seriesList, i.timeRange, builders...)
 	if len(closers) > 0 {
 		defer func(closers []io.Closer) {
 			for _, c := range closers {
@@ -162,9 +170,9 @@ func (i *localIndexScan) Execute(ec executor.MeasureExecutionContext) (executor.
 }
 
 func (i *localIndexScan) String() string {
-	return fmt.Sprintf("IndexScan: startTime=%d,endTime=%d,Metadata{group=%s,name=%s},conditions=%s; projection=%s",
+	return fmt.Sprintf("IndexScan: startTime=%d,endTime=%d,Metadata{group=%s,name=%s},conditions=%s; projection=%s; order=%s",
 		i.timeRange.Start.Unix(), i.timeRange.End.Unix(), i.metadata.GetGroup(), i.metadata.GetName(),
-		i.filter, logical.FormatTagRefs(", ", i.projectionTagsRefs...))
+		i.filter, logical.FormatTagRefs(", ", i.projectionTagsRefs...), i.OrderBy)
 }
 
 func (i *localIndexScan) Children() []logical.Plan {
diff --git a/pkg/query/logical/measure/measure_plan_top.go b/pkg/query/logical/measure/measure_plan_top.go
index 8344b91..c774afb 100644
--- a/pkg/query/logical/measure/measure_plan_top.go
+++ b/pkg/query/logical/measure/measure_plan_top.go
@@ -18,6 +18,8 @@
 package measure
 
 import (
+	"fmt"
+
 	"github.com/pkg/errors"
 
 	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
@@ -76,7 +78,7 @@ type top struct {
 }
 
 func (g *top) String() string {
-	return g.topNStream.String()
+	return fmt.Sprintf("%s top %s", g.Input, g.topNStream.String())
 }
 
 func (g *top) Children() []logical.Plan {
diff --git a/pkg/query/logical/stream/stream_plan_indexscan_global.go b/pkg/query/logical/stream/stream_plan_indexscan_global.go
index 549a83d..d2e1842 100644
--- a/pkg/query/logical/stream/stream_plan_indexscan_global.go
+++ b/pkg/query/logical/stream/stream_plan_indexscan_global.go
@@ -85,7 +85,7 @@ func (t *globalIndexScan) executeForShard(ec executor.StreamExecutionContext, sh
 		Term: t.expr.Bytes()[0],
 	})
 	if err != nil || len(itemIDs) < 1 {
-		return elementsInShard, nil
+		return nil, err
 	}
 	for _, itemID := range itemIDs {
 		segShard, err := ec.Shard(itemID.ShardID)
diff --git a/pkg/query/logical/stream/stream_plan_indexscan_local.go b/pkg/query/logical/stream/stream_plan_indexscan_local.go
index b33e4ff..47bf70f 100644
--- a/pkg/query/logical/stream/stream_plan_indexscan_local.go
+++ b/pkg/query/logical/stream/stream_plan_indexscan_local.go
@@ -18,6 +18,7 @@
 package stream
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"time"
@@ -28,6 +29,7 @@ import (
 	streamv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/stream/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/index"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/query/executor"
 	"github.com/apache/skywalking-banyandb/pkg/query/logical"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
@@ -43,6 +45,7 @@ type localIndexScan struct {
 	projectionTagRefs [][]*logical.TagRef
 	entities          []tsdb.Entity
 	filter            index.Filter
+	l                 *logger.Logger
 }
 
 func (i *localIndexScan) Execute(ec executor.StreamExecutionContext) ([]*streamv1.Element, error) {
@@ -53,7 +56,11 @@ func (i *localIndexScan) Execute(ec executor.StreamExecutionContext) ([]*streamv
 			return nil, err
 		}
 		for _, shard := range shards {
-			sl, err := shard.Series().List(tsdb.NewPath(e))
+			sl, err := shard.Series().List(context.WithValue(
+				context.Background(),
+				logger.ContextKey,
+				i.l,
+			), tsdb.NewPath(e))
 			if err != nil {
 				return nil, err
 			}
@@ -78,7 +85,7 @@ func (i *localIndexScan) Execute(ec executor.StreamExecutionContext) ([]*streamv
 			b.Filter(i.filter)
 		})
 	}
-	iters, closers, innerErr := logical.ExecuteForShard(seriesList, i.timeRange, builders...)
+	iters, closers, innerErr := logical.ExecuteForShard(i.l, seriesList, i.timeRange, builders...)
 	if len(closers) > 0 {
 		defer func(closers []io.Closer) {
 			for _, c := range closers {
diff --git a/pkg/query/logical/stream/stream_plan_tag_filter.go b/pkg/query/logical/stream/stream_plan_tag_filter.go
index c71e48b..798bfc7 100644
--- a/pkg/query/logical/stream/stream_plan_tag_filter.go
+++ b/pkg/query/logical/stream/stream_plan_tag_filter.go
@@ -27,6 +27,7 @@ import (
 	streamv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/stream/v1"
 	"github.com/apache/skywalking-banyandb/banyand/tsdb"
 	"github.com/apache/skywalking-banyandb/pkg/index"
+	"github.com/apache/skywalking-banyandb/pkg/logger"
 	"github.com/apache/skywalking-banyandb/pkg/query/executor"
 	"github.com/apache/skywalking-banyandb/pkg/query/logical"
 	"github.com/apache/skywalking-banyandb/pkg/timestamp"
@@ -114,6 +115,7 @@ func (uis *unresolvedTagFilter) selectIndexScanner(ctx *analyzeContext) (logical
 		metadata:          uis.metadata,
 		filter:            ctx.filter,
 		entities:          ctx.entities,
+		l:                 logger.GetLogger("query", "stream", "local-index"),
 	}, nil
 }
 
diff --git a/pkg/schema/metadata.go b/pkg/schema/metadata.go
index 3ba04be..f48b237 100644
--- a/pkg/schema/metadata.go
+++ b/pkg/schema/metadata.go
@@ -158,7 +158,9 @@ func (sr *schemaRepo) Watcher() {
 					if !more {
 						return
 					}
-					sr.l.Debug().Interface("event", evt).Msg("received an event")
+					if e := sr.l.Debug(); e.Enabled() {
+						e.Interface("event", evt).Msg("received an event")
+					}
 					for i := 0; i < 10; i++ {
 						var err error
 						switch evt.Typ {
diff --git a/pkg/test/helpers/grpc_health.go b/pkg/test/helpers/grpc_health.go
index 694dfaf..db46c7d 100644
--- a/pkg/test/helpers/grpc_health.go
+++ b/pkg/test/helpers/grpc_health.go
@@ -52,7 +52,9 @@ func HealthCheck(addr string, connTimeout time.Duration, rpcTimeout time.Duratio
 			l.Warn().Str("responded_status", resp.GetStatus().String()).Msg("service unhealthy")
 			return ErrServiceUnhealthy
 		}
-		l.Debug().Stringer("status", resp.GetStatus()).Msg("connected")
+		if e := l.Debug(); e.Enabled() {
+			e.Stringer("status", resp.GetStatus()).Msg("connected")
+		}
 		return nil
 	}
 }
diff --git a/pkg/test/helpers/http_health.go b/pkg/test/helpers/http_health.go
index b2c83a1..d591be4 100644
--- a/pkg/test/helpers/http_health.go
+++ b/pkg/test/helpers/http_health.go
@@ -42,7 +42,9 @@ func HTTPHealthCheck(addr string) func() error {
 			time.Sleep(1 * time.Second)
 			return ErrServiceUnhealthy
 		}
-		l.Debug().Stringer("response", resp).Msg("connected")
+		if e := l.Debug(); e.Enabled() {
+			e.Stringer("response", resp).Msg("connected")
+		}
 		time.Sleep(500 * time.Millisecond)
 		return nil
 	}
diff --git a/pkg/test/measure/testdata/index_rule_bindings/service_traffic.json b/pkg/test/measure/testdata/index_rule_bindings/service_traffic.json
index 83571f9..6a1bf9b 100644
--- a/pkg/test/measure/testdata/index_rule_bindings/service_traffic.json
+++ b/pkg/test/measure/testdata/index_rule_bindings/service_traffic.json
@@ -4,7 +4,8 @@
     "group": "sw_metric"
   },
   "rules": [
-    "service_id"
+    "service_id",
+    "layer"
   ],
   "subject":{
     "catalog": "CATALOG_MEASURE",
diff --git a/pkg/test/measure/testdata/index_rules/layer.json b/pkg/test/measure/testdata/index_rules/layer.json
new file mode 100644
index 0000000..0cad8a6
--- /dev/null
+++ b/pkg/test/measure/testdata/index_rules/layer.json
@@ -0,0 +1,13 @@
+{
+  "metadata": {
+    "id": 3,
+    "name": "layer",
+    "group": "sw_metric"
+  },
+  "tags": [
+    "layer"
+  ],
+  "type": "TYPE_INVERTED",
+  "location": "LOCATION_SERIES",
+  "updated_at": "2021-04-15T01:30:15.01Z"
+}
diff --git a/pkg/test/measure/testdata/index_rules/searchable_name.json b/pkg/test/measure/testdata/index_rules/searchable_name.json
index 80229bf..f862a63 100644
--- a/pkg/test/measure/testdata/index_rules/searchable_name.json
+++ b/pkg/test/measure/testdata/index_rules/searchable_name.json
@@ -9,6 +9,6 @@
 	],
 	"type": "TYPE_INVERTED",
 	"location": "LOCATION_SERIES",
-  "analyzer": "ANALYZER_SIMPLE",
+	"analyzer": "ANALYZER_SIMPLE",
 	"updated_at": "2021-04-15T01:30:15.01Z"
 }
\ No newline at end of file
diff --git a/pkg/timestamp/scheduler.go b/pkg/timestamp/scheduler.go
index 74cd16a..60a6eac 100644
--- a/pkg/timestamp/scheduler.go
+++ b/pkg/timestamp/scheduler.go
@@ -163,11 +163,15 @@ func (t *task) run() {
 	for {
 		next := t.schedule.Next(now)
 		d := next.Sub(now)
-		t.l.Debug().Time("now", now).Time("next", next).Dur("dur", d).Msg("schedule to")
+		if e := t.l.Debug(); e.Enabled() {
+			e.Time("now", now).Time("next", next).Dur("dur", d).Msg("schedule to")
+		}
 		timer := t.clock.Timer(d)
 		select {
 		case now = <-timer.C:
-			t.l.Debug().Time("now", now).Msg("wake")
+			if e := t.l.Debug(); e.Enabled() {
+				e.Time("now", now).Msg("wake")
+			}
 			if !t.action(now, t.l) {
 				t.l.Info().Msg("action stops the task")
 				return
diff --git a/.gitignore b/test/cases/measure/data/input/tag_filter_int.yaml
similarity index 67%
copy from .gitignore
copy to test/cases/measure/data/input/tag_filter_int.yaml
index 14ba169..f44e191 100644
--- a/.gitignore
+++ b/test/cases/measure/data/input/tag_filter_int.yaml
@@ -15,41 +15,17 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-bin
-include
-/build
-target
-/tmp
-
-# Test binary, build with `go test -c`
-*.test
-
-# Ginkgo test report
-*.report
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# editor and IDE paraphernalia
-.idea
-*.swp
-*.swo
-*~
-.vscode
-
-.DS_Store
-.env
-.run
-
-# mock files
-*mock.go
-*mock_test.go
-
-# snky cache
-.dccache
+metadata:
+  name: "service_traffic"
+  group: "sw_metric"
+tagProjection:
+  tagFamilies:
+  - name: "default"
+    tags: ["name", "layer"]
+criteria:
+  condition:
+    name: "layer"
+    op: "BINARY_OP_EQ"
+    value:
+      int:
+        value: "1"
diff --git a/test/cases/measure/data/testdata/service_traffic_data.json b/test/cases/measure/data/testdata/service_traffic_data.json
index c03e354..f9fee42 100644
--- a/test/cases/measure/data/testdata/service_traffic_data.json
+++ b/test/cases/measure/data/testdata/service_traffic_data.json
@@ -68,7 +68,7 @@
           },
           {
             "int": {
-              "value": 1
+              "value": 2
             }
           }
         ]
diff --git a/.gitignore b/test/cases/measure/data/want/tag_filter_int.yaml
similarity index 62%
copy from .gitignore
copy to test/cases/measure/data/want/tag_filter_int.yaml
index 14ba169..5a726b9 100644
--- a/.gitignore
+++ b/test/cases/measure/data/want/tag_filter_int.yaml
@@ -15,41 +15,26 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-bin
-include
-/build
-target
-/tmp
-
-# Test binary, build with `go test -c`
-*.test
-
-# Ginkgo test report
-*.report
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# editor and IDE paraphernalia
-.idea
-*.swp
-*.swo
-*~
-.vscode
-
-.DS_Store
-.env
-.run
-
-# mock files
-*mock.go
-*mock_test.go
-
-# snky cache
-.dccache
+dataPoints:
+  - tagFamilies:
+      - name: default
+        tags:
+          - key: name
+            value:
+              str:
+                value: service_name_1
+          - key: layer
+            value:
+              int:
+                value: "1"
+  - tagFamilies:
+      - name: default
+        tags:
+          - key: name
+            value:
+              str:
+                value: service_name_3
+          - key: layer
+            value:
+              int:
+                value: "1"
diff --git a/test/cases/measure/measure.go b/test/cases/measure/measure.go
index 5f3bce7..8af9ca5 100644
--- a/test/cases/measure/measure.go
+++ b/test/cases/measure/measure.go
@@ -42,6 +42,7 @@ var (
 var _ = g.DescribeTable("Scanning Measures", verify,
 	g.Entry("all", helpers.Args{Input: "all", Duration: 25 * time.Minute, Offset: -20 * time.Minute}),
 	g.Entry("filter by tag", helpers.Args{Input: "tag_filter", Duration: 25 * time.Minute, Offset: -20 * time.Minute}),
+	g.Entry("filter by an integer tag", helpers.Args{Input: "tag_filter_int", Duration: 25 * time.Minute, Offset: -20 * time.Minute}),
 	g.Entry("filter by an unknown tag", helpers.Args{Input: "tag_filter_unknown", Duration: 25 * time.Minute, Offset: -20 * time.Minute, WantEmpty: true}),
 	g.Entry("group and max", helpers.Args{Input: "group_max", Duration: 25 * time.Minute, Offset: -20 * time.Minute}),
 	g.Entry("group without field", helpers.Args{Input: "group_no_field", Duration: 25 * time.Minute, Offset: -20 * time.Minute}),
diff --git a/test/cases/stream/stream.go b/test/cases/stream/stream.go
index 0e791f8..5b2e4f6 100644
--- a/test/cases/stream/stream.go
+++ b/test/cases/stream/stream.go
@@ -37,7 +37,7 @@ var (
 	verify        = func(innerGm gm.Gomega, args helpers.Args) {
 		gm.Eventually(func(innerGm gm.Gomega) {
 			stream_test_data.VerifyFn(innerGm, SharedContext, args)
-		}, flags.EventuallyTimeout)
+		}, flags.EventuallyTimeout).Should(gm.Succeed())
 	}
 )
 
diff --git a/test/e2e-v2/script/env b/test/e2e-v2/script/env
index 5ed4284..094ca6f 100644
--- a/test/e2e-v2/script/env
+++ b/test/e2e-v2/script/env
@@ -21,8 +21,8 @@ SW_AGENT_GO_COMMIT=4af380c2db6243106b0fc650b6003ce3b3eb82a0
 SW_AGENT_PYTHON_COMMIT=c76a6ec51a478ac91abb20ec8f22a99b8d4d6a58
 SW_AGENT_CLIENT_JS_COMMIT=af0565a67d382b683c1dbd94c379b7080db61449
 SW_AGENT_CLIENT_JS_TEST_COMMIT=4f1eb1dcdbde3ec4a38534bf01dded4ab5d2f016
-SW_KUBERNETES_COMMIT_SHA=0f3ec68e5a7e1608cec8688716b848ed15e971e5
+SW_KUBERNETES_COMMIT_SHA=b670c41d94a82ddefcf466d54bab5c492d88d772
 SW_ROVER_COMMIT=d956eaede57b62108b78bca48045bd09ba88e653
 SW_CTL_COMMIT=e684fae0107045fc23799146d62f04cb68bd5a3b
-SW_OAP_COMMIT=d5388683322ee6a4aed2a3bc29d439aadfca9a04
-SW_AGENT_E2E_SERVICE_PROVIDER_COMMIT=828e6e2f2b57a0f06bb0d507e3296d2377943d9a
+
+SW_OAP_COMMIT=1335a48f1c034abc1fe24f6197ee7acfa3118bf0