Posted to notifications@skywalking.apache.org by wu...@apache.org on 2021/08/27 00:31:03 UTC

[skywalking-java] branch doc-polish created (now 1d4b652)

This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a change to branch doc-polish
in repository https://gitbox.apache.org/repos/asf/skywalking-java.git.


      at 1d4b652  Polish doc

This branch includes the following new commits:

     new 1d4b652  Polish doc

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


[skywalking-java] 01/01: Polish doc

Posted by wu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a commit to branch doc-polish
in repository https://gitbox.apache.org/repos/asf/skywalking-java.git

commit 1d4b652ac95af47ba20a5c340dc8e947f2d66745
Author: Wu Sheng <wu...@foxmail.com>
AuthorDate: Fri Aug 27 08:30:58 2021 +0800

    Polish doc
---
 CHANGES.md                                         |   49 +-
 LICENSE                                            |   21 -
 README.md                                          |    8 +-
 docs/README.md                                     |   31 -
 ...ble-with-other-javaagent-bytecode-processing.md |   52 -
 docs/en/FAQ/ES-Server-FAQ.md                       |   26 -
 .../EnhanceRequireObjectCache-Cast-Exception.md    |   20 -
 docs/en/FAQ/Hour-Day-Metrics-Stopping.md           |    8 -
 docs/en/FAQ/How-to-build-with-mac-m1.md            |   30 -
 ...mport-Project-Eclipse-RequireItems-Exception.md |   17 -
 docs/en/FAQ/MQ-involved-architecture.png           |  Bin 35911 -> 0 bytes
 docs/en/FAQ/Memory-leak-enhance-Worker-thread.md   |   32 -
 docs/en/FAQ/Protoc-Plugin-Fails-When-Build.md      |   12 -
 docs/en/FAQ/README.md                              |   33 -
 docs/en/FAQ/Unexpected-endpoint-register.md        |   12 -
 docs/en/FAQ/Why-have-traces-no-others.md           |    9 -
 docs/en/FAQ/es-version-conflict.md                 |   35 -
 docs/en/FAQ/install_agent_on_websphere.md          |   30 -
 docs/en/FAQ/kafka-plugin.md                        |    8 -
 docs/en/FAQ/maven-compile-npm-failure.md           |   62 -
 docs/en/FAQ/thrift-plugin.md                       |   10 -
 docs/en/FAQ/time-and-timezone.md                   |   21 -
 docs/en/FAQ/v3-version-upgrade.md                  |   11 -
 docs/en/FAQ/v6-version-upgrade.md                  |   28 -
 docs/en/FAQ/v8-version-upgrade.md                  |   10 -
 docs/en/FAQ/vnode.md                               |   15 -
 docs/en/FAQ/why_mq_not_involved.md                 |   26 -
 docs/en/concepts-and-designs/backend-overview.md   |   16 -
 docs/en/concepts-and-designs/event.md              |  137 -
 docs/en/concepts-and-designs/lal.md                |  356 --
 docs/en/concepts-and-designs/mal.md                |  251 -
 docs/en/concepts-and-designs/manual-sdk.md         |   10 -
 docs/en/concepts-and-designs/meter.md              |   24 -
 docs/en/concepts-and-designs/oal.md                |  144 -
 docs/en/concepts-and-designs/overview.md           |   44 -
 docs/en/concepts-and-designs/probe-introduction.md |   36 -
 docs/en/concepts-and-designs/project-goals.md      |   37 -
 docs/en/concepts-and-designs/scope-definitions.md  |  301 --
 docs/en/concepts-and-designs/service-agent.md      |   33 -
 docs/en/concepts-and-designs/service-mesh-probe.md |   28 -
 docs/en/concepts-and-designs/ui-overview.md        |   10 -
 docs/en/guides/Component-library-settings.md       |   67 -
 docs/en/guides/E2E-local-remote-debug.md           |   28 -
 docs/en/guides/How-to-build.md                     |   94 -
 docs/en/guides/How-to-release.md                   |  288 --
 docs/en/guides/Java-Plugin-Development-Guide.md    |  564 ---
 docs/en/guides/Plugin-test.md                      |  638 ---
 docs/en/guides/README.md                           |  144 -
 docs/en/guides/asf/committer.md                    |  153 -
 docs/en/guides/backend-oal-scripts.md              |    7 -
 docs/en/guides/backend-profile-export.md           |   24 -
 docs/en/guides/backend-profile.md                  |   52 -
 docs/en/guides/source-extension.md                 |   62 -
 docs/en/guides/storage-extention.md                |   50 -
 docs/en/protocols/Browser-HTTP-API-Protocol.md     |  108 -
 docs/en/protocols/Browser-Protocol.md              |   19 -
 docs/en/protocols/HTTP-API-Protocol.md             |  186 -
 docs/en/protocols/JVM-Protocol.md                  |    5 -
 docs/en/protocols/Log-Data-Protocol.md             |   83 -
 docs/en/protocols/README.md                        |   80 -
 ...ross-Process-Correlation-Headers-Protocol-v1.md |   16 -
 ...ross-Process-Propagation-Headers-Protocol-v3.md |   46 -
 docs/en/protocols/Trace-Data-Protocol-v3.md        |   44 -
 docs/en/protocols/query-protocol.md                |  173 -
 docs/en/setup/backend/advanced-deployment.md       |   37 -
 docs/en/setup/backend/apdex-threshold.md           |   27 -
 docs/en/setup/backend/backend-alarm.md             |  308 --
 docs/en/setup/backend/backend-cluster.md           |  136 -
 docs/en/setup/backend/backend-fetcher.md           |  149 -
 docs/en/setup/backend/backend-health-check.md      |   68 -
 docs/en/setup/backend/backend-init-mode.md         |   20 -
 docs/en/setup/backend/backend-ip-port.md           |   28 -
 docs/en/setup/backend/backend-k8s-monitoring.md    |   79 -
 docs/en/setup/backend/backend-k8s.md               |    9 -
 docs/en/setup/backend/backend-meter.md             |   57 -
 docs/en/setup/backend/backend-receivers.md         |  194 -
 docs/en/setup/backend/backend-setting-override.md  |   52 -
 docs/en/setup/backend/backend-setup.md             |  123 -
 docs/en/setup/backend/backend-start-up-mode.md     |   21 -
 docs/en/setup/backend/backend-storage.md           |  279 --
 docs/en/setup/backend/backend-telemetry.md         |  193 -
 docs/en/setup/backend/backend-token-auth.md        |   42 -
 docs/en/setup/backend/backend-vm-monitoring.md     |   41 -
 docs/en/setup/backend/backend-zabbix.md            |   73 -
 docs/en/setup/backend/configuration-vocabulary.md  |  297 --
 docs/en/setup/backend/dynamic-config.md            |  150 -
 docs/en/setup/backend/dynamical-logging.md         |   50 -
 docs/en/setup/backend/endpoint-grouping-rules.md   |  304 --
 docs/en/setup/backend/grafana-cluster.json         | 4453 -----------------
 docs/en/setup/backend/grafana-instance.json        | 5066 --------------------
 docs/en/setup/backend/grpc-ssl.md                  |   46 -
 docs/en/setup/backend/log-analyzer.md              |   75 -
 docs/en/setup/backend/metrics-exporter.md          |   81 -
 docs/en/setup/backend/otel-collector-config.yaml   |  168 -
 docs/en/setup/backend/otel-collector-oap.yaml      |  180 -
 docs/en/setup/backend/service-auto-grouping.md     |   16 -
 docs/en/setup/backend/slow-db-statement.md         |   14 -
 docs/en/setup/backend/spring-sleuth-setup.md       |   71 -
 docs/en/setup/backend/trace-sampling.md            |   46 -
 docs/en/setup/backend/ttl.md                       |   14 -
 docs/en/setup/backend/ui-setup.md                  |   34 -
 docs/en/setup/backend/uninstrumented-gateways.md   |   22 -
 docs/en/setup/envoy/als_setting.md                 |  110 -
 docs/en/setup/envoy/config.yaml                    |  108 -
 docs/en/setup/envoy/examples/metrics/Makefile      |   23 -
 docs/en/setup/envoy/examples/metrics/README.md     |  115 -
 .../metrics/docker-compose-envoy-v3-api.yaml       |   34 -
 .../envoy/examples/metrics/docker-compose.yaml     |   34 -
 .../setup/envoy/examples/metrics/envoy-v1.16.yaml  |  107 -
 .../setup/envoy/examples/metrics/envoy-v1.19.yaml  |  116 -
 docs/en/setup/envoy/examples/metrics/log4j2.xml    |   38 -
 docs/en/setup/envoy/identify.json                  |  366 --
 docs/en/setup/envoy/metrics.json                   |  545 ---
 docs/en/setup/envoy/metrics_service_setting.md     |  110 -
 docs/en/setup/istio/README.md                      |   67 -
 docs/en/setup/service-agent/browser-agent.md       |   10 -
 docs/en/setup/service-agent/server-agents.md       |   23 -
 docs/en/ui/README.md                               |  153 -
 docs/menu.yml                                      |  270 +-
 119 files changed, 63 insertions(+), 20163 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 5a40b60..04cb623 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,64 +5,19 @@ Release Notes.
 8.8.0
 ------------------
 
-#### Project
-
+* **Split the Java agent from the main monorepo. It is now a separate repository and will be released separately.**
 * Upgrade to JDK 11 in the Dockerfile and remove unused java_opts.
 * DataCarrier changes the `#consume` API to accept properties as a parameter for initializing the consumer when
   `Class<? extends IConsumer<T>> consumerClass` is used.
-
-#### Java Agent
-
 * Support the multiple DNS periodic resolving mechanism.
 * Modify the `Tags.STATUS_CODE` field name to `Tags.HTTP_RESPONSE_STATUS_CODE` and its type from `StringTag` to `IntegerTag`; add the `Tags.RPC_RESPONSE_STATUS_CODE` field to hold the RPC response code value.
 * Fix the kafka-reporter-plugin shaded package conflict.
 * Add all config items to `agent.conf` file for convenient containerization use cases.
 * Advanced Kafka Producer configuration enhancement.
 
-#### OAP-Backend
-
-* Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to
-  1.21.
-* kubernetes java client upgrade from 12.0.1 to 13.0.0
-* Add `event` http receiver
-* Support Metric level function `serviceRelation` in `MAL`.
-* Support envoy metrics binding into the topology.
-* Fix openapi-definitions folder not being read correctly.
-* Trace segments are no longer recognized as TopN sample services. This was added experimentally through #4694, but it
-  caused a performance impact.
-* Remove `version` and `endTime` in the segment entity. Reduce indexing payload.
-* Fix `mapper_parsing_exception` in ElasticSearch 7.14.
-* Support component IDs for Go-Kratos framework.
-* [Break Change] Remove endpoint name in the trace query condition. Only support `query by endpoint id`.
-* Fix `ProfileSnapshotExporterTest` case on `OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9)`,
-  MacOS.
-* [Break Change] Remove page path in the browser log query condition. Only support `query by page path id`.
-* [Break Change] Remove endpoint name in the backend log query condition. Only support `query by endpoint id`.
-* [Break Change] Fix typo for a column `page_path_id`(was `pate_path_id`) of storage entity `browser_error_log`.
-* Add component id for Python falcon plugin.
-* Add `rpcStatusCode` for `rpc.status_code` tag. The `responseCode` field is marked as deprecated and replaced by `httpResponseStatusCode` field. 
-* Remove the duplicated tags to reduce the storage payload.
-* Add a new API to test log analysis language.
-* Harden the security of Groovy-based DSL, MAL and LAL.
-* Fix distinct in Service/Instance/Endpoint query is not working.
-* Support collection type in dynamic configuration core.
-* Support zookeeper grouped dynamic configurations.
-* Fix NPE when OAP nodes synchronize events with each other in cluster mode.
-* Support k8s configmap grouped dynamic configurations.
-
-#### UI
-
-* Fix not found error when refresh UI.
-* Update endpointName to endpointId in the query trace condition.
-* Add Python falcon icon on the UI.
-* Fix searching endpoints with keywords.
-
 #### Documentation
 
-* Add a section in `Log Collecting And Analysis` doc, introducing the new Python agent log reporter.
-* Add one missing step in `otel-receiver` doc about how to activate the default receiver.
-
-All issues and pull requests are [here](https://github.com/apache/skywalking/milestone/96?closed=1)
+All issues and pull requests are [here](https://github.com/apache/skywalking/milestone/99?closed=1)
 
 ------------------
 Find change logs of all versions [here](changes).
diff --git a/LICENSE b/LICENSE
index 017d072..8f71f43 100644
--- a/LICENSE
+++ b/LICENSE
@@ -200,24 +200,3 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 
-=======================================================================
-Apache SkyWalking Subcomponents:
-
-The Apache SkyWalking project contains subcomponents with separate copyright
-notices and license terms. Your use of the source code for the these
-subcomponents is subject to the terms and conditions of the following
-licenses.
-
-========================================================================
-Apache 2.0 licenses
-========================================================================
-
-The following components are provided under the Apache License. See project link for details.
-The text of each license is the standard Apache 2.0 license.
-
-   proto files from cncf/udpa: https://github.com/cncf/udpa Apache 2.0
-   proto files from envoyproxy/data-plane-api: https://github.com/envoyproxy/data-plane-api  Apache 2.0
-   proto files from prometheus/client_model: https://github.com/prometheus/client_model Apache 2.0
-   flatbuffers files from istio/proxy: https://github.com/istio/proxy Apache 2.0
-   mvnw files from https://github.com/takari/maven-wrapper Apache 2.0
-   svg files from skywalking-ui/src/assets/icons: https://github.com/google/material-design-icons Apache 2.0
diff --git a/README.md b/README.md
index 95c32a4..918da1d 100644
--- a/README.md
+++ b/README.md
@@ -16,13 +16,7 @@ SkyWalking-Java: The Java Agent for Apache SkyWalking, which provides the native
 SkyWalking: an APM(application performance monitor) system, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures.
 
 # Documentation
-- [Official documentation](https://skywalking.apache.org/docs/#SkyWalking)
-- [The paper of STAM](https://wu-sheng.github.io/STAM/), Streaming Topology Analysis Method.
-- [Blog](https://skywalking.apache.org/blog/2020-04-13-apache-skywalking-profiling/) about Use Profiling to Fix the Blind Spot of Distributed Tracing
-- [Blog](https://skywalking.apache.org/blog/2020-12-03-obs-service-mesh-with-sw-and-als/) about observing Istio + Envoy service mesh with ALS solution.
-- [Blog](https://skywalking.apache.org/blog/obs-service-mesh-vm-with-sw-and-als/) about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).
-
-NOTICE, SkyWalking 8.0+ uses [v3 protocols](docs/en/protocols/README.md). They are incompatible with previous releases.
+- [Official documentation](https://skywalking.apache.org/docs/#JavaAgent)
 
 # Downloads
 Please head to the [releases page](https://skywalking.apache.org/downloads/) to download a release of Apache SkyWalking.
diff --git a/docs/README.md b/docs/README.md
deleted file mode 100644
index 9a44553..0000000
--- a/docs/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Welcome
-**This is the official documentation of SkyWalking 8. Welcome to the SkyWalking community!**
-
-Here you can learn all you need to know about **SkyWalking**’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking's contributing guidelines.
-
-**NOTE: SkyWalking 8 uses brand new tracing APIs which are incompatible with all previous releases.**
-
-- **Concepts and Designs**. You'll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.
-
-- **Setup**. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.
-
-- **UI Introduction**. An introduction to the UI components and their features. 
-
-- **Contributing Guides**. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!
-
-- **Protocols**. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.
-
-- **FAQs**. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.
-
-
-You might also find these links interesting:
-
-- The latest and old releases are all available at [Apache SkyWalking release page](https://skywalking.apache.org/downloads/). The change logs can be found [here](https://github.com/apache/skywalking/tree/master/changes).
-
-- [SkyWalking WIKI](https://cwiki.apache.org/confluence/display/SKYWALKING/Home) hosts the context of some changes and events.
-
-- You can find the conference schedules, video recordings, and articles about SkyWalking in the [community resource catalog](https://github.com/OpenSkywalking/Community).
-
-We're always looking for help to improve our documentation and codes, so please don’t hesitate to [file an issue](https://github.com/apache/skywalking/issues/new) if you see any problems. 
-Or better yet, directly contribute by submitting a pull request to help us get better!
-
diff --git a/docs/en/FAQ/Compatible-with-other-javaagent-bytecode-processing.md b/docs/en/FAQ/Compatible-with-other-javaagent-bytecode-processing.md
deleted file mode 100644
index fa9591f..0000000
--- a/docs/en/FAQ/Compatible-with-other-javaagent-bytecode-processing.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## Compatibility with other Java agent bytecode processes
-
-### Problem
-1. When using the SkyWalking agent, some other agents, such as Arthas, can't work properly. 
-https://github.com/apache/skywalking/pull/4858
-
-2. Class retransformation by another Java agent conflicts with the SkyWalking agent, as illustrated in this [demo](https://github.com/SkyAPMTest/retransform-conflict-demo)
- 
-### Cause
-The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. 
-ByteBuddy generates auxiliary classes with different random names every time. 
-
-When another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. 
-Since ByteBuddy regenerates the bytecode, the fields and imported class names are modified and the JVM's verification of the class bytecode fails, so the retransformation is unsuccessful.
-
-
-### Resolution
-
-**1. Enable the class cache feature**  
-
-Add JVM parameters:  
-`-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY`    
-
-Or uncomment the following options in `agent.conf`:
-  
-```
-# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode),
-# Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent.
-agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false}
-
-# The instrumented classes cache mode: MEMORY or FILE
-# MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory
-# FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits
-agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY}
-
-```
-
-When the class cache feature is enabled, the SkyWalking agent saves the instrumented class bytecode to memory or a temporary file. 
-When another Java agent retransforms the same class, the SkyWalking agent first attempts to load it from the cache.
-
-If the cached class is found, it will be used directly, without regenerating an auxiliary class with a new random name. 
-The subsequent Java agent's processing will therefore not be affected, as the sketch below illustrates.
-
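The following sketch illustrates the cache-first lookup described above. It is NOT the agent's actual implementation; the class and method names are hypothetical.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical illustration of the cache-first lookup, not SkyWalking's real code.
public class ClassCacheSketch {
    private final Map<String, byte[]> cache = new ConcurrentHashMap<>();

    /**
     * Returns the instrumented bytecode for a class. On a retransform, the
     * previously enhanced bytes are reused, so auxiliary class names stay
     * stable and JVM bytecode verification succeeds.
     */
    public byte[] transform(String className, byte[] originalBytes,
                            Function<byte[], byte[]> byteBuddyEnhancer) {
        return cache.computeIfAbsent(className, key -> byteBuddyEnhancer.apply(originalBytes));
    }
}
```
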
-**2. Class cache save mode**  
-We recommend saving cached classes to memory, although this takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the following ways:  
-`-Dskywalking.agent.class_cache_mode=MEMORY` : save cached classes to Java memory.    
-`-Dskywalking.agent.class_cache_mode=FILE` : save cached classes to the SkyWalking agent path '/class-cache'.  
-
-Or modify these options in `agent.conf`:
-  
-`agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY}`    
-`agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}`    
diff --git a/docs/en/FAQ/ES-Server-FAQ.md b/docs/en/FAQ/ES-Server-FAQ.md
deleted file mode 100644
index 7e1d039..0000000
--- a/docs/en/FAQ/ES-Server-FAQ.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# ElasticSearch 
-Some new users may encounter the following issues:
-* The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.
-
-Or 
-* ERROR CODE 429.
-```
-    Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true&timeout=1m], status line [HTTP/1.1 429 Too Many Requests]
-{"error":{"root_cause":[{"type":"remote_transport_exception","reason":"[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]"}],"type":"es_rejected_execution_exception","reason":"rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed ta [...]
-        at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2]
-        at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2]
-        at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2]
-        at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch
-```
-
-You could add the following config to `elasticsearch.yml`, and set the values based on your environment.
-```yml
-# In the case of tracing, consider setting a value higher than this.
-thread_pool.index.queue_size: 1000
-thread_pool.write.queue_size: 1000
-
-# When you face query error at trace page, remember to check this.
-index.max_result_window: 1000000
-```
-
-For more information, see ElasticSearch's official documentation.
diff --git a/docs/en/FAQ/EnhanceRequireObjectCache-Cast-Exception.md b/docs/en/FAQ/EnhanceRequireObjectCache-Cast-Exception.md
deleted file mode 100644
index 607c11d..0000000
--- a/docs/en/FAQ/EnhanceRequireObjectCache-Cast-Exception.md
+++ /dev/null
@@ -1,20 +0,0 @@
-### Problem
-When you start your application with the `skywalking` agent, you may find this exception in your agent log, which means that `EnhanceRequireObjectCache` cannot be cast to `EnhanceRequireObjectCache`. For example:
-```java
-ERROR 2018-05-07 21:31:24 InstMethodsInter :  class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure
-java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache
-	at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45)
-	at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105)
-	at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java)
-	at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47)
-	at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131)
-	at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76)
-	...
-```
-
-### Reason
-This exception may be caused by `hot deployment` tools (such as `spring-boot-devtools`) or other tools that change the `classloader` at runtime.
-
-### Resolution
-1. This error does not occur under the production environment, since developer tools are automatically disabled: See [spring-boot-devtools](https://docs.spring.io/spring-boot/docs/2.4.x/reference/html/using-spring-boot.html#using-boot-devtools).
-2. If you would like to debug in your development environment as usual, temporarily remove the `hot deployment` package from your lib path.
diff --git a/docs/en/FAQ/Hour-Day-Metrics-Stopping.md b/docs/en/FAQ/Hour-Day-Metrics-Stopping.md
deleted file mode 100644
index c6dcbff..0000000
--- a/docs/en/FAQ/Hour-Day-Metrics-Stopping.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?
-
-This issue is to be expected with an upgrade from 6.x to 7.x. 
-See the [Downsampling Data Packing feature](../setup/backend/backend-storage.md#downsampling-data-packing)
-of the ElasticSearch storage.
-
-You may simply delete all expired `*-day_xxxxx` and `*-hour_xxxxx` (`xxxxx` is a timestamp) indexes. 
-Currently, SkyWalking uses the `metrics name-xxxxx` and `metrics name-month_xxxxx` indexes only.
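
For example, assuming a local Elasticsearch and that your cluster permits wildcard index deletion (the host and index patterns below are placeholders; double-check them against your actual index names first):

```
curl -X DELETE "http://localhost:9200/*-day_*,*-hour_*"
```
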
diff --git a/docs/en/FAQ/How-to-build-with-mac-m1.md b/docs/en/FAQ/How-to-build-with-mac-m1.md
deleted file mode 100644
index 1624b28..0000000
--- a/docs/en/FAQ/How-to-build-with-mac-m1.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Compiling issues on Mac's M1 chip
-### Problem
-- When compiling according to [How-to-build](../guides/How-to-build.md), the following problem may occur, causing the build to fail.
-```
-[ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing:
-[ERROR] ----------
-[ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0
-[ERROR]
-[ERROR]   Try downloading the file manually from the project website.
-[ERROR]
-[ERROR]   Then, install it using the command:
-[ERROR]       mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file
-[ERROR]
-[ERROR]   Alternatively, if you host your own repository you can deploy the file there:
-[ERROR]       mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id]
-[ERROR]
-[ERROR]   Path to dependency:
-[ERROR]         1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT
-[ERROR]         2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0
-[ERROR]
-[ERROR] ----------
-[ERROR] 1 required artifact is missing.
-
-```
-
-### Reason
-The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version of protoc. You may check the available versions at the Protocol Buffers releases page: https://github.com/protocolbuffers/protobuf/releases. Since Mac's M1 is compatible with the osx-x86_64 version, you need to manually specify the osx-x86_64 version until a native aarch_64 build is available for download.
-
-### Resolution
-You may add `-Dos.detected.classifier=osx-x86_64` after the original compilation parameters, for example: `./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64`. After specifying the classifier, the build compiles and runs normally.
diff --git a/docs/en/FAQ/Import-Project-Eclipse-RequireItems-Exception.md b/docs/en/FAQ/Import-Project-Eclipse-RequireItems-Exception.md
deleted file mode 100644
index c31bedb..0000000
--- a/docs/en/FAQ/Import-Project-Eclipse-RequireItems-Exception.md
+++ /dev/null
@@ -1,17 +0,0 @@
-### Problem
-- When importing the SkyWalking project to Eclipse, the following errors may occur:
-> Software being installed: Checkstyle configuration plugin for
-> M2Eclipse 1.0.0.201705301746
-> (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 
-> 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746
-> (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 
-> 1.0.0.201705301746) requires 'net.sf.eclipsecs.core 5.2.0' but it could not be found
-
-### Reason
-The Eclipse Checkstyle Plug-in has not been installed.
-
-### Resolution
-Download the plug-in here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect 
-Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required.
-Plug-in description:
-The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checks against coding style and error-prone code constructs.
diff --git a/docs/en/FAQ/MQ-involved-architecture.png b/docs/en/FAQ/MQ-involved-architecture.png
deleted file mode 100644
index a022243..0000000
Binary files a/docs/en/FAQ/MQ-involved-architecture.png and /dev/null differ
diff --git a/docs/en/FAQ/Memory-leak-enhance-Worker-thread.md b/docs/en/FAQ/Memory-leak-enhance-Worker-thread.md
deleted file mode 100644
index 1cbabf2..0000000
--- a/docs/en/FAQ/Memory-leak-enhance-Worker-thread.md
+++ /dev/null
@@ -1,32 +0,0 @@
-### Problem 
-When using a thread pool, `TraceSegment` data in a thread cannot be reported, and there is memory data that cannot be recycled (a memory leak).
-
-### Example
-``` java
-    ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
-    executor.setThreadFactory(r -> new Thread(RunnableWrapper.of(r)));
-```
-
-### Reason
-
-* Worker threads are enhanced when the thread pool is used (as in the example above, where the thread factory wraps the worker thread itself). 
-* By the design of the SkyWalking Java agent, cross-thread tracing requires enhancing the task, not the worker thread.
-
-### Resolution
-
-* When using `Thread Schedule Framework`:
-See the SkyWalking Thread Schedule Framework support in the [SkyWalking Java agent supported list](../setup/service-agent/java-agent/Supported-list.md), such as Spring Framework's `@Async`, which can implement tracing without any modification. 
-
-* When using `Custom Thread Pool`:
-Wrap the task with the following code.
-
-```java
-    ExecutorService executorService = Executors.newFixedThreadPool(1);
-    executorService.execute(RunnableWrapper.of(new Runnable() {
-        @Override public void run() {
-            //your code
-        }
-    }));
-```
-See [across thread solution APIs](../setup/service-agent/java-agent/Application-toolkit-trace-cross-thread.md) for more use cases.
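
The toolkit also covers `Callable` tasks. A brief sketch, assuming `CallableWrapper` from the same `apm-toolkit-trace` module (verify against your toolkit version):

```java
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.skywalking.apm.toolkit.trace.CallableWrapper;

public class CallableExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executorService = Executors.newFixedThreadPool(1);
        // CallableWrapper propagates the trace context like RunnableWrapper does.
        Future<String> future = executorService.submit(CallableWrapper.of(new Callable<String>() {
            @Override public String call() {
                return "your result";
            }
        }));
        System.out.println(future.get());
        executorService.shutdown();
    }
}
```
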
-
diff --git a/docs/en/FAQ/Protoc-Plugin-Fails-When-Build.md b/docs/en/FAQ/Protoc-Plugin-Fails-When-Build.md
deleted file mode 100644
index 11b6b5a..0000000
--- a/docs/en/FAQ/Protoc-Plugin-Fails-When-Build.md
+++ /dev/null
@@ -1,12 +0,0 @@
-### Problem
-- In a Maven build, the following error may occur with the protoc-plugin:
-```
-[ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \skywalking\apm-network\target\protoc-plugins: \skywalking\apm-network\target\protoc-plugins\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -> [Help 1]
-```
-
-### Reason
-- The Protobuf compiler depends on glibc, but glibc is not installed, or an old version is already installed in the system.
-
-### Resolution
-- Install or upgrade to the latest version of the glibc library. In a container environment, the latest glibc version of the Alpine image is recommended.
-Please refer to http://www.gnu.org/software/libc/documentation.html.
diff --git a/docs/en/FAQ/README.md b/docs/en/FAQ/README.md
deleted file mode 100644
index 49ec708..0000000
--- a/docs/en/FAQ/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# FAQs
-These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.
-
-## Design
-* [Why doesn't SkyWalking involve MQ in its architecture?](why_mq_not_involved.md)
-
-## Compiling
-* [Protoc plugin fails in maven build](Protoc-Plugin-Fails-When-Build.md)
-* [Required items could not be found when importing project into Eclipse](Import-Project-Eclipse-RequireItems-Exception.md)
-* [Maven compilation failure with error such as `python2 not found`](maven-compile-npm-failure.md)
-* [Compiling issues on Mac's M1 chip](How-to-build-with-mac-m1.md)
-
-## Runtime
-* [Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0](es-version-conflict.md)
-* [Version 8.x+ upgrade](v8-version-upgrade.md)
-* [Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?](Hour-Day-Metrics-Stopping.md)
-* [Version 6.x upgrade](v6-version-upgrade.md)
-* [Why are there only traces in UI?](Why-have-traces-no-others.md)
-* [Tracing doesn't work on the Kafka consumer end](kafka-plugin.md)
-* [Agent or collector version upgrade, 3.x -> 5.0.0-alpha](v3-version-upgrade.md)
-* [EnhanceRequireObjectCache class cast exception](EnhanceRequireObjectCache-Cast-Exception.md)
-* [ElasticSearch server performance issues, including ERROR CODE:429](ES-Server-FAQ.md)
-* [IllegalStateException when installing Java agent on WebSphere 7](install_agent_on_websphere.md)
-* ["FORBIDDEN/12/index read-only / allow delete (api)" appears in the log](https://discuss.elastic.co/t/forbidden-12-index-read-only-allow-delete-api/110282)
-* [No data shown and backend replies with "Variable 'serviceId' has coerced Null value for NonNull type 'ID!'"](time-and-timezone.md)
-* [**Unexpected endpoint register** warning after 6.6.0](Unexpected-endpoint-register.md)
-* [Use the profile exporter tool if the profile analysis is not right](../guides/backend-profile-export.md)
-* [Compatibility with other javaagent bytecode processes](Compatible-with-other-javaagent-bytecode-processing.md)
-* [**Java agent memory leak** when enhancing `Worker thread` at Thread Pool](Memory-leak-enhance-Worker-thread.md)
-* [Thrift plugin](thrift-plugin.md)
-
-## UI
-* [What is **VNode**? And why does SkyWalking have that?](vnode.md)
diff --git a/docs/en/FAQ/Unexpected-endpoint-register.md b/docs/en/FAQ/Unexpected-endpoint-register.md
deleted file mode 100644
index 2eff4d0..0000000
--- a/docs/en/FAQ/Unexpected-endpoint-register.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Register mechanism is no longer required for local / exit span
-
-Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old Java agent (before 6.6.0) is still running
-and registers to a 6.6.0+ backend, you will see the following warning message.
-```
-class=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side.
-```
-
-This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol
-requirements.
-
-You could simply use `log4j2.xml` to filter this warning message out, for example:
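
As a sketch only (the appender name and pattern layout below are assumptions; adapt them to your actual `log4j2.xml`), a log4j2 `RegexFilter` can drop the message:

```xml
<!-- Sketch: deny the harmless warning; everything else passes through. -->
<Console name="Console" target="SYSTEM_OUT">
    <RegexFilter regex=".*Unexpected endpoint register.*" onMatch="DENY" onMismatch="ACCEPT"/>
    <PatternLayout charset="UTF-8" pattern="%d - %c - %L [%t] %-5p %x - %m%n"/>
</Console>
```
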
diff --git a/docs/en/FAQ/Why-have-traces-no-others.md b/docs/en/FAQ/Why-have-traces-no-others.md
deleted file mode 100644
index 26e8a79..0000000
--- a/docs/en/FAQ/Why-have-traces-no-others.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### Problem
-- There are no abnormal logs in the agent log or the collector log.
-- Traces can be seen, but no other information is available in the UI.
-
-### Reason
-The operating system hosting the monitored system is not set to the correct time zone, which causes statistics collection time points to deviate.
-
-### Resolution
-Make sure the time is synchronized between collector servers and monitored application servers.
diff --git a/docs/en/FAQ/es-version-conflict.md b/docs/en/FAQ/es-version-conflict.md
deleted file mode 100644
index 2cde770..0000000
--- a/docs/en/FAQ/es-version-conflict.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0
-
-Since 8.7.0, we have made the following optimization to reduce the Elasticsearch load.
-
-```markdown
-Performance: remove the synchronous persistence mechanism from the batch ElasticSearch DAO, because the enhanced
-persistent session mechanism no longer requires data to be queryable immediately after inserts and updates.
-```
-
-Due to this, we flush the metrics into Elasticsearch without using `WriteRequest.RefreshPolicy.WAIT_UNTIL`. This dramatically reduces
-the load of persistence work in the OAP server and the CPU load of Elasticsearch.
-
-Meanwhile, there is a small chance that you will see the following **WARN** messages in your logs.
-
-```
-{
-  "timeMillis": 1626247722647,
-  "thread": "I/O dispatcher 4",
-  "level": "WARN",
-  "loggerName": "org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient",
-  "message": "Bulk [70] executed with failures:[failure in bulk execution:\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1b [...]
-  "endOfBatch": false,
-  "loggerFqcn": "org.apache.logging.slf4j.Log4jLogger",
-  "threadId": 44,
-  "threadPriority": 5,
-  "timestamp": "2021-07-14 15:28:42.647"
-}
-```
-
-This does not affect the system much; it only implies a possibility of slightly inaccurate metrics. If these logs do not show up at high
-frequency, you can ignore them.
-
-If you see many logs like this, it is a signal that the flush period of your ElasticSearch template cannot
-catch up with your setting, or that you have set the `persistentPeriod` to less than the flush period.
-
diff --git a/docs/en/FAQ/install_agent_on_websphere.md b/docs/en/FAQ/install_agent_on_websphere.md
deleted file mode 100644
index daddc75..0000000
--- a/docs/en/FAQ/install_agent_on_websphere.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# IllegalStateException when installing Java agent on WebSphere
-This issue was found in our [community discussion and feedback](https://github.com/apache/skywalking/issues/2652). 
-A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 with IBM JDK 1.8_20160719 and 1.7.0_20150407,
-and experienced the following error logs:
-```
-WARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type.
-java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147)
-at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147)
-...
-```
-
-The exception occurred because an access grant is required in WebSphere. 
-Simply follow these steps:
-
-1. Set the agent's owner to the owner of WebSphere.
-2. Add `grant codeBase "file:${agent_dir}/-" { permission java.security.AllPermission; };` to the `server.policy` file.
-
diff --git a/docs/en/FAQ/kafka-plugin.md b/docs/en/FAQ/kafka-plugin.md
deleted file mode 100644
index 221e60c..0000000
--- a/docs/en/FAQ/kafka-plugin.md
+++ /dev/null
@@ -1,8 +0,0 @@
-### Problem 
-Tracing doesn't work on the Kafka consumer end.
-
-### Reason
-The Kafka client is responsible for pulling messages from the brokers, after which the data is processed by user-defined code. However, only the poll action can be traced by the plug-in, and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the consumer end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.
-
-### Resolution
-For a native Kafka client, use the Application Toolkit libraries to do the manual instrumentation, with the help of the `@KafkaPollAndInvoke` annotation in `apm-toolkit-kafka`, or with the OpenTracing API. If you're using `spring-kafka` 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration. A sketch of the annotation-based approach follows.
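
For illustration, a minimal sketch of the manual approach might look like the following. The annotation comes from `apm-toolkit-kafka`; the package name, consumer logic, and topic handling here are illustrative assumptions, so check the toolkit documentation for the authoritative usage.

```java
import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
// Assumed package, following the toolkit's naming convention.
import org.apache.skywalking.apm.toolkit.kafka.KafkaPollAndInvoke;

public class OrderConsumer {
    // Wrapping the poll and the processing in one annotated method lets the
    // agent keep the trace context across both actions.
    @KafkaPollAndInvoke
    public void pollAndProcess(KafkaConsumer<String, String> consumer) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            handle(record);
        }
    }

    private void handle(ConsumerRecord<String, String> record) {
        // placeholder business logic
    }
}
```
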
diff --git a/docs/en/FAQ/maven-compile-npm-failure.md b/docs/en/FAQ/maven-compile-npm-failure.md
deleted file mode 100644
index 32b9109..0000000
--- a/docs/en/FAQ/maven-compile-npm-failure.md
+++ /dev/null
@@ -1,62 +0,0 @@
-### Problem: Maven compilation failure with error such as `Error: not found: python2`
-When you compile the project via Maven, it fails at module `apm-webapp` and the following error occurs.
-
-Pay attention to keywords such as `node-sass` and `Error: not found: python2`.
-
-```
-[INFO] > node-sass@4.11.0 postinstall C:\XXX\skywalking\skywalking-ui\node_modules\node-sass
-[INFO] > node scripts/build.js
-
-[ERROR] gyp verb check python checking for Python executable "python2" in the PATH
-[ERROR] gyp verb `which` failed Error: not found: python2
-[ERROR] gyp verb `which` failed     at getNotFoundError (C:\XXX\skywalking\skywalking-ui\node_modules\which\which.js:13:12)
-[ERROR] gyp verb `which` failed     at F (C:\XXX\skywalking\skywalking-ui\node_modules\which\which.js:68:19)
-[ERROR] gyp verb `which` failed     at E (C:\XXX\skywalking\skywalking-ui\node_modules\which\which.js:80:29)
-[ERROR] gyp verb `which` failed     at C:\XXX\skywalking\skywalking-ui\node_modules\which\which.js:89:16
-[ERROR] gyp verb `which` failed     at C:\XXX\skywalking\skywalking-ui\node_modules\isexe\index.js:42:5
-[ERROR] gyp verb `which` failed     at C:\XXX\skywalking\skywalking-ui\node_modules\isexe\windows.js:36:5
-[ERROR] gyp verb `which` failed     at FSReqWrap.oncomplete (fs.js:152:21)
-
-[ERROR] gyp verb `which` failed   code: 'ENOENT' }
-[ERROR] gyp verb check python checking for Python executable "python" in the PATH
-[ERROR] gyp verb `which` succeeded python C:\Users\XXX\AppData\Local\Programs\Python\Python37\python.EXE
-[ERROR] gyp ERR! configure error 
-[ERROR] gyp ERR! stack Error: Command failed: C:\Users\XXX\AppData\Local\Programs\Python\Python37\python.EXE -c import sys; print "%s.%s.%s" % sys.version_info[:3];
-[ERROR] gyp ERR! stack   File "<string>", line 1
-[ERROR] gyp ERR! stack     import sys; print "%s.%s.%s" % sys.version_info[:3];
-[ERROR] gyp ERR! stack                                ^
-[ERROR] gyp ERR! stack SyntaxError: invalid syntax
-[ERROR] gyp ERR! stack 
-[ERROR] gyp ERR! stack     at ChildProcess.exithandler (child_process.js:275:12)
-[ERROR] gyp ERR! stack     at emitTwo (events.js:126:13)
-[ERROR] gyp ERR! stack     at ChildProcess.emit (events.js:214:7)
-[ERROR] gyp ERR! stack     at maybeClose (internal/child_process.js:925:16)
-[ERROR] gyp ERR! stack     at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5)
-[ERROR] gyp ERR! System Windows_NT 10.0.17134
-......
-[INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s]
-[INFO] apm-webapp ......................................... FAILURE [ 25.857 s]
-[INFO] apache-skywalking-apm .............................. SKIPPED
-[INFO] apache-skywalking-apm-es7 .......................... SKIPPED
-```
-
-### Reason
-
-The error has nothing to do with SkyWalking.   
-According to this issue (https://github.com/sass/node-sass/issues/1176), if you live in a country where requests to `GitHub` and `npmjs.org` are slow, some precompiled binaries for the dependency `node-sass` may fail to download during `npm install`, and npm then tries to compile them itself. That's why `python2` is needed.
-
-### Resolution
-#### 1. Use a mirror. For instance, if you're in China, edit `skywalking\apm-webapp\pom.xml` as follows.    
-Find
-```
-<configuration>  
- <arguments>install --registry=https://registry.npmjs.org/</arguments>  
-</configuration>
-```
-Replace it with
-```
-<configuration>  
- <arguments>install --registry=https://registry.npm.taobao.org/ --sass_binary_site=https://npm.taobao.org/mirrors/node-sass/</arguments>  
-</configuration>
-```
-#### 2. Get a sufficiently powerful VPN.
diff --git a/docs/en/FAQ/thrift-plugin.md b/docs/en/FAQ/thrift-plugin.md
deleted file mode 100644
index 7395299..0000000
--- a/docs/en/FAQ/thrift-plugin.md
+++ /dev/null
@@ -1,10 +0,0 @@
-### Problem
-The message field with ID 8888 must be reserved.
-
-### Reason
-Because the original Thrift API cannot carry metadata to transport the trace header, we transport it by wrapping `TProtocolFactory`.
-
-Thrift allows us to append additional fields to a message even if the receiver doesn't handle them; such data is simply skipped and left unread. Based on this, the 8888th field of the message is used to store the trace header (or metadata) and to transport it. This means the field with ID 8888 must be reserved.
-
-### Resolution
-Avoid using field ID 8888 in your application.
diff --git a/docs/en/FAQ/time-and-timezone.md b/docs/en/FAQ/time-and-timezone.md
deleted file mode 100644
index 688e292..0000000
--- a/docs/en/FAQ/time-and-timezone.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Why can't I see any data in the UI?
-
-There are three main reasons no data can be shown by the UI:
-
-1. No traces have been sent to the collector.
-2. Traces have been sent, but the timezone of your containers is incorrect.
-3. Traces are in the collector, but you're not watching the correct timeframe in the UI.
-
-## No traces
-
-Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.
-
-
-## Incorrect timezone in containers
-
-Be sure to check the time in your containers.
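For example, assuming Docker, a quick check of a container's clock (the container name is a placeholder):

```
docker exec <your-container> date
```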
-
-
-## The UI isn't showing any data
-
-Be sure to configure the timeframe shown by the UI.
diff --git a/docs/en/FAQ/v3-version-upgrade.md b/docs/en/FAQ/v3-version-upgrade.md
deleted file mode 100644
index 84646bf..0000000
--- a/docs/en/FAQ/v3-version-upgrade.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Version 3.x -> 5.0.0-alpha Upgrade FAQs
-### Collector
-### Problem
-There is no information showing in the UI.
-
-### Cause
-In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but they aren't compatible with 5.0.0-alpha.
-When a service name is registered, ElasticSearch creates the column with the default type `string`, which leads to an error.
-
-### Solution
-Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.
diff --git a/docs/en/FAQ/v6-version-upgrade.md b/docs/en/FAQ/v6-version-upgrade.md
deleted file mode 100644
index 117a934..0000000
--- a/docs/en/FAQ/v6-version-upgrade.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# V6 upgrade
-SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.
-
-**NOTE**: The ways to upgrade are not limited to the steps below. 
-
-## Use Canary Release
-Like all applications, you may upgrade SkyWalking using the `canary release` method through the following steps.
-1. Deploy a new cluster using the latest version of the SkyWalking OAP with a new database cluster.
-1. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply been rebooted), point `collector.backend_service`
-to the new OAP backend, and use/add a new namespace (`agent.namespace` in the [Table of Agent Configuration Properties](../setup/service-agent/java-agent/README.md#table-of-agent-configuration-properties)), as in the config sketch after this list.
-The namespace will prevent conflicts from arising between different versions.
-1. When all target services have been rebooted, the old OAP clusters can be discarded.
-
-The `Canary Release` method works for any version upgrades.
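
For step 2, a minimal `agent.conf` sketch might look like this (the backend address and namespace values are placeholders for your new cluster):

```
# Placeholders: point the agent at the new OAP cluster and isolate it with a namespace.
agent.namespace = ${SW_AGENT_NAMESPACE:canary-v2}
collector.backend_service = ${SW_AGENT_COLLECTOR_BACKEND_SERVICES:new-oap.example.com:11800}
```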
-
-## Online Hot Reboot Upgrade
-The reason we require `Canary Release` is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the 
-cache to become unavailable for new OAP clusters.
-In version 6.5.0+ (especially for agent versions), we have [**Agent hot reboot trigger mechanism**](../setup/backend/backend-setup.md#agent-hot-reboot-trigger-mechanism-in-oap-server-upgrade).
-This streamlines the upgrade process as we **deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster**,
-and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the `cool_down` mode, and come
-back online. For more details, see the backend setup documentation.
-
-**NOTE**: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.
-
-## Agent Compatibility
-All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
-As the agent has also been enhanced in the latest versions, the SkyWalking team recommends upgrading the agent as soon as practicable.
diff --git a/docs/en/FAQ/v8-version-upgrade.md b/docs/en/FAQ/v8-version-upgrade.md
deleted file mode 100644
index df0d001..0000000
--- a/docs/en/FAQ/v8-version-upgrade.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# V8 upgrade
-Starting from SkyWalking v8, the [v3 protocol](../protocols/README.md) has been used. This makes it incompatible with previous releases.
-Users who intend to upgrade within the v8 series can follow the steps below.
-
-
-The register mechanism of v6 and v7 has been removed in v8 for better scale-out performance. Please upgrade by following the instructions below.
-1. Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking.
-2. Deploy the whole SkyWalking cluster, and expose it in a new network address.
-3. If you are using language agents, upgrade to the new agents too; meanwhile, make sure the agents for your languages are supported by the new version.
-Then, set up the backend address to the new SkyWalking OAP cluster.
diff --git a/docs/en/FAQ/vnode.md b/docs/en/FAQ/vnode.md
deleted file mode 100644
index 4ad94e3..0000000
--- a/docs/en/FAQ/vnode.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# What is VNode?
-On the trace page, you may sometimes find nodes with their spans named **VNode**, and that there are no attributes for such spans.
-
-**VNode** is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missing from the trace data in this query.
-
-## How does the UI detect the missing span(s)?
-The UI checks the parent spans and reference segments of all spans in real time. If no parent id (segment id + span id) can be found,
-it creates a VNode automatically. A minimal sketch of this idea follows.
-
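The sketch below is illustrative only, not the UI's actual code:

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: detect spans whose parent is absent from the query result.
public class VNodeSketch {
    record SpanId(String segmentId, int spanId) { }
    record Span(SpanId id, SpanId parent) { } // parent == null for root spans

    static List<SpanId> findMissingParents(Collection<Span> spans) {
        Set<SpanId> known = new HashSet<>();
        for (Span s : spans) {
            known.add(s.id());
        }
        List<SpanId> vnodes = new ArrayList<>();
        for (Span s : spans) {
            // A non-root span whose parent (segment id + span id) cannot be
            // found gets a VNode placeholder created for it.
            if (s.parent() != null && !known.contains(s.parent())) {
                vnodes.add(s.parent());
            }
        }
        return vnodes;
    }
}
```
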
-## How did this happen?
-The VNode appears when the trace data is incomplete.
-1. The agent fail-safe mechanism has been activated. The SkyWalking agent may abandon trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. 
-2. Some plug-ins may have bugs, causing some segments in the trace not to stop correctly, so they are held in memory.
-
-In such cases, parts of the trace do not exist in the query results, and thus the VNode shows up. 
diff --git a/docs/en/FAQ/why_mq_not_involved.md b/docs/en/FAQ/why_mq_not_involved.md
deleted file mode 100644
index 21b1bb3..0000000
--- a/docs/en/FAQ/why_mq_not_involved.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Why doesn't SkyWalking involve MQ in its architecture?
-This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:
-
-<img src="MQ-involved-architecture.png"/>
-
-Here's what we think.
-
-### Is MQ appropriate for communicating with the OAP backend?
-This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or may go offline. 
-But the following issues must first be addressed:
-1. Why do you think that the OAP is not powerful enough? If it were not powerful enough, the speed of data analysis would not have caught up with the producers (or agents) anyway. Then what is the point of adding new deployment requirements?
-1. Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is.
-1. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes?
-1. Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves: 
-your MQ would have to support 2 to 3 times the payload while using only 10%-20% of that capacity during usual times. Furthermore, in this case, 
-if the payload/throughput is so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak time would have arrived.
-
-With the analysis above in mind, why would you still want 100% of the traces, given the resources they would cost? 
-The preferred way would be to add a better dynamic trace sampling mechanism at the backend: when throughput exceeds a threshold, gradually reduce the active sampling rate from 100% to 10%, which means the OAP and ES could effectively be 3 times more powerful than usual, by ignoring some traces at peak times. A toy sketch of this idea follows.
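
The sketch below is illustrative only (the threshold, step size, and names are assumptions, not an actual OAP component):

```java
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: shrink the sampling rate under load, restore it afterwards.
public class DynamicSamplerSketch {
    private static final int THRESHOLD_PER_PERIOD = 10_000; // assumed threshold
    private final AtomicInteger counter = new AtomicInteger();
    private volatile int samplePercent = 100; // 100% in usual times

    /** Called once per incoming trace segment; returns whether to keep it. */
    public boolean sample() {
        counter.incrementAndGet();
        return ThreadLocalRandom.current().nextInt(100) < samplePercent;
    }

    /** Called periodically (e.g. every second) to adapt the rate. */
    public void adjust() {
        int seen = counter.getAndSet(0);
        samplePercent = seen > THRESHOLD_PER_PERIOD
                ? Math.max(10, samplePercent - 10)   // step down toward 10%
                : Math.min(100, samplePercent + 10); // recover toward 100%
    }
}
```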
-
-### Is MQ transport recommended despite its side effects?
-Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named
-`kafka-reporter` and `kafka-fetcher` for this feature since 8.1.0. 
-
-### How about MQ metrics data exporter?
-The answer is that the MQ metrics data exporter is already available. The exporter module ships with a default gRPC implementation, and you can easily provide a new implementation of this module.
diff --git a/docs/en/concepts-and-designs/backend-overview.md b/docs/en/concepts-and-designs/backend-overview.md
deleted file mode 100644
index 1889b70..0000000
--- a/docs/en/concepts-and-designs/backend-overview.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Observability Analysis Platform
-SkyWalking is an Observability Analysis Platform that provides full observability to services running in brownfield and greenfield deployments, as well as services using a hybrid model.
-
-## Capabilities
-SkyWalking covers all 3 areas of observability, including **Tracing**, **Metrics** and **Logging**.
-
-- **Tracing**. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger.
-- **Metrics**. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane 
-or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performance.
-- **Logging**. Includes logs collected from disk or over the network. Native agents can bind the tracing context to logs automatically,
-or use SkyWalking to bind the trace and log through the text content.
-
-There are 3 powerful and native language engines designed to analyze observability data from the above areas.
-1. [Observability Analysis Language](oal.md) processes native traces and service mesh data.
-1. [Meter Analysis Language](mal.md) is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry.
-1. [Log Analysis Language](lal.md) focuses on log contents and collaborates with the Meter Analysis Language.
diff --git a/docs/en/concepts-and-designs/event.md b/docs/en/concepts-and-designs/event.md
deleted file mode 100644
index b73c864..0000000
--- a/docs/en/concepts-and-designs/event.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Events
-
-SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces.
-In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc.
-Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events.
-This doc details how SkyWalking collects events and what events look like in SkyWalking.
-
-## How to Report Events
-
-The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking.
-Currently, the officially supported clients to report events are:
-
-- [ ] Java Agent Toolkit: Using the Java agent toolkit to report events within the applications.
-- [x] SkyWalking CLI: Using the CLI to report events from the command line.
-- [x] [Kubernetes Event Exporter](http://github.com/apache/skywalking-kubernetes-event-exporter): Deploying an event exporter to refine and report Kubernetes events.
-
-## Event Definitions
-
-An event contains the following fields. The definitions of event can be found at the [protocol repo](https://github.com/apache/skywalking-data-collect-protocol/tree/master/event).
-
-### UUID
-
-Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event. 
-
-### Source
-
-The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.
-
-### Name
-
-Name of the event. For example, `Start`, `Stop`, `Crash`, `Reboot`, `Upgrade`, etc.
-
-### Type
-
-Type of the event. This field is used for UI visualization: events of type `Normal` are considered normal operations,
-while those of type `Error` are considered unexpected operations, such as `Crash` events. Marking them with different colors allows us to identify them more easily.
-
-### Message
-
-The detail of the event that describes why it happened. This should be a one-line message that briefly summarizes why the event is reported. An example for an `Upgrade` event might be `Upgrade from ${from_version} to ${to_version}`.
-It's NOT recommended to include the detailed logs of this event, such as the exception stack trace.
-
-### Parameters
-
-The parameters in the `message` field. This is a simple `<string,string>` map. 
-
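-For example, for the `Upgrade` message above, the parameters map could be (the version values are hypothetical):
-
-```
-{"from_version": "8.7.0", "to_version": "8.8.0"}
-```
-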
-### Start Time
-
-The start time of the event. This field is mandatory when an event occurs.
-
-### End Time
-
-The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after `startTime`.
-
-**NOTE:** When reporting an event, you typically call the report function twice, the first time for the start of the event and the second time for the end, both with the same UUID.
-There are also cases where you already have both the start time and the end time. For example, when exporting events from a third-party system, both timestamps are already known, so you may simply call the report function once.
-
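-A minimal illustration of the two calls, using the notation from the aggregation example below (the UUID and timestamps are placeholders):
-
-```
-Event{uuid=d3f0a5b2, source={service=A,instance=a}, name=Upgrade, type=Normal, startTime=t1}             // first call: the event starts
-Event{uuid=d3f0a5b2, source={service=A,instance=a}, name=Upgrade, type=Normal, startTime=t1, endTime=t2} // second call: same UUID, now with endTime
-```
-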
-## How to Configure Alarms for Events
-
-Events are turned into metrics, and can be the source to trigger alarms. For example, if a specific event occurs a
-certain number of times within a period, an alarm can be triggered and sent.
-
-Every event has a default `value = 1`; when `n` events with the same name are reported, they are aggregated
-into `value = n`, as follows.
-
-```
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-Event{name=Unhealthy, source={service=A,instance=a}, ...}
-```
-
-will be aggregated into
-
-```
-Event{name=Unhealthy, source={service=A,instance=a}, ...} <value = 6>
-```
-
-so you can configure the following alarm rule to trigger an alarm when the `Unhealthy` event occurs more than 5 times within 10
-minutes.
-
-```yaml
-rules:
-  unhealthy_event_rule:
-    metrics-name: Unhealthy
-    # Healthiness checks are usually scheduled tasks;
-    # they may be unhealthy for the first few times,
-    # and can be unhealthy occasionally due to network jitter.
-    # Please adjust the threshold to your actual situation.
-    threshold: 5
-    op: ">"
-    period: 10
-    count: 1
-    message: Service instance has been unhealthy for 10 minutes
-```
-
-For more alarm configuration details, please refer to the [alarm doc](../setup/backend/backend-alarm.md).
-
-**Note** that the `Unhealthy` event above is only for demonstration; it is not detected by default in SkyWalking.
-However, you can use the methods in [How to Report Events](#how-to-report-events) to report this kind of event.
-
-## Correlation between events and metrics
-
-SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed
-service / instance / endpoint.
-
-By default, SkyWalking also generates some metrics for events by using [OAL](oal.md). The default metrics list for events
-may change over time; you can find the complete list
-in [event.oal](../../../oap-server/server-bootstrap/src/main/resources/oal/event.oal). If you want to generate your own
-custom metrics from events, please refer to [OAL](oal.md) on how to write OAL rules.
-
-## Known Events
-
-| Name | Type | When | Where |
-| :----: | :----: | :-----| :---- |
-| Start | Normal | When your Java Application starts with SkyWalking Agent installed, the `Start` Event will be created. | Reported from SkyWalking agent. |
-| Shutdown | Normal | When your Java Application stops with SkyWalking Agent installed, the `Shutdown` Event will be created. | Reported from SkyWalking agent. |
-| Alarm | Error | When an Alarm is triggered, the corresponding `Alarm` Event will be created. | Reported from internal SkyWalking OAP. |
-
-The following events are all reported
-by [Kubernetes Event Exporter](http://github.com/apache/skywalking-kubernetes-event-exporter). In order to see these
-events, please make sure you have deployed the exporter. 
-
-| Name | Type | When | Where |
-| :----: | :----: | :-----| :---- |
-| Killing | Normal | When the Kubernetes Pod is being killed. | Reported by Kubernetes Event Exporter. |
-| Pulling | Normal | When a docker image is being pulled for deployment. | Reported by Kubernetes Event Exporter. |
-| Pulled | Normal | When a docker image is pulled for deployment. | Reported by Kubernetes Event Exporter. |
-| Created | Normal | When a container inside a Pod is created. | Reported by Kubernetes Event Exporter. |
-| Started | Normal | When a container inside a Pod is started. | Reported by Kubernetes Event Exporter. |
-| Unhealthy | Error | When the readiness probe fails. | Reported by Kubernetes Event Exporter. |
-
-The complete event list can be found
-in [the Kubernetes codebase](https://github.com/kubernetes/kubernetes/blob/v1.21.1/pkg/kubelet/events/event.go). Please
-note that not all the events are supported by the exporter for now.
diff --git a/docs/en/concepts-and-designs/lal.md b/docs/en/concepts-and-designs/lal.md
deleted file mode 100644
index 65ef65c..0000000
--- a/docs/en/concepts-and-designs/lal.md
+++ /dev/null
@@ -1,356 +0,0 @@
-# Log Analysis Language
-
-Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use
-LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID,
-segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).
-
-The LAL config files are in YAML format, and are located under directory `lal`. You can
-set `log-analyzer/default/lalFiles` in the `application.yml` file or set environment variable `SW_LOG_LAL_FILES` to
-activate specific LAL config files.
-
-## Filter
-
-A filter is a group of [parser](#parser), [extractor](#extractor) and [sink](#sink). Users can use one or more filters
-to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log
-sent to the filter is available as property `log` in the LAL, therefore you can access the log service name
-via `log.service`. For all available fields of `log`, please refer to [the protocol definition](https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto#L41).
-
-All components are executed sequentially in the order they are declared.
-
-### Global Functions
-
-Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.
-
-- `abort`
-
-By default, all components declared are executed no matter what flags (`dropped`, `saved`, etc.) have been set. There
-are cases where you may want the filter chain to stop earlier when specified conditions are met. The `abort` function aborts
-the remaining filter chain from where it's declared, and all the remaining components won't be executed at all.
-The `abort` function serves as a fail-fast mechanism in LAL.
-
-```groovy
-filter {
-    if (log.service == "TestingService") { // Don't waste resources on TestingServices
-        abort {} // all remaining components won't be executed at all
-    }
-    // ... parsers, extractors, sinks
-}
-```
-
-Note that when you put `regexp` in an `if` statement, you need to surround the expression with `()`
-like `regexp(<the expression>)`, instead of `regexp <the expression>`.
-
-### Parser
-
-Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3
-types of parsers at the moment, namely `json`, `yaml`, and `text`.
-
-When a piece of log is parsed, there is a corresponding property available, called `parsed`, injected by LAL.
-Property `parsed` is typically a map, containing all the fields parsed from the raw logs. For example, if the parser
-is `json` / `yaml`, `parsed` is a map containing all the key-values in the `json` / `yaml`; if the parser is `text`
-, `parsed` is a map containing all the captured groups and their values (for `regexp` and `grok`).
-
-All parsers share the following options:
-
-| Option | Type | Description | Default Value |
-| ------ | ---- | ----------- | ------------- |
-| `abortOnFailure` | `boolean` | Whether the filter chain should abort if the parser failed to parse / match the logs | `true` |
-
-See examples below.
-
-#### `json`
-
-```groovy
-filter {
-    json {
-        abortOnFailure true // this is optional because it's default behaviour
-    }
-}
-```
-
-#### `yaml`
-
-```groovy
-filter {
-    yaml {
-        abortOnFailure true // this is optional because it's default behaviour
-    }
-}
-```
-
-#### `text`
-
-For unstructured logs, there are some `text` parsers for use.
-
-- `regexp`
-
-`regexp` parser uses a regular expression (`regexp`) to parse the logs. It leverages the captured groups of the regexp;
-all the captured groups can be used later in the extractors or sinks.
-`regexp` returns a `boolean` indicating whether the log matches the pattern or not.
-
-```groovy
-filter {
-    text {
-        abortOnFailure true // this is optional because it's default behaviour
-        // this is just a demo pattern
-        regexp "(?<timestamp>\\d{8}) (?<thread>\\w+) (?<level>\\w+) (?<traceId>\\w+) (?<msg>.+)"
-    }
-    extractor {
-        tag level: parsed.level
-        // we add a tag called `level` and its value is parsed.level, captured from the regexp above
-        traceId parsed.traceId
-        // we also extract the trace id from the parsed result, which will be used to associate the log with the trace
-    }
-    // ...
-}
-```
-
-- `grok` (TODO)
-
-We're aware of certain performance issues in the grok Java library, so we're currently conducting investigations and benchmarking. Contributions are
-welcome.
-
-### Extractor
-
-Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an
-endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.
-
-- `service`
-
-`service` extracts the service name from the `parsed` result, and sets it into the `LogData`, which will be persisted (if
-not dropped) and is used to associate with traces / metrics.
-
-- `instance`
-
-`instance` extracts the service instance name from the `parsed` result, and sets it into the `LogData`, which will be
-persisted (if not dropped) and is used to associate with traces / metrics.
-
-- `endpoint`
-
-`endpoint` extracts the endpoint name from the `parsed` result, and sets it into the `LogData`, which will be
-persisted (if not dropped) and is used to associate with traces / metrics.
-
-- `traceId`
-
-`traceId` extracts the trace ID from the `parsed` result, and sets it into the `LogData`, which will be persisted (if not
-dropped) and is used to associate with traces / metrics.
-
-- `segmentId`
-
-`segmentId` extracts the segment ID from the `parsed` result, and sets it into the `LogData`, which will be persisted (if
-not dropped) and is used to associate with traces / metrics.
-
-- `spanId`
-
-`spanId` extracts the span ID from the `parsed` result, and sets it into the `LogData`, which will be persisted (if not
-dropped) and is used to associate with traces / metrics.
-
-- `timestamp`
-
-`timestamp` extracts the timestamp from the `parsed` result, and sets it into the `LogData`, which will be persisted (if
-not dropped) and is used to associate with traces / metrics.
-
-The unit of `timestamp` is millisecond.
-
-- `tag`
-
-`tag` extracts the tags from the `parsed` result, and sets them into the `LogData`. The form of this extractor should look something like this: `tag key1: value, key2: value2`. You may use the properties of `parsed` as both keys and values.
-
-```groovy
-filter {
-    // ... parser
-
-    extractor {
-        tag level: parsed.level, (parsed.statusCode): parsed.statusMsg
-        tag anotherKey: "anotherConstantValue"
-    }
-}
-```
-
-- `metrics`
-
-`metrics` extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may
-configure [MAL](mal.md) for further analysis of these metrics. The dedicated MAL config files are under
-directory `log-mal-rules`, and you can set `log-analyzer/default/malFiles` to enable configured files.
-
-```yaml
-# application.yml
-# ...
-log-analyzer:
-  selector: ${SW_LOG_ANALYZER:default}
-  default:
-    lalFiles: ${SW_LOG_LAL_FILES:my-lal-config} # files are under "lal" directory
-    malFiles: ${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config} # files are under "log-mal-rules" directory
-```
-
-Examples are as follows:
-
-```groovy
-filter {
-    // ...
-    extractor {
-        service parsed.serviceName
-        metrics {
-            name "log_count"
-            timestamp parsed.timestamp
-            labels level: parsed.level, service: parsed.service, instance: parsed.instance
-            value 1
-        }
-        metrics {
-            name "http_response_time"
-            timestamp parsed.timestamp
-            labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance
-            value parsed.duration
-        }
-    }
-    // ...
-}
-```
-
-The extractor above generates a metric named `log_count`, with tag key `level` and value `1`. After that, you can
-configure MAL rules to calculate the log count grouped by logging level, like this:
-
-```yaml
-# ... other configurations of MAL
-
-metrics:
-  - name: log_count_debug
-    exp: log_count.tagEqual('level', 'DEBUG').sum(['service', 'instance']).increase('PT1M')
-  - name: log_count_error
-    exp: log_count.tagEqual('level', 'ERROR').sum(['service', 'instance']).increase('PT1M')
-
-```
-
-The other metric generated is `http_response_time`, so you can configure MAL rules to generate more useful metrics
-such as percentiles.
-
-```yaml
-# ... other configurations of MAL
-
-metrics:
-  - name: response_time_percentile
-    exp: http_response_time.sum(['le', 'service', 'instance']).increase('PT5M').histogram().histogram_percentile([50,70,90,99])
-```
-
-### Sink
-
-Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage.
-However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you've
-extracted useful information, such as metrics.
-
-#### Sampler
-
-Sampler allows you to save the logs in a sampling manner. Currently, the sampling strategy `rateLimit` is supported. We welcome
-contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling
-result. See examples in [Enforcer](#enforcer).
-
-`rateLimit` samples at most `n` logs per minute. `rateLimit("SamplerID")` requires an ID for the sampler. Sampler
-declarations with the same ID share the same sampler instance, thus sharing the same `rpm` and resetting logic.
-
-Examples:
-
-```groovy
-filter {
-    // ... parser
-
-    sink {
-        sampler {
-            if (parsed.service == "ImportantApp") {
-                rateLimit("ImportantAppSampler") {
-                    rpm 1800  // samples 1800 pieces of logs every minute for service "ImportantApp"
-                }
-            } else {
-                rateLimit("OtherSampler") {
-                    rpm 180   // samples 180 pieces of logs every minute for other services than "ImportantApp"
-                }
-            }
-        }
-    }
-}
-```
-
-#### Dropper
-
-Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to
-drop debugging logs.
-
-```groovy
-filter {
-    // ... parser
-
-    sink {
-        if (parsed.level == "DEBUG") {
-            dropper {}
-        } else {
-            sampler {
-                // ... configs
-            }
-        }
-    }
-}
-```
-
-Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.
-
-```groovy
-filter { // filter A: this is for persistence
-    // ... parser
-
-    sink {
-        sampler {
-            // .. sampler configs
-        }
-    }
-}
-filter { // filter B:
-    // ... extractors to generate many metrics
-    extractor {
-        metrics {
-            // ... metrics
-        }
-    }
-    sink {
-        dropper {} // drop all logs because they have been saved in "filter A" above.
-    }
-}
-```
-
-#### Enforcer
-
-Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have
-configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism
-has been configured.
-
-```groovy
-filter {
-    // ... parser
-
-    sink {
-        sampler {
-            // ... sampler configs
-        }
-        if (parsed.level == "ERROR" || parsed.userId == "TestingUserId") { // sample error logs or testing users' logs (userId == "TestingUserId") even if the sampling strategy is configured
-            enforcer {
-            }
-        }
-    }
-}
-```
-
-You can use `enforcer` and `dropper` to simulate a probabilistic sampler like this:
-
-```groovy
-filter {
-    // ... parser
-
-    sink {
-        sampler { // simulate a probabilistic sampler with a sampling rate of 30% (not accurate though)
-            if (Math.random() < 0.3) {
-                enforcer {}
-            } else {
-                dropper {}
-            }
-        }
-    }
-}
-```
diff --git a/docs/en/concepts-and-designs/mal.md b/docs/en/concepts-and-designs/mal.md
deleted file mode 100644
index a9a92a9..0000000
--- a/docs/en/concepts-and-designs/mal.md
+++ /dev/null
@@ -1,251 +0,0 @@
-# Meter Analysis Language
-
-The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and 
-aggregate meter data in the OAP streaming system. The result of an expression can be ingested either by the agent analyzer
-or by the OC/Prometheus analyzer.
-
-## Language data type
-
-In MAL, an expression or sub-expression can evaluate to one of the following two types:
-
- - **Sample family**:  A set of samples (metrics) containing a range of metrics whose names are identical.
- - **Scalar**: A simple numeric value that supports integer/long and floating/double.
-
-## Sample family
-
-A set of samples, which acts as the basic unit in MAL. For example:
-
-```
-instance_trace_count
-```
-
-The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:
-
-```
-instance_trace_count{region="us-west",az="az-1"} 100
-instance_trace_count{region="us-east",az="az-3"} 20
-instance_trace_count{region="asia-north",az="az-1"} 33
-```
-
-### Tag filter
-
-MAL supports four types of operations to filter samples in a sample family:
-
- - tagEqual: Filter tags exactly equal to the string provided.
- - tagNotEqual: Filter tags not equal to the string provided.
- - tagMatch: Filter tags that regex-match the string provided.
- - tagNotMatch: Filter labels that do not regex-match the string provided.
-
-For example, the following filters all `instance_trace_count` samples whose `region` is us-west or asia-north, and whose `az` is az-1:
-
-```
-instance_trace_count.tagMatch("region", "us-west|asia-north").tagEqual("az", "az-1")
-```
-### Value filter
-
-MAL supports six types of operations to filter samples in a sample family by value:
-
-- valueEqual: Filter values exactly equal to the value provided.
-- valueNotEqual: Filter values not equal to the value provided.
-- valueGreater: Filter values greater than the value provided.
-- valueGreaterEqual: Filter values greater than or equal to the value provided.
-- valueLess: Filter values less than the value provided.
-- valueLessEqual: Filter values less than or equal to the value provided.
-
-For example, this filters all instance_trace_count samples for values >= 33:
-
-```
-instance_trace_count.valueGreaterEqual(33)
-```
-### Tag manipulator
-MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.
-
-#### K8s
-MAL supports using the metadata of K8s to manipulate the tags and their values.
-This feature requires authorizing the OAP Server to access K8s's `API Server`.
-
-##### retagByK8sMeta
-`retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName)`. Adds a new tag to the sample family based on the value of an existing label. Several internal converting types are provided, including
-- K8sRetagType.Pod2Service  
-
-Adds a tag with `service` as the key and `$serviceName.$namespace` as the value, resolved from the given tag key, whose value represents the name of a pod.
-
-For example:
-```
-container_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2
-```
-Expression:
-```
-container_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace')
-```
-Output:
-```
-container_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2
-```
-
-### Binary operators
-
-The following binary arithmetic operators are available in MAL:
-
- - \+ (addition)
- - \- (subtraction)
- - \* (multiplication)
- - / (division)
-
-Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.
-
-Between two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:
-
-```
-1 + 2
-```
-
-Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:
-
-```
-instance_trace_count + 2
-``` 
-
-or 
-
-```
-2 + instance_trace_count
-``` 
-
-results in
-
-```
-instance_trace_count{region="us-west",az="az-1"} 102 // 100 + 2
-instance_trace_count{region="us-east",az="az-3"} 22 // 20 + 2
-instance_trace_count{region="asia-north",az="az-1"} 35 // 33 + 2
-```
-
-Between two sample families, a binary operator is applied to each sample in the sample family on the left and 
-its matching sample in the sample family on the right. A new sample family with an empty name will be generated,
-and only the matched tags will be retained. Samples with no matching sample in the sample family on the right will not appear in the result.
-
-Another sample family `instance_trace_analysis_error_count` is 
-
-```
-instance_trace_analysis_error_count{region="us-west",az="az-1"} 20
-instance_trace_analysis_error_count{region="asia-north",az="az-1"} 11 
-```
-
-Example expression:
-
-```
-instance_trace_analysis_error_count / instance_trace_count
-```
-
-This returns a resulting sample family containing the error rate of trace analysis. The sample with region us-east and az az-3 
-has no match and will not show up in the result:
-
-```
-{region="us-west",az="az-1"} 0.8  // 20 / 100
-{region="asia-north",az="az-1"} 0.3333  // 11 / 33
-```
-
-### Aggregation Operation
-
-Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family,
-resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:
-
- - sum (calculate sum over dimensions)
- - min (select minimum over dimensions)
- - max (select maximum over dimensions)
- - avg (calculate the average over dimensions)
- 
-These operations can be used to aggregate over all label dimensions, or to preserve distinct dimensions by passing the `by` parameter. 
-
-```
-<aggr-op>(by: <tag1, tag2, ...>)
-```
-
-Example expression:
-
-```
-instance_trace_count.sum(by: ['az'])
-```
-
-will output the following result:
-
-```
-instance_trace_count{az="az-1"} 133 // 100 + 33
-instance_trace_count{az="az-3"} 20
-```
-
-### Function
-
-`Duration` is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format `PnDTnHnMn.nS`,
- where a day is regarded as exactly 24 hours.
-
-Examples:
- - "PT20.345S" -- parses as "20.345 seconds"
- - "PT15M"     -- parses as "15 minutes" (where a minute is 60 seconds)
- - "PT10H"     -- parses as "10 hours" (where an hour is 3600 seconds)
- - "P2D"       -- parses as "2 days" (where a day is 24 hours or 86400 seconds)
- - "P2DT3H4M"  -- parses as "2 days, 3 hours and 4 minutes"
- - "P-6H3M"    -- parses as "-6 hours and +3 minutes"
- - "-P6H3M"    -- parses as "-6 hours and -3 minutes"
- - "-P-6H+3M"  -- parses as "+6 hours and -3 minutes"
-
-#### increase
-`increase(Duration)`: Calculates the increase in the time range.
-
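-For example, a sketch combining `increase` with the aggregation above (the combination is illustrative; `instance_trace_count` is the sample family from the earlier sections):
-
-```
-instance_trace_count.sum(by: ['az']).increase('PT1M')
-```
-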
-#### rate
-`rate(Duration)`: Calculates the per-second average rate of increase in the time range.
-
-#### irate
-`irate()`: Calculates the per-second instant rate of increase in the time range.
-
-#### tag
-`tag({allTags -> })`: Updates tags of samples. User can add, drop, rename and update tags.
-
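-A hypothetical sketch, assuming the closure receives all tags as a map and may mutate it (the renaming of the `az` tag is an assumption):
-
-```
-instance_trace_count.tag({tags -> tags.az = 'az::' + tags.az})
-```
-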
-#### histogram
-`histogram(le: '<the tag name of le>')`: Transforms `le`-based (less-or-equal) histogram buckets into meter system histogram buckets. 
-The `le` parameter represents the tag name of the bucket. 
-
-#### histogram_percentile
-`histogram_percentile([<p scalar>])`. Instructs the meter system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets. 
-
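-For example, reusing the pipeline from the LAL documentation above, which transforms `le`-based buckets and then calculates several percentiles:
-
-```
-http_response_time.sum(['le', 'service', 'instance']).increase('PT5M').histogram().histogram_percentile([50,70,90,99])
-```
-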
-#### time
-`time()`: Returns the number of seconds since January 1, 1970 UTC.
-
-
-## Down Sampling Operation
-MAL instructs the meter system on how to downsample metrics. Downsampling does not only refer to aggregating raw samples to the 
-`minute` level, but also to expressing `minute`-level data at higher levels, such as `hour` and `day`. 
-
-The downsampling function is called `downsampling` in MAL, and it accepts the following types:
-
- - AVG
- - SUM
- - LATEST
- - MIN (TODO)
- - MAX (TODO)
- - MEAN (TODO)
- - COUNT (TODO)
-
-The default type is `AVG`.
-
-If users want to get the latest time from `last_server_state_sync_time_in_seconds`:
-
-```
-last_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST)
-```
-
-## Metric level function
-
-These functions extract level-relevant labels from metric labels, then inform the meter system of the level to which this metric belongs.
-
 - `service([svc_label1, svc_label2...])` extracts service level labels from the array argument.
- - `instance([svc_label1, svc_label2...], [ins_label1, ins_label2...])` extracts service level labels from the first array argument, 
-                                                                        extracts instance level labels from the second array argument.
- - `endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...])` extracts service level labels from the first array argument, 
-                                                                      extracts endpoint level labels from the second array argument.
- - `serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...])` DetectPoint including `DetectPoint.CLIENT` and `DetectPoint.SERVER`, 
-   extracts `sourceService` labels from the first array argument, extracts `destService` labels from the second array argument.
-
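-A hypothetical sketch, assuming the sample family carries `service` and `instance` tags:
-
-```
-instance_trace_count.instance(['service'], ['instance'])
-```
-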
-## More Examples
-
-Please refer to [OAP Self-Observability](../../../oap-server/server-bootstrap/src/main/resources/fetcher-prom-rules/self.yaml)
diff --git a/docs/en/concepts-and-designs/manual-sdk.md b/docs/en/concepts-and-designs/manual-sdk.md
deleted file mode 100644
index cd3d037..0000000
--- a/docs/en/concepts-and-designs/manual-sdk.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Manual instrument SDK
-Our incredible community has contributed to the manual instrument SDK.
-- [Go2Sky](https://github.com/SkyAPM/go2sky). Go SDK follows the SkyWalking format.
-- [C++](https://github.com/SkyAPM/cpp2sky). C++ SDK follows the SkyWalking format. 
-
-## What are the SkyWalking format and the propagation protocols?
-See these protocols in [protocols document](../protocols/README.md).
-
-## Envoy tracer
-Envoy has its internal tracer implementation for SkyWalking. Read [SkyWalking Tracer doc](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/trace/v3/skywalking.proto.html?highlight=skywalking) and [SkyWalking tracing sandbox](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/skywalking_tracing.html?highlight=skywalking) for more details.
diff --git a/docs/en/concepts-and-designs/meter.md b/docs/en/concepts-and-designs/meter.md
deleted file mode 100644
index 617c7f9..0000000
--- a/docs/en/concepts-and-designs/meter.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Meter System
-Meter system is another streaming calculation mode designed for metrics data. In the [OAL](oal.md), there are clear 
-[Scope Definitions](scope-definitions.md), including definitions for native objects. Meter system is focused on the data type itself,
-and provides a more flexible approach to the end user in defining the scope entity.
-
-The meter system is open to different receivers and fetchers in the backend, 
-see the [backend setup document](../setup/backend/backend-setup.md) for more details.
-
-Every metric is declared in the meter system to include the following attributes:
-1. **Metrics Name**. A globally unique name to avoid overlapping with the OAL variable names.
-1. **Function Name**. The function used for this metric, namely distributed aggregation, value calculation or downsampling calculation,
-depending on the function implementation. The function also determines the data structure; for example, the function Avg works on Long values.
-1. **Scope Type**. Unlike the OAL, which has plenty of logical scope definitions, the meter system only requires the type. 
-Type values include service, instance, and endpoint, just as we have described in the Overview section.
-The values of the scope entity name, such as the service name, are required when the metrics data values are generated.
-
-NOTE: The metrics must be declared in the bootstrap stage, and no changes are allowed at runtime.
-
-The Meter System supports the following binding functions:
-- **avg**. Calculates the avg value for every entity under the same metrics name.
-- **histogram**. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage.
-- **percentile**. See [percentile in WIKI](https://en.wikipedia.org/wiki/Percentile). Unlike the OAL, we provide
-50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in
-the (0, 100) range.
diff --git a/docs/en/concepts-and-designs/oal.md b/docs/en/concepts-and-designs/oal.md
deleted file mode 100644
index 5e580fb..0000000
--- a/docs/en/concepts-and-designs/oal.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# Observability Analysis Language
-OAL (Observability Analysis Language) serves to analyze incoming data in streaming mode. 
-
-OAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to 
-learn and use.
-
-
-Since 6.3, the OAL engine has been embedded in the OAP server runtime as `oal-rt` (OAL Runtime).
-OAL scripts are now found in the `/config` folder, and users can simply change them and reboot the server to run them.
-However, OAL is a compiled language, and the OAL Runtime generates Java code dynamically.
-
-You can set `SW_OAL_ENGINE_DEBUG=Y` in the system environment to see which classes are generated.
-
-## Grammar
-Scripts should be named `*.oal`
-```
-// Declare the metrics.
-METRICS_NAME = from(SCOPE.(* | [FIELD][,FIELD ...]))
-[.filter(FIELD OP [INT | STRING])]
-.FUNCTION([PARAM][, PARAM ...])
-
-// Disable hard code 
-disable(METRICS_NAME);
-```
-
-## Scope
-Primary **SCOPE**s are `All`, `Service`, `ServiceInstance`, `Endpoint`, `ServiceRelation`, `ServiceInstanceRelation`, and `EndpointRelation`.
-There are also some secondary scopes which belong to a primary scope. 
-
-See [Scope Definitions](scope-definitions.md), where you can find all existing Scopes and Fields.
-
-
-## Filter
-Use filter to build conditions on the values of fields, by using the field name and an expression. 
-
-The expressions support linking by `and`, `or` and `(...)`. 
-The OPs support `==`, `!=`, `>`, `<`, `>=`, `<=`, `in [...]` ,`like %...`, `like ...%` , `like %...%` , `contain` and `not contain`, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered. 
-
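-For example, a hypothetical metric (the name `endpoint_slow_calls` is an assumption) counting requests slower than 1000ms on endpoints whose names start with `serv`:
-
-```
-endpoint_slow_calls = from(Endpoint.*).filter(latency > 1000 and name like "serv%").count()
-```
-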
-## Aggregation Function
-The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions. 
-
-Functions provided
-- `longAvg`. The avg of all input per scope entity. The input field must be a long.
-> instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();
-
-In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field `max`.
-- `doubleAvg`. The avg of all input per scope entity. The input field must be a double.
-> instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();
-
-In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field `usePercent`.
-- `percent`. The number or ratio expressed as a fraction of 100, where the input matches the condition.
-> endpoint_percent = from(Endpoint.*).percent(status == true);
-
-In this case, all input represents requests of each endpoint, and the condition is `endpoint.status == true`.
-- `rate`. The rate, expressed as a fraction of 100, where the input matches the condition.
-> browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);
-
-In this case, all input represents requests of each browser app traffic, the `numerator` condition is `trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR` and `denominator` condition is `trafficCategory == BrowserAppTrafficCategory.NORMAL`.
-Parameter (1) is the `numerator` condition.
-Parameter (2) is the `denominator` condition.
-- `count`. The sum of calls per scope entity.
-> service_calls_sum = from(Service.*).count();
-
-In this case, the number of calls of each service. 
-
-- `histogram`. See [Heatmap in WIKI](https://en.wikipedia.org/wiki/Heat_map).
-> all_heatmap = from(All.latency).histogram(100, 20);
-
-In this case, the thermodynamic heatmap of all incoming requests. 
-Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group.
-Parameter (2) is the group amount. In the above case, 21 (param value + 1) groups are 0-100ms, 101-200ms, ... 1901-2000ms, 2000+ms 
-
-- `apdex`. See [Apdex in WIKI](https://en.wikipedia.org/wiki/Apdex).
-> service_apdex = from(Service.latency).apdex(name, status);
-
-In this case, the apdex score of each service.
-Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder.
-Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.
-
-- `p99`, `p95`, `p90`, `p75`, `p50`. See [percentile in WIKI](https://en.wikipedia.org/wiki/Percentile).
-> all_percentile = from(All.latency).percentile(10);
-
-**percentile** is the first multiple-value metric, introduced in 7.0.0. As a metric with multiple values, it can be queried through the `getMultipleLinearIntValues` GraphQL query.
-In this case, see `p99`, `p95`, `p90`, `p75`, and `p50` of all incoming requests. The parameter is the precision of the latency calculation; in the above case, 120ms and 124ms are considered to produce the same response time.
-Before 7.0.0, the `p99`, `p95`, `p90`, `p75`, and `p50` functions were used to calculate these metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script. 
-> all_p99 = from(All.latency).p99(10);
-
-In this case, the p99 value of all incoming requests. The parameter is the precision of the p99 latency calculation; in the above case, 120ms and 124ms are considered to produce the same response time.
-
-## Metrics name
-The metrics name is used by the storage implementor, alarm and query modules. Type inference is supported by the core.
-
-## Group
-All metrics data will be grouped by Scope.ID and min-level TimeBucket. 
-
-- In the `Endpoint` scope, the Scope.ID is the same as the Endpoint ID (i.e. the unique ID based on the service and its endpoint).
-
-## Disable
-`Disable` is an advanced statement in OAL, which is only used in certain cases.
-Some of the aggregation and metrics are hardcoded in the core. Examples include `segment` and `top_n_database_statement`.
-This `disable` statement is designed to render them inactive.
-By default, none of them are disabled.
-
-**NOTICE**, all disable statements should be in `oal/disable.oal` script file. 
-
-## Examples
-```
-// Calculate p99 of both Endpoint1 and Endpoint2
-endpoint_p99 = from(Endpoint.latency).filter(name in ("Endpoint1", "Endpoint2")).summary(0.99)
-
-// Calculate p99 of Endpoint name started with `serv`
-serv_Endpoint_p99 = from(Endpoint.latency).filter(name like "serv%").summary(0.99)
-
-// Calculate the avg response time of each Endpoint
-endpoint_avg = from(Endpoint.latency).avg()
-
-// Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps.
-endpoint_percentile = from(Endpoint.latency).percentile(10)
-
-// Calculate the percentage of requests whose response status is true, for each service.
-endpoint_success = from(Endpoint.*).filter(status == true).percent()
-
-// Calculate the sum of response code in [404, 500, 503], for each service.
-endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count()
-
-// Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service.
-endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count()
-
-// Calculate the sum of endpoint name in ["/v1", "/v2"], for each service.
-endpoint_url_sum = from(Endpoint.*).filter(name in ["/v1", "/v2"]).count()
-
-// Calculate the sum of calls for each service.
-endpoint_calls = from(Endpoint.*).count()
-
-// Calculate the CPM with the GET method for each service. The value is made up with `tagKey:tagValue`.
-service_cpm_http_get = from(Service.*).filter(tags contain "http.method:GET").cpm()
-
-// Calculate the CPM with the HTTP method except for the GET method for each service. The value is made up with `tagKey:tagValue`.
-service_cpm_http_other = from(Service.*).filter(tags not contain "http.method:GET").cpm()
-
-disable(segment);
-disable(endpoint_relation_server_side);
-disable(top_n_database_statement);
-```
diff --git a/docs/en/concepts-and-designs/overview.md b/docs/en/concepts-and-designs/overview.md
deleted file mode 100644
index 787086d..0000000
--- a/docs/en/concepts-and-designs/overview.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Overview
-SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native
-infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds.
-It is a modern APM, specially designed for cloud native, container based distributed systems.
-
-## Why use SkyWalking?
-SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all,
-like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA
-(with calls for Python and C++ SDK contributions). 
-In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
-SkyWalking's service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks
-such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.
-
-SkyWalking provides observability capabilities for **service**(s), **service instance**(s), **endpoint**(s). The terms Service,
-Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:
-
-- **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service
-  name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio.
-- **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it 
-  doesn't need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.
-- **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. 
-
-SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every 
-Service/Service Instance/Endpoint and to set alarm rules.
-
-In addition, you can integrate 
-1. Other distributed tracing systems, such as Zipkin, Jaeger and OpenCensus, with SkyWalking native agents and SDKs.
-1. Other metrics systems, such as Prometheus, Sleuth (Micrometer), and OpenTelemetry.
-
-## Architecture
-SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.
-
-<img src="https://skywalking.apache.org/assets/frame-v8.jpg?u=20200423"/>
-
-- **Probes** collect data and reformat them to meet SkyWalking requirements (different probes support different sources).
-- **Platform backend** supports data aggregation, analysis and streaming processing, covering traces, metrics, and logs.
-- **Storage** houses SkyWalking data through an open/pluggable interface. You can choose an existing implementation, such as
-  ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors are welcome!
-- **UI** is a highly customizable web-based interface allowing SkyWalking end users to visualize and manage SkyWalking data.
-
-
-## What is next?
-- Learn SkyWalking's [Project Goals](project-goals.md)
-- FAQ, [Why doesn't SkyWalking involve MQ in the architecture by default?](../FAQ/why_mq_not_involved.md)
diff --git a/docs/en/concepts-and-designs/probe-introduction.md b/docs/en/concepts-and-designs/probe-introduction.md
deleted file mode 100644
index d6e9cd6..0000000
--- a/docs/en/concepts-and-designs/probe-introduction.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Probe Introduction
-In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of 
-collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways in which the probe performs such tasks. But ultimately, they all work towards the same goal: to collect and reformat data, and then to send it to the backend.
-
-On a high level, there are three typical categories in all SkyWalking probes.
-- **Language based native agent**. These agents run in the target service user space, such as a part of the user codes. For example,
-the SkyWalking Java agent uses the `-javaagent` command line argument to manipulate code at runtime, where `manipulate` means to change and inject
-the user's code. Other kinds of agents use certain hook or intercept mechanisms provided by the target libraries. As you can see, these agents are based on specific languages and libraries.
- 
-- **Service Mesh probes**. Service Mesh probes collect data from the sidecar, the control plane in the service mesh, or the proxy. In the old days, the proxy
-was only used as the ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.
- 
-- **3rd-party instrument library**. SkyWalking accepts the data formats of many widely used instrument libraries. It analyzes the
-data and transforms it into SkyWalking's formats of trace, metrics or both. This feature started with accepting Zipkin span data. See
-[Receiver for other tracers](../setup/backend/backend-receivers.md) for more information. 
-
-You don't need to use **Language based native agent** and **Service Mesh probe** at the same time, since they both serve to collect
-metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.
-
-There are several recommended ways on how to use these probes:
-1. Use **Language based native agent** only.
-1. Use **3rd-party instrument library** only, like the Zipkin instrument ecosystem.
-1. Use **Service Mesh probe** only.
-1. Use **Service Mesh probe** with **Language based native agent** or **3rd-party instrument library** in tracing status. (Advanced usage)
-
-What is the meaning of **in tracing status**?
-
-By default, **Language based native agent** and **3rd-party instrument library** both send distributed traces to the backend,
-where analyses and aggregation on those traces are performed. **In tracing status** means that the backend considers these traces as something
-like logs. In other words, the backend saves them, and builds the links between traces and metrics, such as `which endpoint and service does the trace belong to?`.
-
-## What is next?
-- Learn more about the probes supported by SkyWalking in [Service auto instrument agent](service-agent.md), [Manual instrument SDK](manual-sdk.md),
-[Service Mesh probe](service-mesh-probe.md) and [Zipkin receiver](../setup/backend/backend-receivers.md#zipkin-receiver).
-- After understanding how the probe works, see the [backend overview](backend-overview.md) for more on analysis and persistence.
-
diff --git a/docs/en/concepts-and-designs/project-goals.md b/docs/en/concepts-and-designs/project-goals.md
deleted file mode 100644
index 6ba3b2f..0000000
--- a/docs/en/concepts-and-designs/project-goals.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Design Goals
-This document outlines the core design goals for the SkyWalking project.
-
-- **Maintaining Observability**. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.
-
-- **Topology, Metrics and Trace Together**. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics.
-For example, when the endpoint latency becomes long, you want to see the slowest trace to find out why. As you can see,
-they go from the big picture down to the details; they are all needed. SkyWalking integrates and provides many features to
-make this possible and easy to understand.
-
-- **Light Weight**. Two parts need to be lightweight. (1) In the probe, we only depend on a network
-communication framework, preferring gRPC. This keeps the probe as small as possible, to avoid library
-conflicts and VM payload, such as the permsize requirement in the JVM.
-(2) As an observability platform, the backend is a second- or third-level system in your project environment.
-So we are using our own lightweight framework to build the backend core. Then you don't need to 
-deploy and maintain big data tech platforms. SkyWalking should be simple in its tech stack.
-
-- **Pluggable**. The SkyWalking core team provides many default implementations, but they are definitely not enough,
-nor do they fit every scenario. So, we make many features pluggable. 
-
-- **Portability**. SkyWalking can run in multiple environments, including: 
-(1) using a traditional registry center like Eureka;
-(2) using an RPC framework that includes service discovery, like Spring Cloud or Apache Dubbo;
-(3) using Service Mesh in modern infrastructure;
-(4) using cloud services;
-(5) across-cloud deployment. 
-SkyWalking should run well in all of these cases.
-
-- **Interoperability**. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community.
-Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus.
-It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.
-
-
-## What is next?
-- See [probe Introduction](probe-introduction.md) to learn about SkyWalking's probe groups.
-- From [backend overview](backend-overview.md), you can understand what the backend does after it receives probe data.
-- If you want to customize the UI, start with the [UI overview](ui-overview.md) document. 
diff --git a/docs/en/concepts-and-designs/scope-definitions.md b/docs/en/concepts-and-designs/scope-definitions.md
deleted file mode 100644
index 146368a..0000000
--- a/docs/en/concepts-and-designs/scope-definitions.md
+++ /dev/null
@@ -1,301 +0,0 @@
-# Scopes and Fields
-Using the Aggregation Function, the requests will be grouped by time and **Group Key(s)** in each scope.
-
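-For example (borrowed from the OAL examples), a metric over the `All` scope aggregates every incoming request, grouped by time only:
-
-```
-all_percentile = from(All.latency).percentile(10)
-```
-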
-### SCOPE `All`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name  | The service name of each request.  |   | string |
-| serviceInstanceName  | The name of the service instance ID.  |   | string |
-| endpoint  | The endpoint path of each request.  |   | string |
-| latency  | The time taken by each request. |   |  int(in ms)  |
-| status  | The success or failure of the request.  |   | bool(true for success)  |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the rpc response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| tags | The labels of each request. Each value is made up by `TagKey:TagValue` in the segment. | | `List<String>` |
-
-### SCOPE `Service`
-
-This calculates the metrics data from each request of the service. 
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The name of the service. | | string |
-| nodeType | The kind of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| serviceInstanceName | The name of the service instance ID. | | string |
-| endpointName | The name of the endpoint, such as a full path of HTTP URI. | | string |
-| latency | The time taken by each request. | | int |
-| status | Indicates the success or failure of the request. | | bool(true for success)  |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the rpc response code. | | string |
-| type | The type of each request. Such as: Database, HTTP, RPC, gRPC. | | enum |
-| tags | The labels of each request. Each value is made up by `TagKey:TagValue` in the segment. | | `List<String>` |
-| sideCar.internalErrorCode | The sidecar/gateway proxy internal error code. The value is based on the implementation. | | string|
-| tcpInfo.receivedBytes | The received bytes of the TCP traffic, if this request is a TCP call. | | long |
-| tcpInfo.sentBytes | The sent bytes of the TCP traffic, if this request is a TCP call. | | long |
-
-### SCOPE `ServiceInstance`
-
-This calculates the metrics data from each request of the service instance. 
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| nodeType | The kind of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| endpointName | The name of the endpoint, such as a full path of the HTTP URI. | | string|
-| latency | The time taken by each request. | | int |
-| status | Indicates the success or failure of the request. | | bool(true for success) |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call. E.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the rpc response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| tags | The labels of each request. Each value is made up by `TagKey:TagValue` in the segment. | | `List<String>` |
-| sideCar.internalErrorCode | The sidecar/gateway proxy internal error code. The value is based on the implementation. | | string|
-| tcpInfo.receivedBytes | The received bytes of the TCP traffic, if this request is a TCP call. | | long |
-| tcpInfo.sentBytes | The sent bytes of the TCP traffic, if this request is a TCP call. | | long |
-
-#### Secondary scopes of `ServiceInstance` 
-
-This calculates the metrics data when the service instance is a JVM, collected through the javaagent.
-
-1. SCOPE `ServiceInstanceJVMCPU`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| usePercent | The percentage of CPU time spent.| | double|
-
-2. SCOPE `ServiceInstanceJVMMemory`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| heapStatus | Indicates whether the metric has a heap property or not. | | bool |
-| init | See the JVM documentation. | | long |
-| max | See the JVM documentation. | | long |
-| used | See the JVM documentation. | | long |
-| committed | See the JVM documentation. | | long |
-
-3. SCOPE `ServiceInstanceJVMMemoryPool`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| poolType | The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM. | | enum |
-| init | See the JVM documentation. | | long |
-| max | See the JVM documentation. | | long |
-| used | See the JVM documentation. | | long |
-| committed | See the JVM documentation. | | long |
-
-4. SCOPE `ServiceInstanceJVMGC`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| phrase | The GC phase: either NEW or OLD. | | enum |
-| time | The time spent in GC. | | long |
-| count | The count of GC operations. | | long |
-
-5. SCOPE `ServiceInstanceJVMThread`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| liveCount | The current number of live threads. | | long |
-| daemonCount | The current number of daemon threads. | | long |
-| peakCount | The current number of peak threads. | | long |
-| runnableStateThreadCount | The current number of threads in the runnable state. | | long |
-| blockedStateThreadCount | The current number of threads in the blocked state. | | long |
-| waitingStateThreadCount | The current number of threads in the waiting state. | | long |
-| timedWaitingStateThreadCount | The current number of threads in the timed-waiting state. | | long |
-
-6. SCOPE `ServiceInstanceJVMClass`
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name |  The name of the service instance, such as `ip:port@Service Name`.  **Note**: Currently, the native agent uses `uuid@ipv4` as the instance name, which does not assist in setting up a filter in aggregation. | | string|
-| serviceName | The name of the service. | | string |
-| loadedClassCount | The number of classes that are currently loaded in the JVM. | | long |
-| totalUnloadedClassCount | The total number of classes unloaded since the JVM has started execution. | | long |
-| totalLoadedClassCount | The total number of classes that have been loaded since the JVM has started execution. | | long |
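-
-For illustration, these JVM scopes are consumed by OAL in the same way as the others, e.g. `instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg()` following the default scripts.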
-
-### SCOPE `Endpoint`
-
-This calculates the metrics data from each request of the endpoint in the service. 
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The name of the endpoint, such as a full path of the HTTP URI. | | string |
-| serviceName | The name of the service. | | string |
-| serviceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| serviceInstanceName | The name of the service instance. | | string |
-| latency | The time taken by each request. | | int |
-| status | Indicates the success or failure of the request. | | bool(true for success) |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the RPC response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| tags | The labels of each request. Each value is made up of `TagKey:TagValue` in the segment. | | `List<String>` |
-| sideCar.internalErrorCode | The sidecar/gateway proxy internal error code. The value is based on the implementation. | | string|
-| tcpInfo.receivedBytes | The received bytes of the TCP traffic, if this request is a TCP call. | | long |
-| tcpInfo.sentBytes | The sent bytes of the TCP traffic, if this request is a TCP call. | | long |
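-
-For example, typical OAL metrics on this scope are `endpoint_cpm = from(Endpoint.*).cpm()` and `endpoint_avg = from(Endpoint.latency).longAvg()` (illustrative forms following the default scripts).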
-
-### SCOPE `ServiceRelation`
-
-This calculates the metrics data from each request between services.
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| sourceServiceName | The name of the source service. | | string |
-| sourceServiceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| sourceServiceInstanceName | The name of the source service instance. | | string |
-| destServiceName | The name of the destination service. | | string |
-| destServiceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| destServiceInstanceName | The name of the destination service instance. | | string |
-| endpoint | The endpoint used in this call. | | string |
-| componentId | The ID of the component used in this call. | yes | string |
-| latency | The time taken by each request. | | int |
-| status | Indicates the success or failure of the request. | | bool(true for success) |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the RPC response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| detectPoint | Where the relation is detected. The value may be client, server, or proxy. | yes | enum|
-| tlsMode | The TLS mode between source and destination services, such as `service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == "mTLS").cpm()` || string|
-| sideCar.internalErrorCode | The sidecar/gateway proxy internal error code. The value is based on the implementation. | | string|
-| tcpInfo.receivedBytes | The received bytes of the TCP traffic, if this request is a TCP call. | | long |
-| tcpInfo.sentBytes | The sent bytes of the TCP traffic, if this request is a TCP call. | | long |
-
-### SCOPE `ServiceInstanceRelation`
-
-This calculates the metrics data from each request between service instances.
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| sourceServiceName | The name of the source service. | | string |
-| sourceServiceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| sourceServiceInstanceName | The name of the source service instance. | | string |
-| destServiceName | The name of the destination service. | | string |
-| destServiceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| destServiceInstanceName | The name of the destination service instance. | | string |
-| endpoint | The endpoint used in this call. | | string |
-| componentId | The ID of the component used in this call. | yes | string |
-| latency | The time taken by each request. | | int |
-| status | Indicates the success or failure of the request. | | bool(true for success) |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the RPC response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| detectPoint | Where the relation is detected. The value may be client, server, or proxy. | yes | enum|
-| tlsMode | The TLS mode between source and destination service instances, such as `service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == "mTLS").cpm()` || string|
-| sideCar.internalErrorCode | The sidecar/gateway proxy internal error code. The value is based on the implementation. | | string|
-| tcpInfo.receivedBytes | The received bytes of the TCP traffic, if this request is a TCP call. | | long |
-| tcpInfo.sentBytes | The sent bytes of the TCP traffic, if this request is a TCP call. | | long |
-
-### SCOPE `EndpointRelation`
-
-This calculates the metrics data of the dependency between endpoints. 
-This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. 
-Therefore, the `EndpointRelation` scope aggregation comes into effect only in services under tracing by SkyWalking native agents, 
-including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| endpoint | The parent endpoint in the dependency.| | string|
-| serviceName | The name of the service. | | string |
-| serviceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| childEndpoint| The endpoint used by the parent endpoint in row(1). | | string |
-| childServiceName | The name of the service to which the child endpoint belongs. | | string |
-| childServiceNodeType | The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache. | | enum |
-| childServiceInstanceName | The name of the service instance to which the child endpoint belongs. | | string |
-| rpcLatency | The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself. | | int |
-| componentId | The ID of the component used in this call. | yes | string |
-| status | Indicates the success or failure of the request. | | bool(true for success) |
-| ~~responseCode~~ | Deprecated. The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| httpResponseStatusCode | The response code of the HTTP response, if this request is an HTTP call, e.g. 200, 404, 302. | | int |
-| rpcStatusCode | The string value of the RPC response code. | | string |
-| type | The type of each request, such as Database, HTTP, RPC, or gRPC. | | enum |
-| detectPoint | Indicates where the relation is detected. The value may be client, server, or proxy. | yes | enum|
-
-
-### SCOPE `BrowserAppTraffic`
-
-This calculates the metrics data from each request of the browser application (browser only).
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The browser application name of each request. |  | string |
-| count | The number of requests, which is fixed at 1. |  | int |
-| trafficCategory | The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR. | | enum |
-| errorCategory | The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN. | | enum |
-
-### SCOPE `BrowserAppSingleVersionTraffic`
-
-This calculates the metrics data from each request of a single version in the browser application (browser only).
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The single version name of each request. |  | string |
-| serviceName | The name of the browser application. | | string |
-| count | The number of requests, which is fixed at 1. |  | int |
-| trafficCategory | The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR. | | enum |
-| errorCategory | The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN. | | enum |
-
-### SCOPE `BrowserAppPageTraffic`
-
-This calculates the metrics data from each request of the page in the browser application (browser only).
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The page name of each request. |  | string |
-| serviceName | The name of the browser application. | | string |
-| count | The number of requests, which is fixed at 1. |  | int |
-| trafficCategory | The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR. | | enum |
-| errorCategory | The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN. | | enum |
-
-
-### SCOPE `BrowserAppPagePerf`
-
-This calculates the metrics data from each request of the page in the browser application (browser only).
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The page name of each request. |  | string |
-| serviceName | The name of the browser application. | | string |
-| redirectTime | The time taken to redirect. |  | int(in ms) |
-| dnsTime | The DNS query time. | | int(in ms) |
-| ttfbTime | Time to first byte. | | int(in ms) |
-| tcpTime | TCP connection time. | | int(in ms) |
-| transTime | Content transfer time.  | | int(in ms) |
-| domAnalysisTime | Dom parsing time. | | int(in ms) |
-| fptTime | First paint time or blank screen time. | | int(in ms) |
-| domReadyTime | Dom ready time. | | int(in ms) |
-| loadPageTime | Page full load time. | | int(in ms) |
-| resTime | Synchronous load resources in the page. | | int(in ms) |
-| sslTime | Only valid for HTTPS. | | int(in ms) |
-| ttlTime | Time to interact. | | int(in ms) |
-| firstPackTime | First pack time. | | int(in ms) |
-| fmpTime | First Meaningful Paint. | | int(in ms) |
-
-### SCOPE `Event`
-
-This calculates the metrics data from [events](event.md).
-
-| Name | Remarks | Group Key | Type | 
-|---|---|---|---|
-| name | The name of the event. |  | string |
-| service | The name of the service to which the event belongs. | | string |
-| serviceInstance | The service instance to which the event belongs, if any. | | string |
-| endpoint | The service endpoint to which the event belongs, if any. | | string |
-| type | The type of the event, `Normal` or `Error`. | | string|
-| message | The message of the event. | | string |
-| parameters | The parameters in the `message`, see [parameters](event.md#parameters). | | string |
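-
-For example, an OAL expression such as `event_total = from(Event.*).count()` (an illustrative metric name) could aggregate events using the fields above.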
diff --git a/docs/en/concepts-and-designs/service-agent.md b/docs/en/concepts-and-designs/service-agent.md
deleted file mode 100644
index 594c10a..0000000
--- a/docs/en/concepts-and-designs/service-agent.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Service Auto Instrument Agent
-The service auto instrument agent is a subset of language-based native agents. This kind of agent is based on
-certain language-specific features, especially those of a VM-based language. 
-
-## What does Auto Instrument mean?
-Many users learned about these agents when they first heard that "Not a single line of code has to be changed". SkyWalking used to mention this in its readme page as well.
-However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases.
-But it is important to understand that the codes are in fact still modified by the agent, which is usually known as "runtime code manipulation". The underlying logic is that the
-auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through 
-`javaagent premain`.
-
-In fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than
-runtime.
-
-## What are the limitations?
-Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:
-
-- **Higher possibility of in-process propagation in many cases**. Many high-level languages, such as Java and .NET, are used for building business systems. 
- Most business logic codes run in the same thread for each request, which means propagation is based on the thread ID, so that the stack module can make sure the context is safe.
-
-- **Only works in certain frameworks or libraries**. Since the agents are responsible for modifying the codes during runtime, the codes are already known 
-to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes.
-For example, see the [SkyWalking Java agent supported list](../setup/service-agent/java-agent/Supported-list.md).
-
-- **Cross-thread operations are not always supported**. Like what is mentioned above regarding in-process propagation, most codes (especially business codes)
-run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages even provide coroutines or similar components like `Goroutine`, which allow developers to run async processes at a low cost. In such cases, auto instrument will face problems. 
-
-So, there's nothing mysterious about auto instrument. In short, agent developers write an activation script to make 
-instrument codes work for you. That's it! 
-
-## What is next?
-If you want to learn about manual instrument libs in SkyWalking, see the [Manual instrument SDK](manual-sdk.md) section.
-
diff --git a/docs/en/concepts-and-designs/service-mesh-probe.md b/docs/en/concepts-and-designs/service-mesh-probe.md
deleted file mode 100644
index 423fb43..0000000
--- a/docs/en/concepts-and-designs/service-mesh-probe.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Service Mesh Probe
-Service Mesh probes use the extendable mechanism provided in the Service Mesh implementor, like Istio.
-
-## What is Service Mesh?
-The following explanation comes from Istio's documentation.
-> The term "service mesh" is often used to describe the networks of microservices that make up such applications and the interactions between them.
-As a service mesh grows in size and complexity, it can become harder to understand and manage.
-Its requirements can include discovery, load balancing, failure recovery, metrics, and monitoring, and often more complex operational requirements
-such as A/B testing, canary releases, rate limiting, access control, and end-to-end authentication.
-
-## Where does the probe collect data from?
-Istio is a typical Service Mesh design and implementation. It defines the **Control Plane** and **Data Plane**,
-which are widely used. Here is the Istio Architecture:
-
-![Istio Architecture](https://istio.io/latest/docs/ops/deployment/architecture/arch.svg)
-
-The Service Mesh probe can choose to collect data from the **Data Plane**. In Istio, it means collecting telemetry data from 
-Envoy sidecar (Data Plane). The probe collects two telemetry entities from the client end and the server end per request.
-
-## How does Service Mesh make backend work?
-With this kind of probe, you can see that there is no trace related to it. So how does the SkyWalking
-platform manage to work?
-
-The Service Mesh probe collects telemetry data from each request, so it knows information such as the source, destination,
-endpoint, latency and status. From this information, the backend can build the whole topology map by combining these calls
-into lines, as well as the metrics of each node through their incoming requests. The backend generates the same
-metrics data when parsing the trace data. In short:
-**The Service Mesh metrics work exactly the same way as the metrics that are generated by trace parsers.**
diff --git a/docs/en/concepts-and-designs/ui-overview.md b/docs/en/concepts-and-designs/ui-overview.md
deleted file mode 100644
index af898d8..0000000
--- a/docs/en/concepts-and-designs/ui-overview.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Visualization
-The SkyWalking native UI provides a default solution for visualization.
-It provides observability-related graphs
-on overview, service, service instance, endpoint, trace, and alarm, 
-such as topology maps, dependency graphs, heatmaps, etc.
-
-We know that many of our users have integrated SkyWalking
-into their own products. 
-If you would like to do that too, please refer to the [SkyWalking query protocol](../protocols/README.md#query-protocol).
- 
diff --git a/docs/en/guides/Component-library-settings.md b/docs/en/guides/Component-library-settings.md
deleted file mode 100644
index 649dd1d..0000000
--- a/docs/en/guides/Component-library-settings.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Component library settings
-Component library settings are about your own or third-party libraries used in the monitored application.
-
-In the agent or SDK, regardless of whether the library name is collected as an ID or a String (literally, e.g. SpringMVC), the collector
-formats data by ID for better performance and lower storage requirements. 
-
-Also, the collector infers the remote service based on the component library. For example: if
-the component library is the MySQL Driver library, then the remote service should be MySQL Server. 
-
-For these two reasons, the collector requires two parts of settings in this file:
-1. Component library ID, names and languages.
-1. Remote server mapping based on the local library.
-
-**All component names and IDs must be defined in this file.**
-
-## Component Library ID
-Define all names and IDs from component libraries which are used in the monitored application.
-This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.
-
-- Name: the component name used in agent and UI
-- ID: Unique ID. All IDs are reserved once they are released.
-- Languages: Programming languages that may use this component. Multiple languages should be separated by `,`.
-
-### ID rules
-- Java and multi languages shared: (0, 3000)
-- .NET Platform reserved: [3000, 4000)
-- Node.js Platform reserved: [4000, 5000)
-- Go reserved: [5000, 6000)
-- Lua reserved: [6000, 7000)
-- Python reserved: [7000, 8000)
-- PHP reserved: [8000, 9000)
-- C++ reserved: [9000, 10000)
-
-Example:
-```yaml
-Tomcat:
-  id: 1
-  languages: Java
-HttpClient:
-  id: 2
-  languages: Java,C#,Node.js
-Dubbo:
-  id: 3
-  languages: Java
-H2:
-  id: 4
-  languages: Java
-```
-
-## Remote server mapping
-The remote server will be inferred from the local component. The mappings are based on names in the component library.
-
-- Key: client component library name
-- Value: server component name
-
-```yaml
-Component-Server-Mappings:
-  Jedis: Redis
-  StackExchange.Redis: Redis
-  Redisson: Redis
-  Lettuce: Redis
-  Zookeeper: Zookeeper
-  SqlClient: SqlServer
-  Npgsql: PostgreSQL
-  MySqlConnector: Mysql
-  EntityFrameworkCore.InMemory: InMemoryDatabase
-```
diff --git a/docs/en/guides/E2E-local-remote-debug.md b/docs/en/guides/E2E-local-remote-debug.md
deleted file mode 100644
index 1342b55..0000000
--- a/docs/en/guides/E2E-local-remote-debug.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Using E2E local remote debugging
-The E2E remote debugging port of service containers is `5005`. If developers want to use remote debugging, they need to add the remote debugging parameters to the service start command, and then expose port `5005`. 
-
-For example, this is the configuration of a container in [skywalking/test/e2e/e2e-test/docker/base-compose.yml](https://github.com/apache/skywalking/blob/master/test/e2e/e2e-test/docker/base-compose.yml). [JAVA_OPTS](https://github.com/apache/skywalking/blob/190ca93b6bf48e9d966de5b05cd6490ba54b7266/docker/oap/docker-entrypoint.sh) is a preset variable for passing additional parameters in the OAP service startup command, so we only need to add the Java remote debugging parameter `-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005` (as shown in the `JAVA_OPTS` block below) and expose port `5005`.
-```yml
-oap:
-    image: skywalking/oap:latest
-    expose:
-      ...
-      - 5005
-    ...
-    environment:
-      ...
-      JAVA_OPTS: >-
-        ...
-        -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005
-    ...
-```
-Finally, if the E2E test fails and is retried, the developer can find the port mappings in the file `skywalking/test/e2e/e2e-test/remote_real_port` and select the host port of the corresponding service for remote debugging. For example,
-```bash
-#remote_real_port
-
-#The remote debugging port on the host is 32783
-oap-localhost:32783 
-
-#The remote debugging port on the host is 32782
-provider-localhost:32782 
-```
diff --git a/docs/en/guides/How-to-build.md b/docs/en/guides/How-to-build.md
deleted file mode 100644
index 1667665..0000000
--- a/docs/en/guides/How-to-build.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# How to build a project
-This document will help you compile and build the project with Maven and set up your IDE.
-
-## Building the Project
-**Since we are using Git submodule, we do not recommend using the `GitHub` tag or release page to download source codes for compiling.**
-
-### Maven behind the Proxy
-If you need to execute a build behind a proxy, edit the *.mvn/jvm.config* file and set the following properties:
-```properties
--Dhttp.proxyHost=proxy_ip
--Dhttp.proxyPort=proxy_port
--Dhttps.proxyHost=proxy_ip
--Dhttps.proxyPort=proxy_port 
--Dhttp.proxyUser=username
--Dhttp.proxyPassword=password
-```
-
-### Building from GitHub
-1. Prepare git, JDK8+, and Maven 3.6+.
-1. Clone the project.
-
-    If you want to build a release from source codes, set a `tag name` by using `git clone -b [tag_name] ...` while cloning.
-    
-    ```bash
-    git clone --recurse-submodules https://github.com/apache/skywalking.git
-    cd skywalking/
-    
-    OR
-    
-    git clone https://github.com/apache/skywalking.git
-    cd skywalking/
-    git submodule init
-    git submodule update
-    ```
-   
-1. Run `./mvnw clean package -DskipTests`
-1. All packages are in `/dist` (.tar.gz for Linux and .zip for Windows).
-
-### Building from Apache source code release
-- What is the `Apache source code release`?
-
-For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from [SkyWalking Apache download page](http://skywalking.apache.org/downloads/). There is no requirement related to git when compiling this. Just follow these steps.
-
-1. Prepare JDK8+ and Maven 3.6+.
-1. Run `./mvnw clean package -DskipTests`.
-1. All packages are in `/dist` (.tar.gz for Linux and .zip for Windows).
-
-### Advanced compiling
-SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual.
-If you just want to recompile part of the project, you have the following options:
-- Compile agent and package
->  ./mvnw package -Pagent,dist
-
-or
-
-> make build.agent
-
-If you intend to compile a single plugin, such as one in the dev stage, you could
->  cd plugin_module_dir & mvn clean package
-
-- Compile backend and package
->  ./mvnw package -Pbackend,dist
-
-or
-
-> make build.backend
-
-- Compile UI and package
->  ./mvnw package -Pui,dist
-
-or
-
-> make build.ui
-
-
-### Building docker images
-You can build docker images of `backend` and `ui` with the `Makefile` located in the root folder.
-
-Refer to [Build docker image](../../../docker) for more details.
-
-## Setting up your IntelliJ IDEA
-**NOTE**: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section **[Building from GitHub](#building-from-github)**. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section **[Building from Apache source code release](#building-from-apache-source-code-release)**.
-
-1. Import the project as a maven project.
-1. Run `./mvnw compile -Dmaven.test.skip=true` to compile the project and generate source codes, since we use gRPC and protobuf.
-1. Set **Generated Source Codes** folders.
-    * `grpc-java` and `java` folders in **apm-protocol/apm-network/target/generated-sources/protobuf**
-    * `grpc-java` and `java` folders in **oap-server/server-core/target/generated-sources/protobuf**
-    * `grpc-java` and `java` folders in **oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs**
-    * `grpc-java` and `java` folders in **oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf**
-    * `grpc-java` and `java` folders in **oap-server/exporter/target/generated-sources/protobuf**
-    * `grpc-java` and `java` folders in **oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf**
-    * `grpc-java` and `java` folders in **oap-server/server-alarm-plugin/target/generated-sources/protobuf**
-    * `antlr4` folder in **oap-server/oal-grammar/target/generated-sources**
diff --git a/docs/en/guides/How-to-release.md b/docs/en/guides/How-to-release.md
deleted file mode 100644
index 5bd71f5..0000000
--- a/docs/en/guides/How-to-release.md
+++ /dev/null
@@ -1,288 +0,0 @@
-Apache SkyWalking release guide
---------------------
-If you're a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.
-
-
-## Set up your development environment
-Follow the steps in the [Apache maven deployment environment document](http://www.apache.org/dev/publishing-maven-artifacts.html#dev-env)
-to set gpg tool and encrypt passwords.
-
-Use the following block as a template and place it in `~/.m2/settings.xml`.
-
-```
-<settings>
-...
-  <servers>
-    <!-- To publish a snapshot of some part of Maven -->
-    <server>
-      <id>apache.snapshots.https</id>
-      <username> <!-- YOUR APACHE LDAP USERNAME --> </username>
-      <password> <!-- YOUR APACHE LDAP PASSWORD (encrypted) --> </password>
-    </server>
-    <!-- To stage a release of some part of Maven -->
-    <server>
-      <id>apache.releases.https</id>
-      <username> <!-- YOUR APACHE LDAP USERNAME --> </username>
-      <password> <!-- YOUR APACHE LDAP PASSWORD (encrypted) --> </password>
-    </server>
-   ...
-  </servers>
-</settings>
-```
-
-## Add your GPG public key
-1. Add your GPG public key into the [SkyWalking GPG KEYS](https://dist.apache.org/repos/dist/release/skywalking/KEYS) file.
-If you are a committer, use your Apache ID and password to log in to this svn, and update the file. **Don't override the existing file.**
-1. Upload your GPG public key to the public GPG site, such as [MIT's site](http://pgp.mit.edu:11371/). This site should be in the
-Apache maven staging repository checklist.
-
-## Test your settings
-This step is for testing purposes only. If your environment is set up correctly, you don't need to check it every time.
-```
-./mvnw clean install -Pall (this will build artifacts, sources and sign)
-```
-
-## Prepare for the release
-```
-./mvnw release:clean
-./mvnw release:prepare -DautoVersionSubmodules=true -Pall
-```
-
-- Set version number as x.y.z, and tag as **v**x.y.z (The version tag must start with **v**. You will find out why this is necessary in the next step.)
-
-_You could do a GPG signature before preparing for the release. If you need to input the password to sign, and Maven doesn't provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run `gpg --sign xxx` on any file. This will allow it to remember the password for long enough to prepare for the release._ 
-
-## Stage the release 
-```
-./mvnw release:perform -DskipTests -Pall
-```
-
-- The release will be automatically inserted into a temporary staging repository.
-
-## Build and sign the source code package
-```shell
-export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha)
-cd tools/releasing
-bash create_source_release.sh
-```
-
-This script takes care of the following things:
-1. Use `v` + `RELEASE_VERSION` as tag to clone the codes.
-1. Complete `git submodule init/update`.
-1. Exclude all unnecessary files in the target source tar, such as `.git`, `.github`, and `.gitmodules`. See the script for more details.
-1. Execute `gpg` and `shasum 512`. 
-
-
-`apache-skywalking-apm-x.y.z-src.tgz` and files ending with `.asc` and `.sha512` may be found in the `tools/releasing` folder.
-
-## Locate and download the distribution package in Apache Nexus Staging repositories
-1. Use your Apache ID to log in to `https://repository.apache.org/`.
-1. Go to `https://repository.apache.org/#stagingRepositories`.
-1. Search `skywalking` and find your staging repository.
-1. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the [set PGP document](#add-your-gpg-public-key),
-if you haven't done it before.
-1. Go to `{REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z`.
-1. Download `.tar.gz` and `.zip` and files ending with `.asc` and `.sha1`.
-
-
-## Upload to Apache svn
-1. Use your Apache ID to log in to `https://dist.apache.org/repos/dist/dev/skywalking/`.
-1. Create a folder and name it by the release version and round, such as: `x.y.z`
-1. Upload the source code package to the folder with files ending with `.asc` and `.sha512`.
-    * Package name: `apache-skywalking-x.y.z-src.tar.gz`
-    * See Section "Build and sign the source code package" for more details 
-1. Upload the distribution package to the folder with files ending with `.asc` and `.sha512`.
-    * Package name:  `apache-skywalking-bin-x.y.z.tar.gz` and `apache-skywalking-bin-x.y.z.zip`
-    * See Section "Locate and download the distribution package in Apache Nexus Staging repositories" for more details.
-    * Create a `.sha512` package: `shasum -a 512 file > file.sha512`
-
-## Make the internal announcements
-Send an announcement mail in dev mail list.
-
-```
-Mail title: [ANNOUNCE] SkyWalking x.y.z test build available
-
-Mail content:
-The test build of x.y.z is available.
-
-We welcome any comments you may have, and will take all feedback into
-account if a quality vote is called for this build.
-
-Release notes:
-
- * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md
-
-Release Candidate:
-
- * https://dist.apache.org/repos/dist/dev/skywalking/xxxx
- * sha512 checksums
-   - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz
-   - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz
-   - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip
-
-Maven 2 staging repository:
-
- * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/
-
-Release Tag :
-
- * (Git Tag) x.y.z
-
-Release CommitID :
-
- * https://github.com/apache/skywalking/tree/(Git Commit ID)
- * Git submodule
-   * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID)
-   * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID)
-   * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID)
-
-Keys to verify the Release Candidate :
-
- * https://dist.apache.org/repos/dist/release/skywalking/KEYS
-
-Guide to build the release from source :
-
- * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md
-
-A vote regarding the quality of this test build will be initiated
-within the next couple of days.
-```
-
-## Wait for at least 48 hours for test responses
-Any PMC member, committer or contributor can test the release features and provide feedback.
-Based on that, the PMC will decide whether to start the voting process.
-
-## Call a vote in dev
-Call a vote in `dev@skywalking.apache.org`
-
-```
-Mail title: [VOTE] Release Apache SkyWalking version x.y.z
-
-Mail content:
-Hi All,
-This is a call for vote to release Apache SkyWalking version x.y.z.
-
-Release notes:
-
- * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md
-
-Release Candidate:
-
- * https://dist.apache.org/repos/dist/dev/skywalking/xxxx
- * sha512 checksums
-   - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz
-   - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz
-   - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip
-
-Maven 2 staging repository:
-
- * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/
-
-Release Tag :
-
- * (Git Tag) x.y.z
-
-Release CommitID :
-
- * https://github.com/apache/skywalking/tree/(Git Commit ID)
- * Git submodule
-   * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID)
-   * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID)
-   * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID)
-
-Keys to verify the Release Candidate :
-
- * https://dist.apache.org/repos/dist/release/skywalking/KEYS
-
-Guide to build the release from source :
-
- * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md
-
-Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote.
-[ ] +1 Release this package.
-[ ] +0 No opinion.
-[ ] -1 Do not release this package because....
-```
-
-## Vote Check
-All PMC members and committers should check these before casting +1 votes.
-
-1. Features test.
-1. All artifacts in the staging repository are published with `.asc`, `.md5`, and `.sha1` files.
-1. Source code and distribution package (`apache-skywalking-x.y.z-src.tar.gz`, `apache-skywalking-bin-x.y.z.tar.gz`, `apache-skywalking-bin-x.y.z.zip`)
-are found in `https://dist.apache.org/repos/dist/dev/skywalking/x.y.z` with `.asc` and `.sha512`.
-1. `LICENSE` and `NOTICE` are in the source code and distribution package.
-1. Check `shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512`.
-1. Check `gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz`
-1. Build a distribution package from the source code package (`apache-skywalking-x.y.z-src.tar.gz`) by following this [doc](https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-build.md#build-from-apache-source-code-release).
-1. Check the Apache License Header. Run `docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check`. (No binaries in source codes)
-
-
-The voting process is as follows:
-1. All PMC member votes are +1 binding, and all other votes are +1 but non-binding.
-1. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.
-
-
-## Publish the release
-1. Move source codes tar and distribution packages to `https://dist.apache.org/repos/dist/release/skywalking/`.
-```
-> export SVN_EDITOR=vim
-> svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking
-....
-enter your apache password
-....
-
-```
-2. Release in the nexus staging repo.
-3. Public download source and distribution tar/zip are located in `http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx`.
-The Apache mirror path is the only release information that we publish.
-4. Public asc and sha512 are located in `https://www.apache.org/dist/skywalking/x.y.z/xxx`.
-5. Public KEYS point to  `https://www.apache.org/dist/skywalking/KEYS`.
-6. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document
-links. The links can be found following rules (3) to (6) above.
-7. Add a release event on the website homepage and event page. Announce the public release with changelog or key features.
-8. Send ANNOUNCE email to `dev@skywalking.apache.org`, `announce@apache.org`. The sender should use the Apache email account.
-```
-Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released
-
-Mail content:
-Hi all,
-
-Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z.
-
-SkyWalking: APM (application performance monitor) tool for distributed systems,
-especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures.
-
-This release contains a number of new features, bug fixes and improvements compared to
-version a.b.c(last release). The notable changes since x.y.z include:
-
-(Highlight key changes)
-1. ...
-2. ...
-3. ...
-
-Please refer to the change log for the complete list of changes:
-https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md
-
-Apache SkyWalking website:
-http://skywalking.apache.org/
-
-Downloads:
-http://skywalking.apache.org/downloads/
-
-Twitter:
-https://twitter.com/ASFSkyWalking
-
-SkyWalking Resources:
-- GitHub: https://github.com/apache/skywalking
-- Issue: https://github.com/apache/skywalking/issues
-- Mailing list: dev@skywalking.apache.org
-
-
-- Apache SkyWalking Team
-```
-
-## Clean up the old releases
-Once the latest release has been published, you should clean up the old releases from the mirror system.
-1. Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking).
-2. Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.
diff --git a/docs/en/guides/Java-Plugin-Development-Guide.md b/docs/en/guides/Java-Plugin-Development-Guide.md
deleted file mode 100644
index 7fca22b..0000000
--- a/docs/en/guides/Java-Plugin-Development-Guide.md
+++ /dev/null
@@ -1,564 +0,0 @@
-# Plugin Development Guide
-This document describes how to understand, develop and contribute a plugin. 
-
-There are two kinds of plugins:
-1. [Tracing plugin](#tracing-plugin). Follow the distributed tracing concept to collect spans with tags and logs.
-1. [Meter plugin](#meter-plugin). Collect numeric metrics in Counter, Gauge, and Histogram formats.
-
-We also provide the [plugin test tool](#plugin-test-tool) to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.
-
-# Tracing plugin
-## Concepts
-### Span
-The span is an important and recognized concept in the distributed tracing system. Learn about the **span** from the
-[Google Dapper Paper](https://research.google.com/pubs/pub36356.html) and
-[OpenTracing](http://opentracing.io).
-
-SkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to those of the Google Dapper Paper and OpenTracing. We have also extended the span.
-
-There are three types of span:
-
-1.1 EntrySpan
-The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the 
-application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.
-
-1.2 LocalSpan
-The LocalSpan represents a normal Java method that does not concern remote services. It is neither an MQ producer/consumer
-nor a service (e.g. HTTP service) provider/consumer.
-
-1.3 ExitSpan
-The ExitSpan represents a client of service or MQ-producer. It is named the `LeafSpan` in the early versions of SkyWalking.
-For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan. 
-
-### ContextCarrier
-In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate 
-across processes. This is where the ContextCarrier comes in.
-
-Here are the steps on how to use the **ContextCarrier** in an `A->B` distributed call.
-1. Create a new and empty `ContextCarrier` on the client end.
-1. Create an ExitSpan by `ContextManager#createExitSpan` or use `ContextManager#inject` to initialize the `ContextCarrier`.
-1. Place all items of `ContextCarrier` into headers (e.g. HTTP headers), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka).
-1. The `ContextCarrier` propagates to the server end through the service call.
-1. On the server end, obtain all items from the headers, attachments or messages.
-1. Create an EntrySpan by `ContextManager#createEntrySpan` or use `ContextManager#extract` to bind the client and server ends.
-
-
-See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:
-1. Using the Apache HTTPComponent client plugin on the client end
-```java
-            span = ContextManager.createExitSpan("/span/operation/name", contextCarrier, "ip:port");
-            CarrierItem next = contextCarrier.items();
-            while (next.hasNext()) {
-                next = next.next();
-                httpRequest.setHeader(next.getHeadKey(), next.getHeadValue());
-            }
-```
-
-2. Using the Tomcat 7 server plugin on the server end
-```java
-            ContextCarrier contextCarrier = new ContextCarrier();
-            CarrierItem next = contextCarrier.items();
-            while (next.hasNext()) {
-                next = next.next();
-                next.setHeadValue(request.getHeader(next.getHeadKey()));
-            }
-
-            span = ContextManager.createEntrySpan("/span/operation/name", contextCarrier);
-```
-
-### ContextSnapshot
-Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async processes (in-memory MQ) 
-and batch processes are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating
-context, except that cross-thread tracing does not require serialization.
-
-Here are the three steps on cross-thread propagation:
-1. Use `ContextManager#capture` to get the ContextSnapshot object.
-1. Let the sub-thread access the ContextSnapshot through method arguments or by carrying it in existing arguments.
-1. Use `ContextManager#continued` in sub-thread.
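-
-A minimal sketch of these three steps (`executor` and `doJob` are illustrative assumptions, not part of the API):
-```java
-// Step 1: capture the current tracing context in the original thread.
-ContextSnapshot snapshot = ContextManager.capture();
-executor.submit(() -> {
-    // Open a span for the async work in the sub-thread.
-    ContextManager.createLocalSpan("/async/job");
-    // Step 3: bind the captured context to the sub-thread.
-    ContextManager.continued(snapshot);
-    try {
-        doJob();
-    } finally {
-        ContextManager.stopSpan();
-    }
-});
-```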
-
-## Core APIs
-### ContextManager
-ContextManager provides all major and primary APIs.
-
-1. Create EntrySpan
-```java
-public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier)
-```
-Create EntrySpan according to the operation name (e.g. service name, uri) and **ContextCarrier**.
-
-2. Create LocalSpan
-```java
-public static AbstractSpan createLocalSpan(String endpointName)
-```
-Create LocalSpan according to the operation name (e.g. full method signature).
-
-3. Create ExitSpan
-```java
-public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer)
-```
-Create ExitSpan according to the operation name (e.g. service name, uri) and the new **ContextCarrier** and peer address 
-(e.g. ip+port, hostname+port).
-
-### AbstractSpan
-```java
-    /**
-     * Set the component id, which defines in {@link ComponentsDefine}
-     *
-     * @param component
-     * @return the span for chaining.
-     */
-    AbstractSpan setComponent(Component component);
-
-    AbstractSpan setLayer(SpanLayer layer);
-
-    /**
-     * Set a key:value tag on the Span.
-     *
-     * @return this Span instance, for chaining
-     */
-    AbstractSpan tag(String key, String value);
-
-    /**
-     * Record an exception event of the current walltime timestamp.
-     *
-     * @param t any subclass of {@link Throwable}, which occurs in this span.
-     * @return the Span, for chaining
-     */
-    AbstractSpan log(Throwable t);
-
-    AbstractSpan errorOccurred();
-
-    /**
-     * Record an event at a specific timestamp.
-     *
-     * @param timestamp The explicit timestamp for the log record.
-     * @param event the events
-     * @return the Span, for chaining
-     */
-    AbstractSpan log(long timestamp, Map<String, ?> event);
-
-    /**
-     * Sets the string name for the logical operation this span represents.
-     *
-     * @return this Span instance, for chaining
-     */
-    AbstractSpan setOperationName(String endpointName);
-```
-Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.
-
-SpanLayer is the type of span. There are 5 values:
-1. UNKNOWN (default)
-1. DB
-1. RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call)
-1. HTTP
-1. MQ
-
-Component IDs are defined and reserved by the SkyWalking project.
-For extension of the component name/ID, please follow the [component library definitions and extensions](Component-library-settings.md) document.
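-
-Putting this together, a minimal sketch of setting the required attributes right after creating an exit span (the operation name, peer address, component, and tag are illustrative choices):
-```java
-AbstractSpan span = ContextManager.createExitSpan("/db/query", contextCarrier, "127.0.0.1:3306");
-// Component and layer are the two required attributes, especially for EntrySpan and ExitSpan.
-span.setComponent(ComponentsDefine.MYSQL_JDBC_DRIVER);
-span.setLayer(SpanLayer.DB);
-span.tag("db.type", "sql");
-```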
-
-### Special Span Tags
-All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.
-
-#### Tag key `http.status_code`
-The value should be an integer. The response code of OAL entities corresponds to this value.
-
-#### Tag keys `db.statement` and `db.type`.
-The value of `db.statement` should be a string that represents the database statement, such as SQL, or `[No statement]/`+span#operationName if the value is empty.
-When the exit span contains this tag, OAP samples the slow statements based on `agent-analyzer/default/maxSlowSQLLength`.
-The threshold of slow statements is defined in accordance with [`agent-analyzer/default/slowDBAccessThreshold`](../setup/backend/slow-db-statement.md).
-
-#### Extension logic endpoint: Tag key `x-le`
-The logic endpoint is a concept that doesn't represent a real RPC call, but still requires statistics.
-The value of `x-le` should be in JSON format. There are two options:
-1. Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.
-```json
-{
-  "name": "GraphQL-service",
-  "latency": 100,
-  "status": true
-}
-```
-2. Declare the current local span representing a logic endpoint.
-```json
-{
-  "logic-span": true
-}
-``` 
-
-### Advanced APIs
-#### Async Span APIs
-There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.
-
-```java
-    /**
-     * The span finishes in the current tracing context, but the current span is still alive until {@link #asyncFinish}
-     * is called.
-     *
-     * This method must be called<br/>
-     * 1. In the original thread (tracing context).
-     * 2. When the current span is the active span.
-     *
-     * While alive, the tags, logs and attributes of the span can be changed in any thread.
-     *
-     * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match.
-     *
-     * @return the current span
-     */
-    AbstractSpan prepareForAsync();
-
-    /**
-     * Notify the span, it could be finished.
-     *
-     * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match.
-     *
-     * @return the current span
-     */
-    AbstractSpan asyncFinish();
-```
-1. Call `#prepareForAsync` in the original context.
-1. Run `ContextManager#stopSpan` in the original context when your job in the current thread is complete.
-1. Propagate the span to any other thread.
-1. Once the above steps are all set, call `#asyncFinish` in any thread.
-1. When `#asyncFinish` is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API executions).
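-
-A minimal sketch of this lifecycle (the async `client` callback is an illustrative assumption):
-```java
-AbstractSpan span = ContextManager.createLocalSpan("/async/operation");
-span.prepareForAsync();        // step 1: in the original tracing context
-ContextManager.stopSpan();     // step 2: the original thread's work is done
-client.execute(response -> {   // step 3: the span object travels with the callback
-    span.tag("result", String.valueOf(response)); // tags can still be changed while the span is alive
-    span.asyncFinish();        // step 4: finish in any thread
-});
-```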
-
-## Develop a plugin
-### Abstract
-The basic method to trace is to intercept a Java method, by using byte code manipulation technology and the AOP concept.
-SkyWalking has packaged the byte code manipulation technology and tracing context propagation,
-so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).
-
-### Intercept
-SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.
-
-#### v1 APIs
-* Extend `ClassInstanceMethodsEnhancePluginDefine` to define `constructor` intercept points and `instance method` intercept points.
-* Extend `ClassStaticMethodsEnhancePluginDefine` to define `class method` intercept points.
-
-Of course, you can extend `ClassEnhancePluginDefine` to set all intercept points, although it is uncommon to do so.
-
-#### v2 APIs
-v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).
-
-* Extend `ClassInstanceMethodsEnhancePluginDefineV2` to define `constructor` intercept points and `instance method` intercept points.
-* Extend `ClassStaticMethodsEnhancePluginDefineV2` to define `class method` intercept points.
-
-Of course, you can extend `ClassEnhancePluginDefineV2` to set all intercept points, although it is uncommon to do so.
-
-
-### Implement plugin
-See the following demonstration on how to implement a plugin by extending `ClassInstanceMethodsEnhancePluginDefine`.
-
-1. Define the target class name.
-```java
-protected abstract ClassMatch enhanceClass();
-```
-
-ClassMatch represents how to match the target classes. There are 4 ways:
-* `byName`: Based on the full class names (package name + `.` + class name).
-* `byClassAnnotationMatch`: Depends on whether there are certain annotations in the target classes.
-* `byMethodAnnotationMatch`: Depends on whether there are certain annotations in the methods of the target classes.
-* `byHierarchyMatch`: Based on the parent classes or interfaces of the target classes.
-
-**Attention**:
-* Never use `ThirdPartyClass.class` in the instrumentation definitions, such as `takesArguments(ThirdPartyClass.class)`, or `byName(ThirdPartyClass.class.getName())`, because of the fact that `ThirdPartyClass` does not necessarily exist in the target application and this will break the agent; we have `import` checks to assist in checking this in CI, but it doesn't cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-cl [...]
-* Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use `*.class.getName()` to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues.
-* `by*AnnotationMatch` does not support inherited annotations.
-* We do not recommend using `byHierarchyMatch` unless necessary. Using it may trigger the interception of
-many unexpected methods, which would cause performance issues.
-
-Example:
-```java
-@Override
-protected ClassMatch enhanceClass() {
-    return byName("org.apache.catalina.core.StandardEngineValve");
-}
-```
-
-2. Define an instance method intercept point.
-```java
-public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints();
-
-public interface InstanceMethodsInterceptPoint {
-    /**
-     * class instance methods matcher.
-     *
-     * @return methods matcher
-     */
-    ElementMatcher<MethodDescription> getMethodsMatcher();
-
-    /**
-     * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor.
-     */
-    String getMethodsInterceptor();
-
-    boolean isOverrideArgs();
-}
-```
-You may also use `Matcher` to set the target methods. Return **true** in `isOverrideArgs` if you want to change the argument
-refs in the interceptor.
-
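-An illustrative intercept point for a hypothetical `invoke` instance method (the matcher and the interceptor class name are assumptions):
-```java
-@Override
-public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() {
-    return new InstanceMethodsInterceptPoint[] {
-        new InstanceMethodsInterceptPoint() {
-            @Override
-            public ElementMatcher<MethodDescription> getMethodsMatcher() {
-                return named("invoke"); // net.bytebuddy.matcher.ElementMatchers#named, statically imported
-            }
-
-            @Override
-            public String getMethodsInterceptor() {
-                return "org.example.DemoInterceptor"; // hypothetical interceptor class
-            }
-
-            @Override
-            public boolean isOverrideArgs() {
-                return false;
-            }
-        }
-    };
-}
-```
-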
-The following sections will tell you how to implement the interceptor.
-
-3. Add plugin definition into the `skywalking-plugin.def` file.
-```properties
-tomcat-7.x/8.x=TomcatInstrumentation
-```
-
-4. Set up `witnessClasses` and/or `witnessMethods` if the instrumentation has to be activated in specific versions.
-
-   Example:
-
-   ```java
-   // The plugin is activated only when the foo.Bar class exists.
-   @Override
-   protected String[] witnessClasses() {
-     return new String[] {
-       "foo.Bar"
-     };
-   }
-   
-   // The plugin is activated only when the foo.Bar#hello method exists.
-   @Override
-   protected List<WitnessMethod> witnessMethods() {
-     List<WitnessMethod> witnessMethodList = new ArrayList<>();
-     WitnessMethod witnessMethod = new WitnessMethod("foo.Bar", ElementMatchers.named("hello"));
-     witnessMethodList.add(witnessMethod);
-     return witnessMethodList;
-   }
-   ```
-   For more examples, see [WitnessTest.java](../../../apm-sniffer/apm-agent-core/src/test/java/org/apache/skywalking/apm/agent/core/plugin/witness/WitnessTest.java)
-
-   
-
-### Implement an interceptor
-An interceptor for an instance method has to implement
-`org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor`.
-```java
-/**
- * An interceptor that intercepts the method's invocation. The target methods will be defined in {@link
- * ClassEnhancePluginDefine}'s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine}
- */
-public interface InstanceMethodsAroundInterceptor {
-    /**
-     * called before target method invocation.
-     *
-     * @param result change this result, if you want to truncate the method.
-     * @throws Throwable
-     */
-    void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class<?>[] argumentsTypes,
-        MethodInterceptResult result) throws Throwable;
-
-    /**
-     * called after target method invocation. This runs even if the method's invocation triggers an exception.
-     *
-     * @param ret the method's original return value.
-     * @return the method's actual return value.
-     * @throws Throwable
-     */
-    Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class<?>[] argumentsTypes,
-        Object ret) throws Throwable;
-
-    /**
-     * called when an exception occurs.
-     *
-     * @param t the exception that occurred.
-     */
-    void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class<?>[] argumentsTypes,
-        Throwable t);
-}
-```
-Use the core APIs in the before, after, and exception-handling stages, typically to create, stop, and mark spans around the target method.
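-
-As a minimal sketch (not the official Tomcat interceptor; the class name and operation name are illustrative), an interceptor could wrap the target method in a local span via `ContextManager` from the agent core. Note that `handleMethodException` only marks the error; `afterMethod` still runs on exceptions (per the javadoc above) and closes the span.
-
-```java
-import java.lang.reflect.Method;
-
-import org.apache.skywalking.apm.agent.core.context.ContextManager;
-import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.EnhancedInstance;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.MethodInterceptResult;
-
-public class DemoInterceptor implements InstanceMethodsAroundInterceptor {
-    @Override
-    public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
-                             Class<?>[] argumentsTypes, MethodInterceptResult result) throws Throwable {
-        // Open a local span covering the intercepted method.
-        ContextManager.createLocalSpan("Demo/" + method.getName());
-    }
-
-    @Override
-    public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
-                              Class<?>[] argumentsTypes, Object ret) throws Throwable {
-        // Close the span opened in beforeMethod, and return the original result untouched.
-        ContextManager.stopSpan();
-        return ret;
-    }
-
-    @Override
-    public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments,
-                                      Class<?>[] argumentsTypes, Throwable t) {
-        // Mark the active span as errored and attach the exception as a span log.
-        AbstractSpan span = ContextManager.activeSpan();
-        span.errorOccurred();
-        span.log(t);
-    }
-}
-```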
-
-
-#### V2 APIs
-The V2 API interceptor uses `MethodInvocationContext context` to replace `MethodInterceptResult result` in `beforeMethod`,
-and the context is also added as a new parameter in `afterMethod` and `handleMethodException`.
-
-The `MethodInvocationContext` is shared within a single method invocation only, so it is safe to use under concurrent execution.
-
-```java
-/**
- * A v2 interceptor that intercepts the method's invocation. The target methods will be defined in {@link
- * ClassEnhancePluginDefineV2}'s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine}
- */
-public interface InstanceMethodsAroundInterceptorV2 {
-    /**
-     * called before target method invocation.
-     *
-     * @param context the method invocation context including result context.
-     */
-    void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class<?>[] argumentsTypes,
-                      MethodInvocationContext context) throws Throwable;
-
-    /**
-     * called after target method invocation. This runs even if the method's invocation triggers an exception.
-     *
-     * @param ret the method's original return value. May be null if the method triggers an exception.
-     * @return the method's actual return value.
-     */
-    Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class<?>[] argumentsTypes,
-                       Object ret, MethodInvocationContext context) throws Throwable;
-
-    /**
-     * called when an exception occurs.
-     *
-     * @param t the exception that occurred.
-     */
-    void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments,
-                               Class<?>[] argumentsTypes, Throwable t, MethodInvocationContext context);
-
-}
-```
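-
-The following is a hedged sketch of a V2 interceptor. It assumes the v2 types live under `org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.v2` and that `MethodInvocationContext` exposes plain `setContext(Object)`/`getContext()` accessors for per-invocation state; verify both against the agent core version you build against.
-
-```java
-import java.lang.reflect.Method;
-
-import org.apache.skywalking.apm.agent.core.context.ContextManager;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.EnhancedInstance;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.v2.InstanceMethodsAroundInterceptorV2;
-import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.v2.MethodInvocationContext;
-
-public class DemoInterceptorV2 implements InstanceMethodsAroundInterceptorV2 {
-    @Override
-    public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
-                             Class<?>[] argumentsTypes, MethodInvocationContext context) throws Throwable {
-        // Stash per-invocation state in the context rather than in a field,
-        // so concurrent invocations never share state.
-        context.setContext(ContextManager.createLocalSpan("DemoV2/" + method.getName()));
-    }
-
-    @Override
-    public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
-                              Class<?>[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable {
-        // Only stop the span if beforeMethod actually created one.
-        if (context.getContext() != null) {
-            ContextManager.stopSpan();
-        }
-        return ret;
-    }
-
-    @Override
-    public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments,
-                                      Class<?>[] argumentsTypes, Throwable t, MethodInvocationContext context) {
-        // Mark the active span as errored and attach the exception as a span log.
-        ContextManager.activeSpan().errorOccurred().log(t);
-    }
-}
-```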
-
-### Bootstrap class instrumentation
-SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.
-
-Override `public boolean isBootstrapInstrumentation()` and return **true**, such as:
-```java
-public class URLInstrumentation extends ClassEnhancePluginDefine {
-    private static String CLASS_NAME = "java.net.URL";
-
-    @Override protected ClassMatch enhanceClass() {
-        return byName(CLASS_NAME);
-    }
-
-    @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() {
-        return new ConstructorInterceptPoint[] {
-            new ConstructorInterceptPoint() {
-                @Override public ElementMatcher<MethodDescription> getConstructorMatcher() {
-                    return any();
-                }
-
-                @Override public String getConstructorInterceptor() {
-                    return "org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2";
-                }
-            }
-        };
-    }
-
-    @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() {
-        return new InstanceMethodsInterceptPoint[0];
-    }
-
-    @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() {
-        return new StaticMethodsInterceptPoint[0];
-    }
-
-    @Override public boolean isBootstrapInstrumentation() {
-        return true;
-    }
-}
-```
-
-`ClassEnhancePluginDefineV2` is provided in the v2 APIs, where `#isBootstrapInstrumentation` works as well.
-
-**NOTE**: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core (rt.jar). Using it where it isn't necessary could lead to unexpected results or side effects.
-
-### Provide custom config for the plugin
-A plugin could provide different behaviors based on its configuration. The SkyWalking plugin mechanism provides the configuration
-injection and initialization system in the agent core.
-
-Every plugin could declare one or more classes to represent the config by using the `@PluginConfig` annotation. The agent core
-initializes the static fields of this class through system environment variables, system properties, and the `agent.config` static file.
-
-The `#root()` method in the `@PluginConfig` annotation requires declaring the root class for the initialization process.
-Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. 
-We recommend using `Plugin`/`plugin-name`/`config-key` as the nested classes structure of the config class.
-
-**NOTE**: because of the Java ClassLoader mechanism, the `@PluginConfig` annotation should be added on the real class used in the interceptor codes. 
-
-In the following example, `@PluginConfig(root = SpringMVCPluginConfig.class)` indicates that initialization should 
-start with using `SpringMVCPluginConfig` as the root. Then, the config key of the attribute `USE_QUALIFIED_NAME_AS_ENDPOINT_NAME`
-should be `plugin.springmvc.use_qualified_name_as_endpoint_name`.
-```java
-public class SpringMVCPluginConfig {
-    public static class Plugin {
-        // NOTE: if this annotation is moved onto the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.
-        @PluginConfig(root = SpringMVCPluginConfig.class)
-        public static class SpringMVC {
-            /**
-             * If true, the fully qualified method name will be used as the endpoint name instead of the request URL,
-             * default is false.
-             */
-            public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false;
-
-            /**
-             * This config item controls whether the SpringMVC plugin should collect the parameters of the
-             * request.
-             */
-            public static boolean COLLECT_HTTP_PARAMS = false;
-        }
-
-        @PluginConfig(root = SpringMVCPluginConfig.class)
-        public static class Http {
-            /**
-             * When {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, this determines how many characters to keep and send
-             * to the OAP backend; use negative values to keep and send the complete parameters. NB: this config item is
-             * added for the sake of performance.
-             */
-            public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024;
-        }
-    }
-}
-```
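-
-With the structure above, the derived keys could be overridden in `agent.config` as in this illustrative sketch (the values are examples only; as an assumption to verify, the same keys also work as system properties with the `skywalking.` prefix):
-
-```properties
-plugin.springmvc.use_qualified_name_as_endpoint_name=true
-plugin.springmvc.collect_http_params=true
-# Derived from SpringMVCPluginConfig.Plugin.Http#HTTP_PARAMS_LENGTH_THRESHOLD
-plugin.http.http_params_length_threshold=1024
-```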
-
-
-# Meter Plugin
-Java agent plugin could use meter APIs to collect metrics for backend analysis.
-
-* `Counter` API represents a single monotonically increasing counter which automatically collects data and reports to the backend.
-```java
-import org.apache.skywalking.apm.agent.core.meter.MeterFactory;
-
-Counter counter = MeterFactory.counter(meterName).tag("tagKey", "tagValue").mode(Counter.Mode.INCREMENT).build();
-counter.increment(1d);
-```
-1. `MeterFactory.counter` creates a new counter builder with the meter name.
-1. `Counter.Builder.tag(String key, String value)` marks a tag key/value pair.
-1. `Counter.Builder.mode(Counter.Mode mode)` changes the counter mode. `RATE` mode means the rate of increase, rather than the cumulative value, is reported to the backend.
-1. `Counter.Builder.build()` builds a new `Counter` which is collected and reported to the backend.
-1. `Counter.increment(double count)` increments the `Counter` by the given count, which should be a positive value.
-
-* `Gauge` API represents a single numerical value.
-```java
-import org.apache.skywalking.apm.agent.core.meter.MeterFactory;
-
-ThreadPoolExecutor threadPool = ...;
-Gauge gauge = MeterFactory.gauge(meterName, () -> (double) threadPool.getActiveCount()).tag("tagKey", "tagValue").build();
-```
-1. `MeterFactory.gauge(String name, Supplier<Double> getter)` creates a new gauge builder with the meter name and supplier function. This function must return a `double` value.
-1. `Gauge.Builder.tag(String key, String value)` marks a tag key/value pair.
-1. `Gauge.Builder.build()` builds a new `Gauge` which is collected and reported to the backend.
-
-* `Histogram` API represents a summary of sample observations with customized buckets.
-```java
-import java.util.Arrays;
-
-import org.apache.skywalking.apm.agent.core.meter.MeterFactory;
-
-Histogram histogram = MeterFactory.histogram("test").tag("tagKey", "tagValue").steps(Arrays.asList(1d, 5d, 10d)).minValue(0).build();
-histogram.addValue(3);
-```
-1. `MeterFactory.histogram(String name)` creates a new histogram builder with the meter name.
-1. `Histogram.Builder.tag(String key, String value)` marks a tag key/value pair.
-1. `Histogram.Builder.steps(List<Double> steps)` sets up the maximum value of every histogram bucket.
-1. `Histogram.Builder.minValue(double value)` sets up the minimal value of this histogram. Default is `0`.
-1. `Histogram.Builder.build()` builds a new `Histogram` which is collected and reported to the backend.
-1. `Histogram.addValue(double value)` adds a value into the histogram and automatically finds the bucket whose count needs to be incremented. Rule: a value is counted into the bucket `[stepN, stepN+1)`; e.g. with steps `1, 5, 10` and `minValue(0)`, `addValue(3)` increments the `[1, 5)` bucket.
-
-# Plugin Test Tool
-The [Apache SkyWalking Agent Test Tool Suite](https://github.com/apache/skywalking-agent-test-tool) is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.
-
-You could learn how to use this tool to test the plugin in [this doc](Plugin-test.md). This is a must if you want to contribute plugins to the SkyWalking official repo.
-
-# Contribute plugins to the Apache SkyWalking repository
-We welcome everyone to contribute their plugins.
-
-Please follow these steps:
-1. Submit an issue for your plugin, including any supported versions.
-1. Create sub modules under `apm-sniffer/apm-sdk-plugin` or `apm-sniffer/optional-plugins`; the name should include the supported library name and versions.
-1. Follow this guide to develop. Make sure comments and test cases are provided.
-1. Develop and test.
-1. Provide the automatic test cases. Learn how to write a plugin test case from this [doc](Plugin-test.md).
-1. Send a pull request and ask for review. 
-1. The plugin committers will approve your plugin once the plugin CI-with-IT, e2e, and plugin tests have passed.
-1. The plugin is accepted by SkyWalking. 
diff --git a/docs/en/guides/Plugin-test.md b/docs/en/guides/Plugin-test.md
deleted file mode 100644
index 9a14714..0000000
--- a/docs/en/guides/Plugin-test.md
+++ /dev/null
@@ -1,638 +0,0 @@
-# Plugin automatic test framework
-
-The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and
-hundreds of versions that need to be verified, it is impossible to do it manually.
-The test framework uses a container-based tech stack and requires a set of real services with the agents installed. Then, the mock
-OAP backend runs to check the segment data sent from the agents.
-
-Every plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.
-
-## Environment Requirements
-
-1. MacOS/Linux
-2. JDK 8+
-3. Docker
-4. Docker Compose
-
-## Case Base Image Introduction
-
-The test framework provides `JVM-container` and `Tomcat-container` base images including JDK8 and JDK14. You can choose the best one for your test case. If both are suitable for your case, **`JVM-container` is preferred**.
-
-### JVM-container Image Introduction
-
-[JVM-container](../../../test/plugin/containers/jvm-container) uses `openjdk:8` as the base image, and also supports JDK14 by inheriting from `openjdk:14`.
-The test case project must be packaged as `project-name.zip`, including `startup.sh` and an uber jar, by using `mvn clean package`.
-
-Take the following test projects as examples:
-* [sofarpc-scenario](../../../test/plugin/scenarios/sofarpc-scenario) is a single project case.
-* [webflux-scenario](../../../test/plugin/scenarios/webflux-scenario) is a case including multiple projects.
-* [jdk14-with-gson-scenario](../../../test/plugin/scenarios/jdk14-with-gson-scenario) is a single project case with JDK14.
-
-### Tomcat-container Image Introduction
-
-[Tomcat-container](../../../test/plugin/containers/tomcat-container) uses `tomcat:8.5.57-jdk8-openjdk` or `tomcat:8.5.57-jdk14-openjdk` as the base image.
-The test case project must be packaged as `project-name.war` by using `mvn package`.
-
-Take the following test project as an example
-* [spring-4.3.x-scenario](https://github.com/apache/skywalking/tree/master/test/plugin/scenarios/spring-4.3.x-scenario)
-
-
-## Test project hierarchical structure
-The test case is an independent maven project, and it must be packaged as a war or zip file, depending on the chosen base image. Also, two externally accessible endpoints (usually two URLs) are required.
-
-All test case codes should be in the `org.apache.skywalking.apm.testcase.*` package. If there are some codes expected to be instrumented, then the classes could be in the `test.org.apache.skywalking.apm.testcase.*` package.
-
-**JVM-container test project hierarchical structure**
-
-```
-[plugin-scenario]
-    |- [bin]
-        |- startup.sh
-    |- [config]
-        |- expectedData.yaml
-    |- [src]
-        |- [main]
-            |- ...
-        |- [resource]
-            |- log4j2.xml
-    |- pom.xml
-    |- configuration.yaml
-    |- support-version.list
-
-[] = directory
-```
-
-**Tomcat-container test project hierarchical structure**
-
-```
-[plugin-scenario]
-    |- [config]
-        |- expectedData.yaml
-    |- [src]
-        |- [main]
-            |- ...
-        |- [resource]
-            |- log4j2.xml
-        |- [webapp]
-            |- [WEB-INF]
-                |- web.xml
-    |- pom.xml
-    |- configuration.yaml
-    |- support-version.list
-
-[] = directory
-```
-
-## Test case configuration files
-The following files are required in every test case.
-
-File Name | Descriptions
----|---
-`configuration.yml` | Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.
-`expectedData.yaml` | Describe the expected segmentItems.
-`support-version.list` | List the target versions for this case.
-`startup.sh` |`JVM-container` only. This is not required when using `Tomcat-container`.
-
-`*` The `support-version.list` format requires one line per version (containing only the last version number of each minor version). You may use `#` to comment out a version.
-
-### configuration.yml
-
-| Field | description
-| --- | ---
-| type | Image type; options are `jvm` or `tomcat`. Required.
-| entryService | The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)
-| healthCheck | The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)
-| startScript | Path of the start up script. Required in `type: jvm` only.
-| runningMode | Running mode with the optional plugin; options are `default` (default), `with_optional`, or `with_bootstrap`.
-| withPlugins | Plugin selector rule, e.g.:`apm-spring-annotation-plugin-*.jar`. Required for `runningMode=with_optional` or `runningMode=with_bootstrap`.
-| environment | Same as `docker-compose#environment`.
-| depends_on | Same as `docker-compose#depends_on`.
-| dependencies | Same as `docker-compose#services`, `image`, `links`, `hostname`, `environment` and `depends_on` are supported.
-
-**Note**: `docker-compose` activates only when `dependencies` is blank.
-
-**runningMode** option description.
-
-| Option | description
-| --- | ---
-| default | Activate all plugins in the `plugin` folder, like the official distribution agent.
-| with_optional | Activate `default` plus the plugins in `optional-plugin` selected by the given selector.
-| with_bootstrap | Activate `default` plus the plugins in `bootstrap-plugin` selected by the given selector.
-
-`with_optional`/`with_bootstrap` supports multiple selectors, separated by `;`.
-
-**File Format**
-
-```
-type:
-entryService:
-healthCheck:
-startScript:
-runningMode:
-withPlugins:
-environment:
-  ...
-depends_on:
-  ...
-dependencies:
-  service1:
-    image:
-    hostname: 
-    expose:
-      ...
-    environment:
-      ...
-    depends_on:
-      ...
-    links:
-      ...
-    entrypoint:
-      ...
-    healthcheck:
-      ...
-```
-
-* Dependencies support the docker-compose `healthcheck`, but the format is a little different: every config item must start with `-`,
-and be described as a string line.
-
-For example, in the official document, the health check is:
-```yaml
-healthcheck:
-  test: ["CMD", "curl", "-f", "http://localhost"]
-  interval: 1m30s
-  timeout: 10s
-  retries: 3
-  start_period: 40s
-```
-
-Here you should write:
-```yaml
-healthcheck:
-  - 'test: ["CMD", "curl", "-f", "http://localhost"]'
-  - "interval: 1m30s"
-  - "timeout: 10s"
-  - "retries: 3"
-  - "start_period: 40s"
-```
-
-In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version
-as the client lib version, which is defined as `${test.framework.version}` in the pom. You may use `${CASE_SERVER_IMAGE_VERSION}`
-as the version number, which will be replaced for each version under test, as sketched below.
-
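-For example, such a dependency could be declared as in this sketch (the `solr-server` service and `solr` image names are illustrative only):
-```yaml
-dependencies:
-  solr-server:
-    image: solr:${CASE_SERVER_IMAGE_VERSION}
-    hostname: solr-server
-```
-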
-> It does not support resource-related configurations, such as volumes, ports, and ulimits, because in test scenarios, no port needs to be mapped to the host VM and no folder needs to be mounted.
-
-**Take the following test cases as examples:**
-* [dubbo-2.7.x with JVM-container](../../../test/plugin/scenarios/dubbo-2.7.x-scenario/configuration.yml)
-* [jetty with JVM-container](../../../test/plugin/scenarios/jetty-scenario/configuration.yml)
-* [gateway with runningMode](../../../test/plugin/scenarios/gateway-2.1.x-scenario/configuration.yml)
-* [canal with docker-compose](../../../test/plugin/scenarios/canal-scenario/configuration.yml)
-
-### expectedData.yaml
-
-**Operator for number**
-
-| Operator | Description |
-| :--- | :--- |
-| `nq` | Not equal |
-| `eq` | Equal(default) |
-| `ge` | Greater than or equal |
-| `gt` | Greater than |
-
-**Operator for String**
-
-| Operator | Description |
-| :--- | :--- |
-| `not null` | Not null |
-| `null` | Null or empty String |
-| `eq` | Equal(default) |
-
-**Expected Data Format Of The Segment**
-```yml
-segmentItems:
--
-  serviceName: SERVICE_NAME(string)
-  segmentSize: SEGMENT_SIZE(int)
-  segments:
-  - segmentId: SEGMENT_ID(string)
-    spans:
-        ...
-```
-
-
-| Field |  Description
-| --- | ---  
-| serviceName | Service Name.
-| segmentSize | The expected number of segments.
-| segmentId | Segment ID.
-| spans | Segment span list. In the next section, you will learn how to describe each span.
-
-**Expected Data Format Of The Span**
-
-**Note**: The order of the span list should follow the order of the span finish times.
-
-```yml
-    operationName: OPERATION_NAME(string)
-    parentSpanId: PARENT_SPAN_ID(int)
-    spanId: SPAN_ID(int)
-    startTime: START_TIME(int)
-    endTime: END_TIME(int)
-    isError: IS_ERROR(string: true, false)
-    spanLayer: SPAN_LAYER(string: DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)
-    spanType: SPAN_TYPE(string: Exit, Entry, Local)
-    componentId: COMPONENT_ID(int)
-    tags:
-    - {key: TAG_KEY(string), value: TAG_VALUE(string)}
-    ...
-    logs:
-    - {key: LOG_KEY(string), value: LOG_VALUE(string)}
-    ...
-    peer: PEER(string)
-    refs:
-    - {
-       traceId: TRACE_ID(string),
-       parentTraceSegmentId: PARENT_TRACE_SEGMENT_ID(string),
-       parentSpanId: PARENT_SPAN_ID(int),
-       parentService: PARENT_SERVICE(string),
-       parentServiceInstance: PARENT_SERVICE_INSTANCE(string),
-       parentEndpoint: PARENT_ENDPOINT_NAME(string),
-       networkAddress: NETWORK_ADDRESS(string),
-       refType:  REF_TYPE(string: CrossProcess, CrossThread)
-     }
-   ...
-```
-
-| Field | Description 
-|--- |--- 
-| operationName | Span Operation Name.
-| parentSpanId | Parent span ID. **Note**: The parent span ID of the first span should be -1. 
-| spanId | Span ID. **Note**: Start from 0. 
-| startTime | Span start time. Since it is impossible to assert the accurate time, asserting `nq 0` (not 0) should be enough.
-| endTime | Span finish time. Since it is impossible to assert the accurate time, asserting `nq 0` (not 0) should be enough.
-| isError | Span status, true or false. 
-| componentId | Component id for your plugin. 
-| tags | Span tag list. **Notice**: keep the same order as coded in the plugin.
-| logs | Span log list. **Notice**: keep the same order as coded in the plugin.
-| SpanLayer | Options: DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.
-| SpanType | Span type; options: Exit, Entry, or Local.
-| peer | Remote network address, mostly IP + port. Required for exit spans.
-
-The verification description for SegmentRef
-
-| Field | Description 
-|---- |---- 
-| traceId | Trace ID.
-| parentTraceSegmentId | Parent SegmentId, pointing to the segment id in the parent segment.
-| parentSpanId | Parent SpanID, pointing to the span id in the parent segment.
-| parentService | The name of the parent/downstream service.
-| parentServiceInstance | The name of the parent/downstream service instance.
-| parentEndpoint |  The endpoint of parent/downstream service.
-| networkAddress | The peer value of parent exit span.
-| refType | Ref type, options, CrossProcess or CrossThread.
-
-**Expected Data Format Of The Meter Items**
-```yml
-meterItems:
--
-  serviceName: SERVICE_NAME(string)
-  meterSize: METER_SIZE(int)
-  meters:
-  - ...
-```
-
-| Field |  Description
-| --- | ---  
-| serviceName | Service Name.
-| meterSize | The expected number of meters.
-| meters | Meter list. See the next section on how to describe each meter.
-
-**Expected Data Format Of The Meter**
-
-```yml
-    meterId: 
-        name: NAME(string)
-        tags:
-        - {name: TAG_NAME(string), value: TAG_VALUE(string)}
-    singleValue: SINGLE_VALUE(double)
-    histogramBuckets:
-    - HISTOGRAM_BUCKET(double)
-    ...
-```
-
-The verification description for MeterId
-
-| Field | Description 
-|--- |--- 
-| name | Meter name.
-| tags | Meter tags.
-| tags.name | Tag name.
-| tags.value | Tag value.
-| singleValue | Counter or gauge value. Use the number condition operators to validate, such as `gt` or `ge`. If the current meter is a histogram, this field is not needed.
-| histogramBuckets | Histogram buckets. The bucket list must be ordered. The tool asserts that at least one bucket of the histogram has a nonzero count. If the current meter is a counter or gauge, this field is not needed.
-
-### startup.sh
-
-This script provides the start point of a JVM-based service. Most of them start with a `java -jar` command, with some variables.
-The following system environment variables are available in the shell.
-
-| Variable   | Description    |
-|:----     |:----        |
-| agent_opts               |     Agent plugin opts; check the details in the plugin doc, or the same opts added in your plugin PR.        |
-| SCENARIO_NAME       |  Service name. Defaults to the case folder name.    |
-| SCENARIO_VERSION           | Version |
-| SCENARIO_ENTRY_SERVICE             | Entrance URL to access this service |
-| SCENARIO_HEALTH_CHECK_URL          | Health check URL  |
-
-
-> `${agent_opts}` is required in your `java -jar` command; it includes the parameters injected by the test framework and
-> makes sure the agent is installed. All other parameters should be added after `${agent_opts}`.
-
-The test framework sets the service name to the test case folder name by default, but in some cases, more than one test
-project is required to run under different service names, which you could set explicitly as in the following example.
-
-Example
-```bash
-home="$(cd "$(dirname $0)"; pwd)"
-
-java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyserver-scenario" ${home}/../libs/jettyserver-scenario.jar &
-sleep 1
-
-java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyclient-scenario"  ${home}/../libs/jettyclient-scenario.jar &
-
-```
-
-> Only set this or use other skywalking options when it is really necessary.
-
-**Take the following test cases as examples**
-* [undertow](../../../test/plugin/scenarios/undertow-scenario/bin/startup.sh)
-* [webflux](../../../test/plugin/scenarios/webflux-scenario/webflux-dist/bin/startup.sh)
-
-
-## Best Practices
-
-### How To Use The Archetype To Create A Test Case Project
-We provide archetypes and a script to make creating a project easier. They create a complete test case project, so that we only need to focus on the case itself.
-First, use the following command to get the usage of the script.
-
-`bash ${SKYWALKING_HOME}/test/plugin/generator.sh`
-
-Then, run it to generate a project, named by `scenario_name`, in `./scenarios`.
-
-
-### Recommendations for pom
-
-```xml
-    <properties>
-        <!-- Provide and use this property in the pom. -->
-        <!-- This version should match the library version, -->
-        <!-- in this case, http components lib version 4.3. -->
-        <test.framework.version>4.3</test.framework.version>
-    </properties>
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-            <version>${test.framework.version}</version>
-        </dependency>
-        ...
-    </dependencies>
-
-    <build>
-        <!-- Set the package final name to be the same as the test case folder name. -->
-        <finalName>httpclient-4.3.x-scenario</finalName>
-        ....
-    </build>
-```
-
-### How To Implement Heartbeat Service
-
-The heartbeat service is designed for checking the availability of the service. It is a simple HTTP service; returning 200 means the
-target service is ready. Then the traffic generator will access the entry service and verify the expected data.
-You should consider using this service to detect, for example, whether the dependent services are ready, especially when the
-dependent services are databases or clusters.
-
-Notice: because the heartbeat service could be traced fully or partially, `segmentSize` in `expectedData.yaml` should use `ge` as the operator,
-and the segments of the heartbeat service should not be included in the expected segment data.
-
-### The Example Process of Writing Tracing Expected Data
-
-The expected data file, `expectedData.yaml`, includes the `segmentItems` part.
-
-We are using the HttpClient plugin to show how to write the expected data.
-
-There are two key points of testing:
-1. Whether the HttpClient span is created.
-1. Whether the ContextCarrier is created correctly and propagates across processes.
-
-```
-+-------------+         +------------------+            +-------------------------+
-|   Browser   |         |  Case Servlet    |            | ContextPropagateServlet |
-|             |         |                  |            |                         |
-+-----|-------+         +---------|--------+            +------------|------------+
-      |                           |                                  |
-      |                           |                                  |
-      |       WebHttp            +-+                                 |
-      +------------------------> |-|         HttpClient             +-+
-      |                          |--------------------------------> |-|
-      |                          |-|                                |-|
-      |                          |-|                                |-|
-      |                          |-| <--------------------------------|
-      |                          |-|                                +-+
-      | <--------------------------|                                 |
-      |                          +-+                                 |
-      |                           |                                  |
-      |                           |                                  |
-      |                           |                                  |
-      |                           |                                  |
-      +                           +                                  +
-```
-#### segmentItems
-
-By following the flow of the HttpClient case, there should be two segments created:
-1. Segment represents the CaseServlet access. Let's name it as `SegmentA`.
-1. Segment represents the ContextPropagateServlet access. Let's name it as `SegmentB`.
-
-```yml
-segmentItems:
-  - serviceName: httpclient-case
-    segmentSize: ge 2 # Could have more than one health check segment, because the dependency may not be standby.
-```
-
-Because the Tomcat plugin is a default plugin of SkyWalking, there are two spans in SegmentA:
-1. Tomcat entry span
-1. HttpClient exit span
-
-SegmentA's span list should look like the following:
-```yml
-    - segmentId: not null
-      spans:
-        - operationName: /httpclient-case/case/context-propagate
-          parentSpanId: 0
-          spanId: 1
-          startTime: nq 0
-          endTime: nq 0
-          isError: false
-          spanLayer: Http
-          spanType: Exit
-          componentId: eq 2
-          tags:
-            - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
-            - {key: http.method, value: GET}
-          logs: []
-          peer: 127.0.0.1:8080
-        - operationName: /httpclient-case/case/httpclient
-          parentSpanId: -1
-          spanId: 0
-          startTime: nq 0
-          endTime: nq 0
-          spanLayer: Http
-          isError: false
-          spanType: Entry
-          componentId: 1
-          tags:
-            - {key: url, value: 'http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient'}
-            - {key: http.method, value: GET}
-          logs: []
-          peer: null
-```
-
-SegmentB should have only one Tomcat entry span, but it includes the ref pointing to SegmentA.
-
-SegmentB's span list should look like the following:
-```yml
-- segmentId: not null
-  spans:
-  -
-   operationName: /httpclient-case/case/context-propagate
-   parentSpanId: -1
-   spanId: 0
-   tags:
-   - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
-   - {key: http.method, value: GET}
-   logs: []
-   startTime: nq 0
-   endTime: nq 0
-   spanLayer: Http
-   isError: false
-   spanType: Entry
-   componentId: 1
-   peer: null
-   refs:
-    - {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: 'localhost:8080', refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId: not null}
-```
-
-### The Example Process of Writing Meter Expected Data
-
-The expected data file, `expectedData.yaml`, includes the `meterItems` part.
-
-We are using the toolkit plugin to demonstrate how to write the expected data. When writing the [meter plugin](Java-Plugin-Development-Guide.md#meter-plugin), the expected data format stays the same.
-
-There is one key point of testing:
-1. Build a meter and operate it.
-
-Such as `Counter`:
-```java
-MeterFactory.counter("test_counter").tag("ck1", "cv1").build().increment(1d);
-MeterFactory.histogram("test_histogram").tag("hk1", "hv1").steps(1d, 5d, 10d).build().addValue(2d);
-```
-
-```
-+-------------+         +------------------+
-|   Plugin    |         |    Agent core    |
-|             |         |                  |
-+-----|-------+         +---------|--------+
-      |                           |         
-      |                           |         
-      |    Build or operate      +-+        
-      +------------------------> |-|        
-      |                          |-|
-      |                          |-|        
-      |                          |-|        
-      |                          |-|
-      |                          |-|        
-      | <--------------------------|        
-      |                          +-+        
-      |                           |         
-      |                           |         
-      |                           |         
-      |                           |         
-      +                           +         
-```
-
-#### meterItems
-
-By following the flow of the toolkit case, there should be two meters created:
-1. Meter `test_counter` created from `MeterFactory#counter`. Let's name it as `MeterA`.
-1. Meter `test_histogram` created from `MeterFactory#histogram`. Let's name it as `MeterB`.
-
-```yml
-meterItems:
-  - serviceName: toolkit-case
-    meterSize: 2
-```
-
-They show two kinds of meters: MeterA has a single value, while MeterB has a histogram value.
-
-MeterA should look like the following; `counter` and `gauge` use the same data format.
-```yaml
-- meterId:
-    name: test_counter
-    tags:
-      - {name: ck1, value: cv1}
-  singleValue: gt 0
-```
-
-MeterB should look like the following.
-```yaml
-- meterId:
-    name: test_histogram
-    tags:
-      - {name: hk1, value: hv1}
-  histogramBuckets:
-    - 0.0
-    - 1.0
-    - 5.0
-    - 10.0
-```
-
-## Local Test and Pull Request To The Upstream
-
-First of all, the test case project should compile successfully, have the right project structure, and be deployable.
-The developer should verify that the start script runs on Linux/MacOS, and that the entryService/healthCheck endpoints
-are able to provide responses.
-
-You could run the test by using the following commands:
-
-```bash
-cd ${SKYWALKING_HOME}
-bash ./test/plugin/run.sh -f ${scenario_name}
-```
-
-**Notice**: if codes in `./apm-sniffer` have been changed, whether by your own change or a git update,
-please recompile `skywalking-agent`, because the test framework uses the existing `skywalking-agent` folder
-rather than recompiling it every time.
-
-Use `${SKYWALKING_HOME}/test/plugin/run.sh -h` to know more command options.
-
-If the local test passes, you could add it to a `.github/workflows/plugins-test.<n>.yaml` file, which drives the tests on the GitHub Actions of the official SkyWalking repository.
-Based on your plugin's name, please add the test case into the `.github/workflows/plugins-test.<n>.yaml` file, in alphabetical order.
-
-Every test case is a GitHub Actions job. Please use the scenario directory name as the case `name`.
-Mostly, you'll just need to decide which file (`plugins-test.<n>.yaml`) to add your test case to, and simply put one line (as follows) in it; take the existing cases as examples.
-You can run `python3 tools/select-group.py` to see which file contains the fewest cases and add your case to it, in order to balance the running time of each group.
-
-If a test case is required to run in a JDK 14 environment, please add your test case into a `plugins-jdk14-test.<n>.yaml` file.
-
-```yaml
-jobs:
-  PluginsTest:
-    name: Plugin
-    runs-on: ubuntu-latest
-    timeout-minutes: 90
-    strategy:
-      fail-fast: true
-      matrix:
-        case:
-          # ...
-          - <your scenario test directory name>
-          # ...
-```
diff --git a/docs/en/guides/README.md b/docs/en/guides/README.md
deleted file mode 100755
index cbab6e0..0000000
--- a/docs/en/guides/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# Guides
-There are many ways you can contribute to the SkyWalking community.
-
-- Go through our documents, and point out or fix a problem. Translate the documents into other languages.
-- Download our [releases](http://skywalking.apache.org/downloads/), try to monitor your applications, and provide feedback to us.
-- Read our source codes. For details, reach out to us.
-- If you find any bugs, [submit an issue](https://github.com/apache/skywalking/issues). You can also try to fix it.
-- Find [help wanted issues](https://github.com/apache/skywalking/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22). This is a good place for you to start.
-- Submit an issue or start a discussion at [GitHub issue](https://github.com/apache/skywalking/issues/new).
-- See all mail list discussions at [website list review](https://lists.apache.org/list.html?dev@skywalking.apache.org).
-If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe by following the steps below.
-- Issue reports and discussions may also take place via `dev@skywalking.apache.org`. 
-Mail to `dev-subscribe@skywalking.apache.org`, and follow the instructions in the reply to subscribe to the mail list. 
-
-## Contact Us
-All of the following channels are open to the community.
-* Submit an [issue](https://github.com/apache/skywalking/issues)
-* Mail list: **dev@skywalking.apache.org**. Mail to `dev-subscribe@skywalking.apache.org`. Follow the instructions in the reply to subscribe to the mail list.
-* [Gitter](https://gitter.im/openskywalking/Lobby)
-* QQ Group: 392443393
-
-## Become an official Apache SkyWalking Committer
-The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides.
-See [Become official Apache SkyWalking Committer](asf/committer.md) for more details.
-
-## For code developer
-For developers, the starting point is the [Compiling Guide](How-to-build.md). It guides developers on how to build the project locally and set up the environment.
-
-### Integration Tests
-After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you'll
-need to run tests locally to verify that your codes would not break any existing features,
-as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors.
-If the new codes involve other components or libraries, you should also write integration tests (IT).
-
-SkyWalking leverages the plugin `maven-surefire-plugin` to run the UTs and uses `maven-failsafe-plugin`
-to run the ITs. `maven-surefire-plugin` excludes ITs (whose class name starts with `IT`)
-and leaves them for `maven-failsafe-plugin` to run, which is bound to the `verify` goal and `CI-with-IT` profile.
-Therefore, to run the UTs, try `./mvnw clean test`, which only runs the UTs but not the ITs.
-
-If you would like to run the ITs, please activate the `CI-with-IT` profile
-as well as the profiles of the modules whose ITs you want to run.
-E.g. if you would like to run the ITs in `oap-server`, try `./mvnw -Pbackend,CI-with-IT clean verify`,
-and if you would like to run all the ITs, simply run `./mvnw -Pall,CI-with-IT clean verify`.
-
-Please be advised that if you're writing integration tests, name them with the pattern `IT*` so that they only run with the `CI-with-IT` profile.
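-
-To summarize the commands above in one place:
-
-```bash
-# Run the UTs only (ITs, whose class names start with `IT`, are excluded).
-./mvnw clean test
-
-# Run the ITs of the oap-server backend modules.
-./mvnw -Pbackend,CI-with-IT clean verify
-
-# Run all the ITs.
-./mvnw -Pall,CI-with-IT clean verify
-```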
-
-### End to End Tests (E2E)
-Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.
-
-> End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish.
- The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.
-
-The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by `docker-compose`. Besides, there is a test controller (JUnit test) running outside of the container that sends traffic to the instrumented service,
-and then verifies the corresponding results after those requests have been made through GraphQL API of the SkyWalking Web App.
-
-Before you take the following steps, please set the SkyWalking version `sw.version` in the [pom.xml](../../../test/e2e/pom.xml)
-so that you can build it in your local IDE. Make sure not to check this change into the codebase. However, if
-you prefer to build it in the command line interface with `./mvnw`, you can simply use property `-Dsw.version=x.y.z` without
-modifying `pom.xml`.
-
-#### Writing E2E Cases
-
-- Set up the environment in IntelliJ IDEA
-
-The E2E test is a separate project under the SkyWalking root directory and the IDEA cannot recognize it by default. Right click
-on the file `test/e2e/pom.xml` and click `Add as Maven Project`. We recommend opening the directory `skywalking/test/e2e`
-in a separate IDE window for better experience, since there may be shaded classes issues.
-
-- Orchestrate the components
-
-The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the 
-components.
- 
-To make the orchestration process easier, we're using a [docker-compose](https://docs.docker.com/compose/) that provides a simple file format (`docker-compose.yml`) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.
-
-Follow these steps:
-1. Decide what (and how many) containers will be needed. For example, for cluster testing, you'll need > 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services;
-1. Define the containers in `docker-compose.yml`, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc.
-1. Write (or hopefully reuse) the test codes to verify that the results are correct.
-
-As for the final step, we have a user-friendly framework to help you get started more quickly. This framework provides the annotation `@DockerCompose("docker-compose.yml")` to load/parse and start up all the containers in the proper order.
-`@ContainerHost`/`@ContainerPort` obtains the real host/port of the container. `@ContainerHostAndPort` obtains both. `@DockerContainer` obtains the running container.
-
-- Write test controller
-
-Simply put, test controllers are tests that can be bound to the maven `integration-test/verify` phase.
-They send **designed** requests to the instrumented services, and anticipate corresponding traces/metrics/metadata from the SkyWalking webapp GraphQL API.
-
-In the test framework, we provide a `TrafficController` that periodically sends traffic data to the instrumented services. You can simply enable it by providing a url and traffic data. Refer to [this](../../../test/e2e/e2e-test/src/test/java/org/apache/skywalking/e2e/base/TrafficController.java).
-
-- Troubleshooting
-
-We expose all logs from all containers to the stdout in the non-CI (local) mode, and save and upload them to the GitHub server in CI mode. You can download them (only when the tests have failed) at "Artifacts/Download artifacts/logs" (see top right) for debugging.
-
-**NOTE:** Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules)
-are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.
-
-#### E2E local remote debugging
-When the E2E test is executed locally, if any test case fails, the [E2E local remote debugging function](E2E-local-remote-debug.md) can be used to quickly troubleshoot the bug.
-
-### Project Extensions
-The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions,
-read the following guides.
-
-- [Java agent plugin development guide](Java-Plugin-Development-Guide.md).
-This guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this. 
-- If you would like to build a new probe or plugin in any language, please read the [Component library definition and extension](Component-library-settings.md) document.
-- [Storage extension development guide](storage-extention.md). Potential contributors can learn how to build a new 
-storage implementor in addition to the official one.
-- Customize analysis using OAL scripts. OAL scripts are located in `config/oal/*.oal`. You could modify them and reboot the OAP server. Read 
-[Observability Analysis Language Introduction](../concepts-and-designs/oal.md) to learn more about OAL scripts.
-- [Source and scope extension for new metrics](source-extension.md). For analysis of a new metric which SkyWalking
-hasn't yet provided. Add a new receiver, rather than choosing an [existing receiver](../setup/backend/backend-receivers.md).
-You would most likely have to add a new source and scope. To learn how to do this, read the document.
-
-### UI developer
-Our UI consists of static pages and the web container.
-
-- [RocketBot UI](https://github.com/apache/skywalking-rocketbot-ui) is SkyWalking's primary UI since the 6.1 release.
-It is built with vue + typescript. Learn more at the rocketbot repository.
-- **Web container** source codes are in the `apm-webapp` module. This is a simple zuul proxy which hosts
-static resources and sends GraphQL query requests to the backend.
-- [Legacy UI repository](https://github.com/apache/skywalking-ui) is retained, but not included
-in SkyWalking releases since 6.0.0-GA.
-
-### OAP backend dependency management
-> This section is only applicable to dependencies of the backend module.
-
-As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the [ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html). So if you're adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.
-
-We have a [simple script](../../../tools/dependencies/check-LICENSE.sh) to help you make sure that you haven't missed out any new dependencies (see the sketch after this list):
-- Build a distribution package and unzip/untar it to folder `dist`.
-- Run the script in the root directory. It will print out all new dependencies.
-- Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the `apm-dist/release-docs/{LICENSE,NOTICE}` file.
-- Add the names of these dependencies to the `tools/dependencies/known-oap-backend-dependencies.txt` file (**in alphabetical order**). `check-LICENSE.sh` should pass in the next run.
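-
-A sketch of this workflow, assuming the distribution package has already been built and unpacked into `dist`:
-
-```bash
-# From the project root, after unpacking the distribution into `dist`:
-./tools/dependencies/check-LICENSE.sh
-# Any dependency it prints needs its LICENSE/NOTICE added to
-# apm-dist/release-docs/{LICENSE,NOTICE} and its name appended (alphabetically)
-# to tools/dependencies/known-oap-backend-dependencies.txt.
-```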
-
-## Profile
-The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.
-- [Profile data report protocol](https://github.com/apache/skywalking-data-collect-protocol/tree/master/profile) is provided through gRPC, just like other traces and JVM data.
-- [Thread dump merging mechanism](backend-profile.md) introduces the merging mechanism. This mechanism helps end users understand profile reports.
-- [Exporter tool of profile raw data](backend-profile-export.md) guides you on how to package the original profile data for issue reports when the visualization doesn't work well on the official UI.
-
-## Release
-If you're a committer, read the [Apache Release Guide](How-to-release.md) to learn how to create an official Apache version release in accordance with Apache's rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.
diff --git a/docs/en/guides/asf/committer.md b/docs/en/guides/asf/committer.md
deleted file mode 100644
index 66f6a2c..0000000
--- a/docs/en/guides/asf/committer.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# Apache SkyWalking committer
-SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.
-
-Like many Apache projects, SkyWalking welcomes all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.
-
-## Committer
-### Nominate new committer
-In SkyWalking, **new committer nomination** could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.
-
-The following steps are recommended (to be initiated only by an existing PMC member):
-1. Send an email titled `[DISCUSS] Promote xxx as new committer` to `private@skywalking.a.o`. List the important contributions of the candidate,
-so you could gather support from other PMC members for your proposal.
-1. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern.
-1. If the PMC generally agrees to the proposal, send an email titled `[VOTE] Promote xxx as new committer` to `private@skywalking.a.o`.
-1. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as `Consensus Approval` if there are three +1 votes and
-+1 votes > -1 votes.
-1. Send an email titled `[RESULT][VOTE] Promote xxx as new committer` to `private@skywalking.a.o`, and list the voting details, including who the voters are.
-
-### Invite new committer
-The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.
-
-The PMC member should send an email using the following template to the new committer:
-```
-To: JoeBloggs@foo.net
-Cc: private@skywalking.apache.org
-Subject: Invitation to become SkyWalking committer: Joe Bloggs
-
-Hello [invitee name],
-
-The SkyWalking Project Management Committee (PMC)
-hereby offers you committer privileges to the project. These privileges are
-offered on the understanding that you'll use them
-reasonably and with common sense. We like to work on trust
-rather than unnecessary constraints.
-
-Being a committer enables you to more easily make 
-changes without needing to go through the patch 
-submission process. 
-
-Being a committer does not require you to 
-participate any more than you already do. It does 
-tend to make one even more committed.  You will 
-probably find that you spend more time here.
-
-Of course, you can decline and instead remain as a 
-contributor, participating as you do now.
-
-A. This personal invitation is a chance for you to 
-accept or decline in private.  Either way, please 
-let us know in reply to the [private@skywalking.apache.org] 
-address only.
-
-B. If you accept, the next step is to register an iCLA:
-    1. Details of the iCLA and the forms are found 
-    through this link: http://www.apache.org/licenses/#clas
-
-    2. Instructions for its completion and return to 
-    the Secretary of the ASF are found at
-    http://www.apache.org/licenses/#submitting
-
-    3. When you transmit the completed iCLA, request 
-    to notify the Apache SkyWalking and choose a 
-    unique Apache id. Look to see if your preferred 
-    id is already taken at 
-    http://people.apache.org/committer-index.html     
-    This will allow the Secretary to notify the PMC 
-    when your iCLA has been recorded.
-
-When recording of your iCLA is noticed, you will 
-receive a follow-up message with the next steps for 
-establishing you as a committer.
-```
-
-### Invitation acceptance process
-The new committer should reply to `private@skywalking.apache.org` (choose `reply all`), and express his/her intention to accept the invitation.
-Then, this invitation will be treated as accepted by the project's PMC. Of course, the new committer may also choose to decline the invitation.
-
-Once the invitation has been accepted, the new committer has to take the following steps:
-1. Subscribe to `dev@skywalking.apache.org`. Usually this is already done.
-1. Choose an Apache ID that is not on the [apache committers list page](http://people.apache.org/committer-index.html).
-1. Download the [ICLA](https://www.apache.org/licenses/icla.pdf)  (If the new committer contributes to the project as a day job, [CCLA](http://www.apache.org/licenses/cla-corporate.pdf)  is expected).
-1. After filling in the `icla.pdf` (or `ccla.pdf`) with the correct information, print it, sign it by hand, scan it as a PDF, and send it as an attachment to [secretary@apache.org](mailto:secretary@apache.org). (If an electronic signature is preferred, please follow the steps on [this page](http://www.apache.org/licenses/contributor-agreements.html#submitting))
-1. The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:
-
-```
-Dear XXX,
-
-This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records.
-
-Your account has been requested for you and you should receive email with next steps
-within the next few days (can take up to a week).
-
-Please refer to https://www.apache.org/foundation/how-it-works.html#developers
-for more information about roles at Apache.
-```
-
-In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.
-The V.P. could request through the [Apache Account Submission Helper Form](https://whimsy.apache.org/officers/acreq).
-
-After several days, the new committer will receive an email confirming creation of the account, titled `Welcome to the Apache Software Foundation (ASF)!`.
-Congratulations! The new committer now has an official Apache ID.
-
-The PMC member should add the new committer to the official committer list through [roster](https://whimsy.apache.org/roster/committee/skywalking). 
-
-### Set up the Apache ID and dev env
-1. Go to [Apache Account Utility Platform](https://id.apache.org/), create your password, set up your personal mailbox (`Forwarding email address`) and GitHub account(`Your GitHub Username`). An organizational invite will be sent to you via email shortly thereafter (within 2 hours).
-1. If you would like to use the `xxx@apache.org` email service, please refer to [here](https://infra.apache.org/committer-email.html). Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings.
-1. Follow the [authorized GitHub 2FA wiki](https://help.github.com/articles/configuring-two-factor-authentication-via-a-totp-mobile-app/) to enable two-factor authorization (2FA) on [Github](http://github.com/). When you set 2FA to "off", it will be delisted by the corresponding Apache committer write permission group until you set it up again. (**NOTE: Treat your recovery codes with the same level of attention as you would your password!**)
-1. Use [GitBox Account Linking Utility](https://gitbox.apache.org/setup/) to obtain write permission of the SkyWalking project.
-1. Follow this [doc](https://github.com/apache/skywalking-website#how-to-add-a-new-committer) to update the website.
-
-If you would like to show up publicly in the Apache GitHub org, you need to go to the [Apache GitHub org people page](https://github.com/orgs/apache/people), 
-search for yourself, and choose `Organization visibility` to `Public`.
-
-### Committer rights, duties, and responsibilities
-The SkyWalking project doesn't require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!
-
-As a committer, you can:
-1. Review and merge pull requests to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits **must be squashed and merged** into a single commit **with explanatory comments**. It is recommended for new committers to request a recheck of the pull request from senior committers.
-1. Create and push codes to the new branch in the Apache repo.
-1. Follow the [release process](../How-to-release.md) to prepare a new release. Remember to confirm with the committer team
-that it is the right time to create the release.
-
-The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as `+1 (non-binding)`.
-Being familiar with the release process is key to being promoted to the role of PMC member.
-
-## Project Management Committee
-The Project Management Committee (PMC) member does not have any special rights in code contributions. 
-PMC members simply oversee the project and make sure that it follows the Apache requirements. The PMC's functions include:
-1. Binding voting for releases and license checks;
-1. New committer and PMC member recognition;
-1. Identification of branding issues and brand protection; and
-1. Responding to questions raised by the ASF board, and taking necessary actions.
-
-The V.P., as chair of the PMC, acts as the secretary and is responsible for initializing the board report.
-
-In most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.
-
-The new PMC voting process should also follow the `[DISCUSS]`, `[VOTE]` and `[RESULT][VOTE]` procedures using a private mail list, just like the [voting process for new committers](#new-committer-nomination).
-Before sending the invitation, the PMC [must also send a NOTICE mail to the Apache board](http://www.apache.org/dev/pmc.html#newpmc).
-```
-To: board@apache.org
-Cc: private@skywalking.apache.org
-Subject: [NOTICE] Jane Doe for SkyWalking PMC
-
-SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC.
-
-(include if a vote was held) The vote result is available here: https://lists.apache.org/...
-```
-
-After 72 hours, if the board doesn't object to the nomination (which it won't in most cases), an invitation may then be sent to the candidate.
-
-Once the invitation is accepted, a PMC member should add the new member to the official PMC list through [roster](https://whimsy.apache.org/roster/committee/skywalking).
diff --git a/docs/en/guides/backend-oal-scripts.md b/docs/en/guides/backend-oal-scripts.md
deleted file mode 100644
index 5698422..0000000
--- a/docs/en/guides/backend-oal-scripts.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Official OAL script
-First, read the [OAL introduction](../concepts-and-designs/oal.md).
-
-Since 8.0.0, you may find the OAL scripts at `/config/oal/*.oal` in the SkyWalking distribution.
-You could change them, such as by adding filter conditions or new metrics. Then reboot the OAP server, and the changes will take effect.
-
-All metrics named in this script may be used in alarms and UI queries.
diff --git a/docs/en/guides/backend-profile-export.md b/docs/en/guides/backend-profile-export.md
deleted file mode 100644
index cf10644..0000000
--- a/docs/en/guides/backend-profile-export.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Exporter tool for profile raw data
-When visualization doesn't work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: The report includes class names, method names, line numbers, etc. Before submitting it, please make sure that sharing this information doesn't compromise the security of your system.
-
-## Export using command line
-1. Set the storage in the `tools/profile-exporter/application.yml` file based on your use case.
-1. Prepare the data
-    - Profile task ID: the ID of the profile task
-    - Trace ID: the trace ID on which the profile error occurred
-    - Export dir: the directory to which the data is exported
-1. Enter the SkyWalking root path
-1. Execute shell command
-    ```bash
-   bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}
-   ```
-1. The file `{traceId}.tar.gz` will be generated after the shell script completes.
-
-## Exported data content
-1. `basic.yml`: Contains the complete information of the profiled segments in the trace.
-1. `snapshot.data`: All monitored thread snapshot data in the current segment. 
-
-## Report profile issues
-1. Provide exported data generated from this tool.
-1. Provide the operation name and the mode of analysis (including/excluding child span) for the span.
-1. Issue description. (It would be great if you could provide UI screenshots.)
diff --git a/docs/en/guides/backend-profile.md b/docs/en/guides/backend-profile.md
deleted file mode 100644
index e582779..0000000
--- a/docs/en/guides/backend-profile.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# Thread dump merging mechanism
-The performance profile is an enhancement feature in the APM system. We use the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much lower than using distributed tracing to locate the slow method. This feature is suitable for the production environment. This document introduces how thread dumps are merged into the final report as stack tree(s).
-
-## Thread analyst
-### Read data and transform
-Read the data from the database and convert it to a data structure in gRPC.
-```
-st=>start: Start
-e=>end: End
-op1=>operation: Load data using paging
-op2=>operation: Transform data using parallel
-
-st(right)->op1(right)->op2
-op2(right)->e
-```
-Copy the code and paste it into this [link](http://flowchart.js.org/) to generate the flow chart.
-1. Use the stream to read data by page (50 records per page).
-2. Convert the data into gRPC data structures in the form of parallel streams.
-3. Merge into a list of data.
-### Data analysis
-Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records,
-and use the collector to perform data aggregation. Generate a multi-root tree.
-```
-st=>start: Start
-e=>end: End
-op1=>operation: Group by first stack element
-sup=>operation: Generate empty stack tree
-acc=>operation: Accumulator data to stack tree
-com=>operation: Combine stack trees
-fin=>operation: Calculate durations and build result
-
-st(right)->op1->sup(right)->acc
-acc(right)->com(right)->fin->e
-```
-Copy the code and paste it into this [link](http://flowchart.js.org/) to generate a flow chart.
-- **Group by first stack element**: Use the first level element in each stack to group, ensuring that the stacks have the same root node.
-- **Generate empty stack tree**: Generate multiple top-level empty trees to prepare for the following steps.
-The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks.
-- **Accumulator data to stack tree**: Add every thread dump into the generated trees.
-    1. Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. 
-    If not, add this element.
-    2. Keep the dump sequences and timestamps in each node from the source.
-- **Combine stack trees**: Combine all tree structures into one using the same rules as the `Accumulator` (see the sketch after this list).
-    1. Use LDR to traverse the tree node. Use the `Stack` data structure to avoid recursive calls. Each stack element represents the node that needs to be merged.
-    2. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.
-- **Calculate durations and build result**: Calculate relevant statistics and generate response.
-    1. Use the same traversal node logic as in the `Combine stack trees` step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations.
-    2. Calculate each node's duration in parallel. For each node, sort the sequences. If two sequences are consecutive, the duration should include the interval between these two sequences' timestamps.
-    3. Calculate each node's self execution time in parallel. For each node, the duration of the current node should deduct the time consumed by all its children.
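-
-For illustration, the node-merging rule used by the `Accumulator` and `Combine stack trees` steps can be sketched in Java as follows. This is a minimal, hypothetical sketch (not the actual OAP classes), and it uses recursion for brevity where the real implementation uses an explicit `Stack`:
-```java
-import java.util.ArrayList;
-import java.util.List;
-
-class StackNode {
-    final String codeSignature;
-    final List<Integer> sequences = new ArrayList<>(); // dump sequences observed at this node
-    final List<StackNode> children = new ArrayList<>();
-
-    StackNode(String codeSignature) {
-        this.codeSignature = codeSignature;
-    }
-
-    // Merge `other` into this node, assuming both share the same code signature and parent.
-    void merge(StackNode other) {
-        sequences.addAll(other.sequences);
-        for (StackNode otherChild : other.children) {
-            StackNode match = children.stream()
-                    .filter(c -> c.codeSignature.equals(otherChild.codeSignature))
-                    .findFirst().orElse(null);
-            if (match == null) {
-                children.add(otherChild); // no same-signature child: adopt as a new branch
-            } else {
-                match.merge(otherChild);  // same signature and parent: merge recursively
-            }
-        }
-    }
-}
-```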
-
-## Profile data debugging
-Please follow the [exporter tool](backend-profile-export.md#export-using-command-line) to package profile data. Unzip the profile data and use the [analyzer main function](../../../oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/ProfileExportedAnalyze.java) to run it.
diff --git a/docs/en/guides/source-extension.md b/docs/en/guides/source-extension.md
deleted file mode 100644
index 5516f83..0000000
--- a/docs/en/guides/source-extension.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Source and scope extension for new metrics
-From the [OAL scope introduction](../concepts-and-designs/oal.md#scope), you should already understand what a scope is.
-If you would like to create more extensions, you need to have a deeper understanding of what a **source** is. 
-
-**Source** and **scope** are interrelated concepts. **Scope** declares the ID (int) and name, while **source** declares the attributes.
-Follow these steps to create a new source and scope.
-
-1. The OAP core module provides the **SourceReceiver** internal service.
-```java
-public interface SourceReceiver extends Service {
-    void receive(Source source);
-}
-```
-
-2. All data of the analysis must be a subclass of **org.apache.skywalking.oap.server.core.source.Source**, tagged with the
-`@ScopeDeclaration` annotation, and included in the `org.apache.skywalking` package. Then, it can be supported by the OAL script and OAP core.
-
-Take the existing source **ServiceInstance** as an example.
-```java
-@ScopeDeclaration(id = SERVICE_INSTANCE, name = "ServiceInstance", catalog = SERVICE_INSTANCE_CATALOG_NAME)
-@ScopeDefaultColumn.VirtualColumnDefinition(fieldName = "entityId", columnName = "entity_id", isID = true, type = String.class)
-public class ServiceInstance extends Source {
-    @Override public int scope() {
-        return DefaultScopeDefine.SERVICE_INSTANCE;
-    }
-
-    @Override public String getEntityId() {
-        return String.valueOf(id);
-    }
-
-    @Getter @Setter private int id;
-    @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = "service_id") private int serviceId;
-    @Getter @Setter private String name;
-    @Getter @Setter private String serviceName;
-    @Getter @Setter private String endpointName;
-    @Getter @Setter private int latency;
-    @Getter @Setter private boolean status;
-    @Getter @Setter private int responseCode;
-    @Getter @Setter private RequestType type;
-}
-```
-
-3. The `scope()` method in source returns an ID, which is not a random value. This ID must be declared through the `@ScopeDeclaration` annotation too. The ID in `@ScopeDeclaration` and ID in `scope()` method must be the same for this source.
-
-4. The `String getEntityId()` method in the source should return a value representing the unique entity to which the scope relates. For example, in the service scope, the entity ID is the service ID, which represents a particular service, like the `Order` service.
-This value is used in the [OAL group mechanism](../concepts-and-designs/oal.md#group).
-
-5. `@ScopeDefaultColumn.VirtualColumnDefinition` and `@ScopeDefaultColumn.DefinedByField` are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and map to columns such as ElasticSearch index fields and database table columns.
-For example, the entity ID and service ID are usually included for the endpoint and service instance level scopes. Refer to the existing scopes for reference.
-All these fields are detected by OAL Runtime, and are required during query.
-
-6. Add the scope name as a keyword to the OAL grammar definition file, `OALLexer.g4`, which is in the `antlr4` folder of the `generate-tool-grammar` module.
-
-7. Add the scope name as a keyword to the parser definition file, `OALParser.g4`, which is located in the same folder as `OALLexer.g4`.
-
-
-___
-After finishing these steps, you could build a receiver (a minimal sketch follows the list), which should
-1. Obtain the original data of the metrics.
-1. Build the source, and send it to `SourceReceiver`.
-1. Complete your OAL scripts.
-1. Repackage the project.
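-
-The sketch below is a minimal, hypothetical receiver (the class and callback names are made up); it populates the `ServiceInstance` source shown above and hands it to the `SourceReceiver` service:
-```java
-// How `sourceReceiver` is obtained (usually via the module manager) is omitted here.
-public class MyMetricReceiver {
-    private final SourceReceiver sourceReceiver;
-
-    public MyMetricReceiver(SourceReceiver sourceReceiver) {
-        this.sourceReceiver = sourceReceiver;
-    }
-
-    public void onOriginalData(int serviceId, int instanceId, String name, int latencyMs, boolean success) {
-        ServiceInstance source = new ServiceInstance();
-        source.setId(instanceId);
-        source.setServiceId(serviceId);
-        source.setName(name);
-        source.setLatency(latencyMs);
-        source.setStatus(success);
-        // The OAP core calculates all OAL metrics declared on this scope from here.
-        sourceReceiver.receive(source);
-    }
-}
-```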
diff --git a/docs/en/guides/storage-extention.md b/docs/en/guides/storage-extention.md
deleted file mode 100644
index 17c7c65..0000000
--- a/docs/en/guides/storage-extention.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Extend storage
-SkyWalking has already provided several storage solutions. In this document, you could 
-learn how to easily implement a new storage. 
-
-## Define your storage provider
-1. Define a class extending `org.apache.skywalking.oap.server.library.module.ModuleProvider`.
-2. Set this provider to target the storage module.
-```java
-@Override 
-public Class<? extends ModuleDefine> module() {
-    return StorageModule.class;
-}
-```
-
-## Implement all DAOs
-Here's a list of all DAO interfaces in storage:
-1. IServiceInventoryCacheDAO
-1. IServiceInstanceInventoryCacheDAO
-1. IEndpointInventoryCacheDAO
-1. INetworkAddressInventoryCacheDAO
-1. IBatchDAO
-1. StorageDAO
-1. IRegisterLockDAO
-1. ITopologyQueryDAO
-1. IMetricsQueryDAO
-1. ITraceQueryDAO
-1. IMetadataQueryDAO
-1. IAggregationQueryDAO
-1. IAlarmQueryDAO
-1. IHistoryDeleteDAO
-1. IMetricsDAO
-1. IRecordDAO
-1. IRegisterDAO
-1. ILogQueryDAO
-1. ITopNRecordsQueryDAO
-1. IBrowserLogQueryDAO
-1. IProfileTaskQueryDAO
-1. IProfileTaskLogQueryDAO
-1. IProfileThreadSnapshotQueryDAO
-1. UITemplateManagementDAO
-
-## Register all service implementations
-In `public void prepare()`, use the `this#registerServiceImplementation` method to register and bind your implementations of the above interfaces, as in the sketch below.
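-
-A minimal sketch, assuming hypothetical implementations such as `MyBatchDAO` and `MyStorageDAO` for the interfaces listed above:
-```java
-@Override
-public void prepare() throws ServiceNotProvidedException {
-    // Register one implementation per DAO interface required by the storage module.
-    this.registerServiceImplementation(IBatchDAO.class, new MyBatchDAO());
-    this.registerServiceImplementation(StorageDAO.class, new MyStorageDAO());
-    // ...repeat for the remaining DAO interfaces.
-}
-```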
-
-## Example
-`org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider` and `org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider`  are good examples.
-
-## Redistribution with new storage implementation
-To implement the storage, you don't have to clone the main repo. Simply use our Apache releases.
-Take a look at the [SkyAPM/SkyWalking-With-Es5x-Storage](https://github.com/SkyAPM/SkyWalking-With-Es5x-Storage) repo, a SkyWalking v6 redistribution with an ElasticSearch 5 TCP connection storage implementation.
diff --git a/docs/en/protocols/Browser-HTTP-API-Protocol.md b/docs/en/protocols/Browser-HTTP-API-Protocol.md
deleted file mode 100644
index 72265e0..0000000
--- a/docs/en/protocols/Browser-HTTP-API-Protocol.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# HTTP API Protocol
-
-HTTP API Protocol defines the API data format, including API request and response data format.
-They use the HTTP1.1 wrapper of the official [SkyWalking Browser Protocol](Browser-Protocol.md). Read it for more details.
-
-## Performance Data Report
-
-Detailed information about data format can be found in [BrowserPerf.proto](https://github.com/apache/skywalking-data-collect-protocol/blob/master/browser/BrowserPerf.proto).
-
-### POST http://localhost:12800/browser/perfData
-
-Send a performance data object in JSON format.
-
-Input:
-
-```json
-{
-  "service": "web",
-  "serviceVersion": "v0.0.1",
-  "pagePath": "/index.html",
-  "redirectTime": 10,
-  "dnsTime": 10,
-  "ttfbTime": 10,
-  "tcpTime": 10,
-  "transTime": 10,
-  "domAnalysisTime": 10,
-  "fptTime": 10,
-  "domReadyTime": 10,
-  "loadPageTime": 10,
-  "resTime": 10,
-  "sslTime": 10,
-  "ttlTime": 10,
-  "firstPackTime": 10,
-  "fmpTime": 10
-}
-```
-
-Output:
-
-HTTP Status: 204
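-
-For reference, a minimal Java 11+ sketch that posts a payload like the one above (the OAP address is assumed to be the default `localhost:12800`):
-```java
-import java.net.URI;
-import java.net.http.HttpClient;
-import java.net.http.HttpRequest;
-import java.net.http.HttpResponse;
-
-public class PerfDataReport {
-    public static void main(String[] args) throws Exception {
-        String json = "{\"service\":\"web\",\"serviceVersion\":\"v0.0.1\",\"pagePath\":\"/index.html\",\"fmpTime\":10}";
-        HttpRequest request = HttpRequest.newBuilder()
-                .uri(URI.create("http://localhost:12800/browser/perfData"))
-                .header("Content-Type", "application/json")
-                .POST(HttpRequest.BodyPublishers.ofString(json))
-                .build();
-        HttpResponse<Void> response = HttpClient.newHttpClient()
-                .send(request, HttpResponse.BodyHandlers.discarding());
-        System.out.println(response.statusCode()); // expect 204
-    }
-}
-```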
-
-## Error Log Report
-
-Detailed information about data format can be found in [BrowserPerf.proto](https://github.com/apache/skywalking-data-collect-protocol/blob/master/browser/BrowserPerf.proto).
-
-### POST http://localhost:12800/browser/errorLogs
-
-Send an error log object list in JSON format.
-
-Input:
-
-```json
-[
-    {
-        "uniqueId": "55ec6178-3fb7-43ef-899c-a26944407b01",
-        "service": "web",
-        "serviceVersion": "v0.0.1",
-        "pagePath": "/index.html",
-        "category": "ajax",
-        "message": "error",
-        "line": 1,
-        "col": 1,
-        "stack": "error",
-        "errorUrl": "/index.html"
-    },
-    {
-        "uniqueId": "55ec6178-3fb7-43ef-899c-a26944407b02",
-        "service": "web",
-        "serviceVersion": "v0.0.1",
-        "pagePath": "/index.html",
-        "category": "ajax",
-        "message": "error",
-        "line": 1,
-        "col": 1,
-        "stack": "error",
-        "errorUrl": "/index.html"
-    }
-]
-```
-
-Output:
-
-HTTP Status: 204
-
-### POST http://localhost:12800/browser/errorLog
-
-Send a single error log object in JSON format.
-
-Input:
-
-```json
-{
-  "uniqueId": "55ec6178-3fb7-43ef-899c-a26944407b01",
-  "service": "web",
-  "serviceVersion": "v0.0.1",
-  "pagePath": "/index.html",
-  "category": "ajax",    
-  "message": "error",
-  "line": 1,
-  "col": 1,
-  "stack": "error",
-  "errorUrl": "/index.html"
-}
-```
-
-Output:
-
-HTTP Status: 204
diff --git a/docs/en/protocols/Browser-Protocol.md b/docs/en/protocols/Browser-Protocol.md
deleted file mode 100644
index 44607fb..0000000
--- a/docs/en/protocols/Browser-Protocol.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Browser Protocol
-
-Browser protocol describes the data format between [skywalking-client-js](https://github.com/apache/skywalking-client-js) and the backend.
-
-## Overview
-
-Browser protocol is defined and provided in [gRPC format](https://github.com/apache/skywalking-data-collect-protocol/blob/master/browser/BrowserPerf.proto),
-and also implemented in [HTTP 1.1](Browser-HTTP-API-Protocol.md).
-
-### Send performance data and error logs
-
-You can send performance data and error logs using the following services:
-
-1. `BrowserPerfService#collectPerfData` for performance data format.
-1. `BrowserPerfService#collectErrorLogs` for error log format.
-
-For error log format, note that:
-
-1. `BrowserErrorLog#uniqueId` should be unique in all distributed environments.
diff --git a/docs/en/protocols/HTTP-API-Protocol.md b/docs/en/protocols/HTTP-API-Protocol.md
deleted file mode 100644
index 1ade7c5..0000000
--- a/docs/en/protocols/HTTP-API-Protocol.md
+++ /dev/null
@@ -1,186 +0,0 @@
-# HTTP API Protocol
-
-HTTP API Protocol defines the API data format, including API request and response data format.
-They use the HTTP1.1 wrapper of the official [SkyWalking Trace Data Protocol v3](Trace-Data-Protocol-v3.md). Read it for more details.
-
-## Instance Management
-
-Detailed information about data format can be found in [Instance Management](https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto).
-
-- Report service instance properties
-
-> POST http://localhost:12800/v3/management/reportProperties
-
-Input:
-
-```json
-{
-	"service": "User Service Name",
-	"serviceInstance": "User Service Instance Name",
-	"properties": [{
-		"language": "Lua"
-	}]
-}
-```
-
-Output:
-
-```json
-{}
-```
-
-- Service instance ping
-
-> POST http://localhost:12800/v3/management/keepAlive
-
-Input:
-
-```json
-{
-	"service": "User Service Name",
-	"serviceInstance": "User Service Instance Name"
-}
-```
-
-Output:
-
-```json
-{}
-```
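-
-A minimal Java sketch that sends this ping once per minute as a keep-alive loop (a hedged example; the OAP address is assumed to be the default):
-```java
-import java.net.URI;
-import java.net.http.HttpClient;
-import java.net.http.HttpRequest;
-import java.net.http.HttpResponse;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-public class KeepAlivePinger {
-    public static void main(String[] args) {
-        HttpClient client = HttpClient.newHttpClient();
-        String json = "{\"service\":\"User Service Name\",\"serviceInstance\":\"User Service Instance Name\"}";
-        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
-            try {
-                HttpRequest request = HttpRequest.newBuilder()
-                        .uri(URI.create("http://localhost:12800/v3/management/keepAlive"))
-                        .header("Content-Type", "application/json")
-                        .POST(HttpRequest.BodyPublishers.ofString(json))
-                        .build();
-                client.send(request, HttpResponse.BodyHandlers.discarding());
-            } catch (Exception e) {
-                e.printStackTrace(); // keep pinging even if one attempt fails
-            }
-        }, 0, 1, TimeUnit.MINUTES);
-    }
-}
-```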
-
-## Trace Report
-
-Detailed information about data format can be found in [Trace Data Protocol](https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto).
-There are two ways to report segment data: one segment per request, or a segment array in bulk mode.
-
-### POST http://localhost:12800/v3/segment
-
-Send a single segment object in JSON format.
-
-Input:
-
-```json
-{
-	"traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
-	"serviceInstance": "User_Service_Instance_Name",
-	"spans": [{
-		"operationName": "/ingress",
-		"startTime": 1588664577013,
-		"endTime": 1588664577028,
-		"spanType": "Exit",
-		"spanId": 1,
-		"isError": false,
-		"parentSpanId": 0,
-		"componentId": 6000,
-		"peer": "upstream service",
-		"spanLayer": "Http"
-	}, {
-		"operationName": "/ingress",
-		"startTime": 1588664577013,
-		"tags": [{
-			"key": "http.method",
-			"value": "GET"
-		}, {
-			"key": "http.params",
-			"value": "http://localhost/ingress"
-		}],
-		"endTime": 1588664577028,
-		"spanType": "Entry",
-		"spanId": 0,
-		"parentSpanId": -1,
-		"isError": false,
-		"spanLayer": "Http",
-		"componentId": 6000
-	}],
-	"service": "User_Service_Name",
-	"traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e"
-}
-```
-Output:
-
-```json
-
-```
-
-### POST http://localhost:12800/v3/segments
-
-Send a segment object list in JSON format.
-
-Input:
-
-```json
-[{
-	"traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
-	"serviceInstance": "User_Service_Instance_Name",
-	"spans": [{
-		"operationName": "/ingress",
-		"startTime": 1588664577013,
-		"endTime": 1588664577028,
-		"spanType": "Exit",
-		"spanId": 1,
-		"isError": false,
-		"parentSpanId": 0,
-		"componentId": 6000,
-		"peer": "upstream service",
-		"spanLayer": "Http"
-	}, {
-		"operationName": "/ingress",
-		"startTime": 1588664577013,
-		"tags": [{
-			"key": "http.method",
-			"value": "GET"
-		}, {
-			"key": "http.params",
-			"value": "http://localhost/ingress"
-		}],
-		"endTime": 1588664577028,
-		"spanType": "Entry",
-		"spanId": 0,
-		"parentSpanId": -1,
-		"isError": false,
-		"spanLayer": "Http",
-		"componentId": 6000
-	}],
-	"service": "User_Service_Name",
-	"traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e"
-}, {
-	"traceId": "f956699e-5106-4ea3-95e5-da748c55bac1",
-	"serviceInstance": "User_Service_Instance_Name",
-	"spans": [{
-		"operationName": "/ingress",
-		"startTime": 1588664577250,
-		"endTime": 1588664577250,
-		"spanType": "Exit",
-		"spanId": 1,
-		"isError": false,
-		"parentSpanId": 0,
-		"componentId": 6000,
-		"peer": "upstream service",
-		"spanLayer": "Http"
-	}, {
-		"operationName": "/ingress",
-		"startTime": 1588664577250,
-		"tags": [{
-			"key": "http.method",
-			"value": "GET"
-		}, {
-			"key": "http.params",
-			"value": "http://localhost/ingress"
-		}],
-		"endTime": 1588664577250,
-		"spanType": "Entry",
-		"spanId": 0,
-		"parentSpanId": -1,
-		"isError": false,
-		"spanLayer": "Http",
-		"componentId": 6000
-	}],
-	"service": "User_Service_Name",
-	"traceSegmentId": "f956699e-5106-4ea3-95e5-da748c55bac1"
-}]
-```
-Output:
-
-```json
-
-```
diff --git a/docs/en/protocols/JVM-Protocol.md b/docs/en/protocols/JVM-Protocol.md
deleted file mode 100644
index 36214d4..0000000
--- a/docs/en/protocols/JVM-Protocol.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# JVM Metrics Service
-## Abstract
-The agent uplinks JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.
-
-[gRPC service definition](https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto)
diff --git a/docs/en/protocols/Log-Data-Protocol.md b/docs/en/protocols/Log-Data-Protocol.md
deleted file mode 100644
index 10ca593..0000000
--- a/docs/en/protocols/Log-Data-Protocol.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Log Data Protocol
-
-Report log data via the following protocols.
-
-## Native Proto Protocol
-
-Report logs in `native-proto` format via gRPC.
-
-[gRPC service definition](https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto)
-
-## Native Kafka Protocol
-
-Report logs in `native-json` format via Kafka.
-
-JSON log record example:
-```json
-{
-    "timestamp":1618161813371,
-    "service":"Your_ApplicationName",
-    "serviceInstance":"3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8",
-    "traceContext":{
-        "traceId":"ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001",
-        "spanId":"0",
-        "traceSegmentId":"ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000"
-    },
-    "tags":{
-        "data":[
-            {
-                "key":"level",
-                "value":"INFO"
-            },
-            {
-                "key":"logger",
-                "value":"com.example.MyLogger"
-            }
-        ]
-    },
-    "body":{
-        "text":{
-            "text":"log message"
-        }
-    }
-}
-```
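-
-A minimal Java producer sketch for this format. The topic name `skywalking-logs` is an assumption here; it should match the topic configured for the OAP Kafka fetcher:
-```java
-import java.util.Properties;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-
-public class KafkaLogReporter {
-    public static void main(String[] args) {
-        Properties props = new Properties();
-        props.put("bootstrap.servers", "localhost:9092");
-        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
-            // A trimmed-down record in the `native-json` format shown above.
-            String json = "{\"timestamp\":1618161813371,\"service\":\"Your_ApplicationName\","
-                    + "\"body\":{\"text\":{\"text\":\"log message\"}}}";
-            producer.send(new ProducerRecord<>("skywalking-logs", json));
-        }
-    }
-}
-```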
-
-## HTTP API
-
-Report `json` format logs via the HTTP API; the endpoint is `http://<oap-address>:12800/v3/logs`.
-
-JSON log record example:
-
-```json
-[
-  {
-    "timestamp": 1618161813371,
-    "service": "Your_ApplicationName",
-    "serviceInstance": "3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8",
-    "traceContext": {
-      "traceId": "ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001",
-      "spanId": "0",
-      "traceSegmentId": "ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000"
-    },
-    "tags": {
-      "data": [
-        {
-          "key": "level",
-          "value": "INFO"
-        },
-        {
-          "key": "logger",
-          "value": "com.example.MyLogger"
-        }
-      ]
-    },
-    "body": {
-      "text": {
-        "text": "log message"
-      }
-    }
-  }
-]
-```
-
diff --git a/docs/en/protocols/README.md b/docs/en/protocols/README.md
deleted file mode 100644
index 49d9757..0000000
--- a/docs/en/protocols/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Protocols
-There are two different types of protocols.
-
-- [**Probe Protocol**](#probe-protocols). It includes descriptions and definitions on how agents send collected metrics data and traces, as well as the format of each entity.
-
-- [**Query Protocol**](#query-protocol). The backend enables the query function in SkyWalking's own UI and other UIs. These queries are based on GraphQL.
-
-
-## Probe Protocols
-These protocols are related to the probe groups. For more information, see [Concepts and Designs](../concepts-and-designs/overview.md).
-The groups are **language-based native agent protocol**, **service mesh protocol** and **3rd-party instrument protocol**.
-
-### Language-based native agent protocol
-There are two types of protocols that help language agents work in distributed environments.
-1. **Cross Process Propagation Headers Protocol** and **Cross Process Correlation Headers Protocol** are in-wire data formats. Agent/SDK usually uses HTTP/MQ/HTTP2 headers
-to carry the data with the RPC request. The remote agent receives this in the request handler, and binds the context with this specific request.
-1. **Trace Data Protocol** is an out-of-wire data format. Agent/SDK uses this to send traces and metrics to SkyWalking or other compatible backends. 
-
-[Cross Process Propagation Headers Protocol v3](Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md) has been the new protocol for in-wire context propagation since the version 8.0.0 release.
-
-[Cross Process Correlation Headers Protocol v1](Skywalking-Cross-Process-Correlation-Headers-Protocol-v1.md) is a new in-wire context propagation protocol which is additional and optional. 
-Please read SkyWalking language agents documentation to see whether it is supported. 
-This protocol defines the data format of transporting custom data with `Cross Process Propagation Headers Protocol`.
-It has been supported by the SkyWalking javaagent since 8.0.0.
-
-[SkyWalking Trace Data Protocol v3](Trace-Data-Protocol-v3.md) defines the communication method and format between the agent and backend.
-
-[SkyWalking Log Data Protocol](Log-Data-Protocol.md) defines the communication method and format between the agent and backend.
-
-### Browser probe protocol
-
-The browser probe, such as [skywalking-client-js](https://github.com/apache/skywalking-client-js), could use this protocol to send data to the backend. This service is provided by gRPC.
-
-[SkyWalking Browser Protocol](Browser-Protocol.md) defines the communication method and format between `skywalking-client-js` and backend.
-
-### Service Mesh probe protocol
-The probe in sidecar or proxy could use this protocol to send data to the backend. This service provided by gRPC requires 
-the following key information:
-
-1. Service Name or ID on both sides.
-1. Service Instance Name or ID on both sides.
-1. Endpoint. URI in HTTP, service method full signature in gRPC.
-1. Latency. In milliseconds.
-1. Response code in HTTP.
-1. Status. Success or fail.
-1. Protocol. HTTP or gRPC.
-1. DetectPoint. In a Service Mesh sidecar, `client` or `server`. In a normal L7 proxy, the value is `proxy`.
-
-### Events Report Protocol
-
-The protocol is used to report events to the backend. The [doc](../concepts-and-designs/event.md) introduces the definition of an event, and [the protocol repository](https://github.com/apache/skywalking-data-collect-protocol/blob/master/event) defines gRPC services and message formats of events.
-
-Report `JSON` format events via the HTTP API; the endpoint is `http://<oap-address>:12800/v3/events`.
-JSON event record example:
-```json
-[
-    {
-        "uuid": "f498b3c0-8bca-438d-a5b0-3701826ae21c",
-        "source": {
-            "service": "SERVICE-A",
-            "instance": "INSTANCE-1"
-        },
-        "name": "Reboot",
-        "type": "Normal",
-        "message": "App reboot.",
-        "parameters": {},
-        "startTime": 1628044330000,
-        "endTime": 1628044331000
-    }
-]
-```
-
-### 3rd-party instrument protocol
-3rd-party instrument protocols are not defined by SkyWalking. They are just protocols/formats with which SkyWalking is compatible, and SkyWalking could receive them from their existing libraries. SkyWalking starts with supporting Zipkin v1, v2 data formats.
-
-The backend has a modular design, so it is very easy to extend a new receiver to support a new protocol/format.
-
-## Query Protocol
-The query protocol follows GraphQL grammar, and provides data query capabilities, which depends on your analysis metrics.
-Read [query protocol doc](query-protocol.md) for more details.
diff --git a/docs/en/protocols/Skywalking-Cross-Process-Correlation-Headers-Protocol-v1.md b/docs/en/protocols/Skywalking-Cross-Process-Correlation-Headers-Protocol-v1.md
deleted file mode 100644
index 5bdf1de..0000000
--- a/docs/en/protocols/Skywalking-Cross-Process-Correlation-Headers-Protocol-v1.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# SkyWalking Cross Process Correlation Headers Protocol
-* Version 1.0
-
-The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of [Cross Process Propagation Headers Protocol](Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md). 
-
-This is an optional and additional protocol for language tracer implementations. All tracer implementations could consider supporting it.
-The Cross Process Correlation Header key is `sw8-correlation`. The value is the `encoded(key):encoded(value)` list with elements split by `,`, such as `base64(string key):base64(string value),base64(string key2):base64(string value2)`. A sketch of building this value appears at the end of this document.
-
-## Recommendations for language APIs
-The following implementation method is recommended for different language APIs.
-
-1. `TraceContext#putCorrelation` and `TraceContext#getCorrelation` are recommended to write and read the correlation context, with key/value string.
-1. The key should be added if it is absent.
-1. The latter writes should override the previous value.
-1. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes.
-1. The context should be propagated as well when tracing context is propagated across threads and processes.
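-
-A minimal Java sketch of building the `sw8-correlation` value from a key/value context, following the format above:
-```java
-import java.nio.charset.StandardCharsets;
-import java.util.Base64;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-public class CorrelationHeader {
-    // base64(key):base64(value) pairs joined by `,`.
-    static String encode(Map<String, String> context) {
-        return context.entrySet().stream()
-                .map(e -> b64(e.getKey()) + ":" + b64(e.getValue()))
-                .collect(Collectors.joining(","));
-    }
-
-    private static String b64(String s) {
-        return Base64.getEncoder().encodeToString(s.getBytes(StandardCharsets.UTF_8));
-    }
-
-    public static void main(String[] args) {
-        System.out.println(encode(Map.of("tenant", "acme"))); // dGVuYW50:YWNtZQ==
-    }
-}
-```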
diff --git a/docs/en/protocols/Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md b/docs/en/protocols/Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md
deleted file mode 100644
index 2044a41..0000000
--- a/docs/en/protocols/Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# SkyWalking Cross Process Propagation Headers Protocol
-* Version 3.0
-
-SkyWalking is more akin to an APM system than to a common distributed tracing system. 
-SkyWalking's headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. 
-You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).
-
-## Abstract
-The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.
-
-### Standard Header Item
-The standard header is the minimal requirement for context propagation.
-* Header Name: `sw8`.
-* Header Value: 8 fields split by `-`. The length of header value must be less than 2k (default).
-
-Example of the value format: `XXXXX-XXXXX-XXXX-XXXX`
-
-#### Values
-Values must include the following segments, and all string type values are in BASE64 encoding.
-
-- Required:
-1. Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. 
-1. Trace ID. **String(BASE64 encoded)**. A literal string that is globally unique.
-1. Parent trace segment ID. **String(BASE64 encoded)**. A literal string that is globally unique.
-1. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in the parent trace segment.
-1. Parent service.  **String(BASE64 encoded)**. Its length should be no more than 50 UTF-8 characters.
-1. Parent service instance.  **String(BASE64 encoded)**.  Its length should be no more than 50 UTF-8 characters.
-1. Parent endpoint. **String(BASE64 encoded)**. The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters.
-1. Target address of this request used on the client end. **String(BASE64 encoded)**. The network address (not necessarily IP + port) used on the client end to access this target service.
-
-- Sample values:
-`1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT`
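-
-A minimal Java sketch assembling the header value from the eight fields above (the field values are placeholders):
-```java
-import java.nio.charset.StandardCharsets;
-import java.util.Base64;
-
-public class Sw8Header {
-    static String b64(String s) {
-        return Base64.getEncoder().encodeToString(s.getBytes(StandardCharsets.UTF_8));
-    }
-
-    public static void main(String[] args) {
-        String sw8 = String.join("-",
-                "1",                       // 1. sample flag
-                b64("trace-id"),           // 2. trace ID
-                b64("parent-segment-id"),  // 3. parent trace segment ID
-                "3",                       // 4. parent span ID (plain integer)
-                b64("parent-service"),     // 5. parent service
-                b64("parent-instance"),    // 6. parent service instance
-                b64("/ingress"),           // 7. parent endpoint
-                b64("upstream:8080"));     // 8. target address used on the client end
-        System.out.println(sw8);
-    }
-}
-```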
-
-### Extension Header Item
-The extension header item is designed for advanced features. It provides interaction capabilities between the agents
-deployed in upstream and downstream services.
-* Header Name: `sw8-x`
-* Header Value: Split by `-`. The fields are extendable.
-
-#### Values
-The current value includes the following fields.
-1. Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis,
-`spanObject#skipAnalysis=true`. This context is propagated to upstream by default, unless it is changed in the 
-tracing process.
-2. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key `transmission.latency` automatically.
-
diff --git a/docs/en/protocols/Trace-Data-Protocol-v3.md b/docs/en/protocols/Trace-Data-Protocol-v3.md
deleted file mode 100644
index 3fe886f..0000000
--- a/docs/en/protocols/Trace-Data-Protocol-v3.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Trace Data Protocol v3
-Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend. 
-
-## Overview
-Trace data protocol is defined and provided in [gRPC format](https://github.com/apache/skywalking-data-collect-protocol),
-and also implemented in [HTTP 1.1](HTTP-API-Protocol.md).
-
-### Report service instance status
-1. Service Instance Properties 
-Service instance contains more information than just a name. Once the agent wants to report this, use the `ManagementService#reportInstanceProperties` service
-to provide a string-key/string-value pair list as the parameter. At least the `language` of the target instance is expected.
-
-2. Service Ping
-Service instance should keep alive with the backend. The agent should set a scheduler using `ManagementService#keepAlive` service every minute.
-
-### Send trace and metrics
-After you have the service ID and service instance ID ready, you could send traces and metrics. Now we
-have 
-1. `TraceSegmentReportService#collect` for the SkyWalking native trace format
-1. `JVMMetricReportService#collect` for the SkyWalking native jvm format
-
-For trace format, note that:
-1. The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread.
-2. There are three types of spans.
-
-* EntrySpan
-EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the 
-application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.
-
-* LocalSpan
-LocalSpan represents a typical Java method which is not related to remote services. It is neither an MQ producer/consumer
-nor a provider/consumer of a service (e.g. HTTP service).
-
-* ExitSpan
-ExitSpan represents a client of a service or an MQ-producer. It was known as the `LeafSpan` in the early stages of SkyWalking.
-For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans. 
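-
-For illustration, the three span types map onto the SkyWalking Java agent core API roughly as in this simplified sketch (see the plugin development guide for authoritative usage):
-```java
-import org.apache.skywalking.apm.agent.core.context.ContextCarrier;
-import org.apache.skywalking.apm.agent.core.context.ContextManager;
-
-public class SpanTypesSketch {
-    void handleRequest(ContextCarrier incoming) {
-        ContextManager.createEntrySpan("/ingress", incoming);          // EntrySpan: service provider side
-        ContextManager.createLocalSpan("BusinessLogic#run");           // LocalSpan: in-process method
-        ContextManager.stopSpan();                                     // stop the local span
-        ContextCarrier outgoing = new ContextCarrier();
-        ContextManager.createExitSpan("/downstream", outgoing, "downstream:8080"); // ExitSpan: client side
-        ContextManager.stopSpan();                                     // stop the exit span
-        ContextManager.stopSpan();                                     // stop the entry span
-    }
-}
-```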
-
-3. Cross-thread/process span parent information is called "reference". Reference carries the trace ID, 
-segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) 
-of this request in the parent. 
-See [Cross Process Propagation Headers Protocol v3](Skywalking-Cross-Process-Propagation-Headers-Protocol-v3.md) for more details.
-
-4. `Span#skipAnalysis` may be TRUE, if this span doesn't require backend analysis.
-
diff --git a/docs/en/protocols/query-protocol.md b/docs/en/protocols/query-protocol.md
deleted file mode 100644
index 558ce4d..0000000
--- a/docs/en/protocols/query-protocol.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Query Protocol
-Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with the SkyWalking
-native visualization tool or 3rd-party systems, including the Web UI, CLI, or private systems.
-
-The official query protocol repository: https://github.com/apache/skywalking-query-protocol.
-
-### Metadata  
-Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring.
-You may query the metadata in different ways.
-```graphql
-extend type Query {
-    getGlobalBrief(duration: Duration!): ClusterBrief
-
-    # Normal service related metainfo 
-    getAllServices(duration: Duration!): [Service!]!
-    searchServices(duration: Duration!, keyword: String!): [Service!]!
-    searchService(serviceCode: String!): Service
-    
-    # Fetch all services of Browser type
-    getAllBrowserServices(duration: Duration!): [Service!]!
-
-    # Service instance query
-    getServiceInstances(duration: Duration!, serviceId: ID!): [ServiceInstance!]!
-
-    # Endpoint query
-    # Since there may be a huge number of endpoints,
-    # the query must use the endpoint owner's service id, keyword, and limit to filter.
-    searchEndpoint(keyword: String!, serviceId: ID!, limit: Int!): [Endpoint!]!
-    getEndpointInfo(endpointId: ID!): EndpointInfo
-
-    # Database related meta info.
-    getAllDatabases(duration: Duration!): [Database!]!
-    getTimeInfo: TimeInfo
-}
-```
-
-### Topology
-The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.
-
-```graphql
-extend type Query {
-    # Query the global topology
-    getGlobalTopology(duration: Duration!): Topology
-    # Query the topology, based on the given service
-    getServiceTopology(serviceId: ID!, duration: Duration!): Topology
-    # Query the topology, based on the given services.
-    # `#getServiceTopology` could be replaced by this.
-    getServicesTopology(serviceIds: [ID!]!, duration: Duration!): Topology
-    # Query the instance topology, based on the given clientServiceId and serverServiceId
-    getServiceInstanceTopology(clientServiceId: ID!, serverServiceId: ID!, duration: Duration!): ServiceInstanceTopology
-    # Query the topology, based on the given endpoint
-    getEndpointTopology(endpointId: ID!, duration: Duration!): Topology
-    # v2 of getEndpointTopology
-    getEndpointDependencies(endpointId: ID!, duration: Duration!): EndpointTopology
-}
-```
-
-### Metrics
-Metrics query targets all objects defined in [OAL script](../concepts-and-designs/oal.md) and [MAL](../concepts-and-designs/mal.md). 
-You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in the script. 
-
-#### V2 APIs
-Metrics V2 query APIs have been provided since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.
-```graphql
-extend type Query {
-    # Metrics definition metadata query. Responds with the metrics type, which determines the suitable query methods.
-    typeOfMetrics(name: String!): MetricsType!
-    # Get the list of all available metrics in the current OAP server.
-    # Param, regex, could be used to filter the metrics by name.
-    listMetrics(regex: String): [MetricDefinition!]!
-
-    # Read metrics single value in the duration of required metrics
-    readMetricsValue(condition: MetricsCondition!, duration: Duration!): Long!
-    # Read time-series values in the duration of required metrics
-    readMetricsValues(condition: MetricsCondition!, duration: Duration!): MetricsValues!
-    # Read entity list of required metrics and parent entity type.
-    sortMetrics(condition: TopNCondition!, duration: Duration!): [SelectedRecord!]!
-    # Read value in the given time duration, usually as a linear.
-    # labels: the labels you need to query.
-    readLabeledMetricsValues(condition: MetricsCondition!, labels: [String!]!, duration: Duration!): [MetricsValues!]!
-    # Heatmap is bucket based value statistic result.
-    readHeatMap(condition: MetricsCondition!, duration: Duration!): HeatMap
-    # Read the sampled records
-    # TopNCondition#scope is not required.
-    readSampledRecords(condition: TopNCondition!, duration: Duration!): [SelectedRecord!]!
-}
-```
-
-#### V1 APIs
-Three types of metrics can be queried. The V1 APIs were introduced in 6.x; they are now a shell over the V2 APIs.
-1. Single value. Most default metrics are in single value. `getValues` and `getLinearIntValues` are suitable for this purpose.
-1. Multiple value.  A metric defined in OAL includes multiple value calculations. Use `getMultipleLinearIntValues` to obtain all values. `percentile` is a typical multiple value function in OAL.
-1. Heatmap value. Read [Heatmap in WIKI](https://en.wikipedia.org/wiki/Heat_map) for details. `thermodynamic` is the only OAL function. Use `getThermodynamic` to get the values.
-```graphql
-extend type Query {
-    getValues(metric: BatchMetricConditions!, duration: Duration!): IntValues
-    getLinearIntValues(metric: MetricCondition!, duration: Duration!): IntValues
-    # Query the type of metrics including multiple values, and format them as multiple linear series.
-    # The sequence of these multiple lines is based on the calculation function in OAL.
-    # For example, use this to query the result of func percentile(50,75,90,95,99) in OAL;
-    # five lines will be returned, and p50 is the first element of the return value.
-    getMultipleLinearIntValues(metric: MetricCondition!, numOfLinear: Int!, duration: Duration!): [IntValues!]!
-    getThermodynamic(metric: MetricCondition!, duration: Duration!): Thermodynamic
-}
-```
-
-Metrics are defined in the `config/oal/*.oal` files.
-
-### Aggregation
-Aggregation query means that the metrics data need a secondary aggregation at the query stage, which causes the query 
-interfaces to have some different arguments. A typical example of an aggregation query is the `TopN` list of services. 
-Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering the metrics data
-by their values.
-
-Aggregation query is for single value metrics only.
-
-```graphql
-# The aggregation query is different from the metrics query.
-# All aggregation queries require the backend and/or storage to do aggregation at query time.
-extend type Query {
-    # TopN is an aggregation query.
-    getServiceTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]!
-    getAllServiceInstanceTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]!
-    getServiceInstanceTopN(serviceId: ID!, name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]!
-    getAllEndpointTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]!
-    getEndpointTopN(serviceId: ID!, name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]!
-}
-```
-
-### Others
-The following queries are for specific features, including trace, alarm, and profile.
-1. Trace. Query distributed traces by this.
-1. Alarm. Through alarm query, you can find alarm trends and their details.
-
-The actual query GraphQL scripts can be found in the `query-protocol` folder [here](../../../oap-server/server-query-plugin/query-graphql-plugin/src/main/resources).
-
-## Condition
-### Duration
-Duration is a widely used parameter type as the APM data is time-related. See the following for more details. 
-Step relates to precision. 
-```graphql
-# The Duration defines the start and end time for each query operation.
-# Fields: `start` and `end`
-#   represent the time span. Each of them matches the step.
-#   ref https://www.ietf.org/rfc/rfc3339.txt
-#   The time formats are
-#       `SECOND` step: yyyy-MM-dd HHmmss
-#       `MINUTE` step: yyyy-MM-dd HHmm
-#       `HOUR` step: yyyy-MM-dd HH
-#       `DAY` step: yyyy-MM-dd
-#       `MONTH` step: yyyy-MM
-# Field: `step`
-#   represents the accurate time point.
-# e.g.
-#   if step==HOUR , start=2017-11-08 09, end=2017-11-08 19
-#   then
-#       metrics from the following time points expected
-#       2017-11-08 9:00 -> 2017-11-08 19:00
-#       there are 11 time points (hours) in the time span.
-input Duration {
-    start: String!
-    end: String!
-    step: Step!
-}
-
-enum Step {
-    MONTH
-    DAY
-    HOUR
-    MINUTE
-    SECOND
-}
-```
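-
-A small Java sketch showing how a client might format the `start`/`end` fields for each step (plain JDK formatting; not part of the protocol itself):
-```java
-import java.time.LocalDateTime;
-import java.time.format.DateTimeFormatter;
-
-public class DurationFormats {
-    public static void main(String[] args) {
-        LocalDateTime now = LocalDateTime.now();
-        System.out.println(now.format(DateTimeFormatter.ofPattern("yyyy-MM-dd")));      // DAY step
-        System.out.println(now.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH")));   // HOUR step
-        System.out.println(now.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HHmm"))); // MINUTE step
-    }
-}
-```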
diff --git a/docs/en/setup/backend/advanced-deployment.md b/docs/en/setup/backend/advanced-deployment.md
deleted file mode 100644
index 5064de6..0000000
--- a/docs/en/setup/backend/advanced-deployment.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Advanced deployment
-OAP servers communicate with each other in a cluster environment. 
-In the cluster mode, you could run in different roles.
-- Mixed(default)
-- Receiver
-- Aggregator
-
-Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.
-
-## Mixed
-By default, the OAP is responsible for:
-1. Receiving agent traces or metrics.
-1. L1 aggregation
-1. Internal communication (sending/receiving)
-1. L2 aggregation
-1. Persistence
-1. Alarm
-
-## Receiver
-The OAP is responsible for:
-1. Receiving agent traces or metrics.
-1. L1 aggregation
-1. Internal communication (sending)
-
-## Aggregator
-The OAP is responsible for:
-1. Internal communication (receiving)
-1. L2 aggregation
-1. Persistence
-1. Alarm
-
-___
-These roles are designed for complex deployment requirements on security and network policy.
-
-## Kubernetes
-If you are using our native [Kubernetes coordinator](backend-cluster.md#kubernetes), the `labelSelector`
-setting is used for `Aggregator` role selection rules. Choose the right OAP deployment based on your needs.
diff --git a/docs/en/setup/backend/apdex-threshold.md b/docs/en/setup/backend/apdex-threshold.md
deleted file mode 100644
index 7a4e80d..0000000
--- a/docs/en/setup/backend/apdex-threshold.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Apdex threshold
-
-Apdex is a measure of response time against a set threshold. It measures the ratio of satisfactory response times
-to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to 
-the requestor.
- 
-A user defines a response time threshold T. All responses handled in T or less time satisfy the user.
- 
-For example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses 
-greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.
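-
-For reference, the standard Apdex score derived from these buckets (satisfied: at most T; tolerating: between T and 4T; frustrated: above 4T) is computed as follows. This is the generic Apdex formula, not SkyWalking-specific code:
-```java
-public class Apdex {
-    // Apdex = (satisfied + tolerating / 2) / total samples.
-    static double score(long satisfied, long tolerating, long frustrated) {
-        long total = satisfied + tolerating + frustrated;
-        return total == 0 ? 1.0 : (satisfied + tolerating / 2.0) / total;
-    }
-
-    public static void main(String[] args) {
-        System.out.println(score(90, 8, 2)); // 0.94
-    }
-}
-```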
-
-The Apdex threshold T can be configured in the `service-apdex-threshold.yml` file or via [Dynamic Configuration](dynamic-config.md). 
-The `default` item serves as the default threshold and applies to any service not defined in this configuration.
-
-## Configuration Format
-
-The configuration content includes the names and thresholds of the services:
-
-```yml
-# default threshold is 500ms
-default: 500
-# example:
-# the threshold of service "tomcat" is 1s
-# tomcat: 1000
-# the threshold of service "springboot1" is 50ms
-# springboot1: 50
-```
diff --git a/docs/en/setup/backend/backend-alarm.md b/docs/en/setup/backend/backend-alarm.md
deleted file mode 100644
index 7e6e34f..0000000
--- a/docs/en/setup/backend/backend-alarm.md
+++ /dev/null
@@ -1,308 +0,0 @@
-# Alarm
-The alarm core is driven by a collection of rules, which are defined in `config/alarm-settings.yml`.
-There are three parts to the alarm rule definition.
-1. [Alarm rules](#rules). They define how metrics alarm should be triggered and what conditions should be considered.
-1. [Webhooks](#webhook). The list of web service endpoints, which should be called after the alarm is triggered.
-1. [gRPCHook](#gRPCHook). The host and port of the remote gRPC method, which should be called after the alarm is triggered.
-
-## Entity name
-Defines the relation between scope and entity name.
-- **Service**: Service name
-- **Instance**: {Instance name} of {Service name}
-- **Endpoint**: {Endpoint name} in {Service name}
-- **Database**: Database service name
-- **Service Relation**: {Source service name} to {Dest service name}
-- **Instance Relation**: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name}
-- **Endpoint Relation**: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}
-
-## Rules
-**There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.**
-### Individual rules
-An alarm rule is made up of the following elements:
-- **Rule name**. A unique name shown in the alarm message. It must end with `_rule`.
-- **Metrics name**. This is also the metrics name in the OAL script. Only long, double, and int types are supported. See the
-[list of all potential metrics names](#list-of-all-potential-metrics-name). Events can also be configured as the source
-of an alarm; please refer to [the event doc](../../concepts-and-designs/event.md) for more details.
-- **Include names**. Entity names which are included in this rule. Please follow the [entity name definitions](#entity-name).
-- **Exclude names**. Entity names which are excluded from this rule. Please follow the [entity name definitions](#entity-name).
-- **Include names regex**. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect.
-- **Exclude names regex**. A regex that excludes entity names. If both exclude-name list and exclude-name regex are set, both rules will take effect.
-- **Include labels**. Metric labels which are included in this rule.
-- **Exclude labels**. Metric labels which are excluded from this rule.
-- **Include labels regex**. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect.
-- **Exclude labels regex**. A regex that excludes labels. If both the exclude-label list and exclude-label regex are set, both rules will take effect.
-- **Tags**. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you would like to make these tags searchable on the SkyWalking UI, you may set the tag keys in `core/default/searchableAlarmTags`, or through system environment variable `SW_SEARCHABLE_ALARM_TAG_KEYS`. The key `level` is supported by default.
-
-*Label settings are required by the meter-system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc.
-The four label settings mentioned above must implement `LabeledValueHolder`.*
-
-- **Threshold**. The target value. 
-For multiple-value metrics, such as **percentile**, the threshold is an array. It is described as:  `value1, value2, value3, value4, value5`.
-Each value may serve as the threshold for each value of the metrics. Set the value to `-` if you do not wish to trigger the alarm by one or more of the values.  
-For example in **percentile**, `value1` is the threshold of P50, and `-, -, value3, value4, value5` means that there is no threshold for P50 and P75 in the percentile alarm rule.
-- **OP**. The operator. It supports `>`, `>=`, `<`, `<=`, `=`. We welcome contributions of all OPs.
-- **Period**. The frequency for checking the alarm rule. This is a time window that corresponds to the backend deployment env time.
-- **Count**. Within a period window, if the number of times the **value** goes over the threshold (based on OP) reaches `count`, an alarm will be sent.
-- **Only as condition**. Indicates whether the rule can send notifications, or whether it simply serves as a condition of the composite rule.
-- **Silence period**. After the alarm is triggered in Time-N, there will be silence during the **TN -> TN + period**.
-By default, it works in the same manner as **period**. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period. 
-
-### Composite rules
-**NOTE**: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (`service_percent_rule && service_resp_time_percentile_rule`). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.
-
-A composite rule is made up of the following elements:
-- **Rule name**. A unique name shown in the alarm message. Must end with `_rule`.
-- **Expression**. Specifies how to compose rules, and supports `&&`, `||`, and `()`.
-- **Message**. The notification message to be sent out when the rule is triggered.
-- **Tags**. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.
-```yaml
-rules:
-  # Rule unique name, must end with `_rule`.
-  endpoint_percent_rule:
-    # Metrics value need to be long, double or int
-    metrics-name: endpoint_percent
-    threshold: 75
-    op: <
-    # The length of time to evaluate the metrics
-    period: 10
-    # How many times the metrics must match the condition before the alarm is triggered
-    count: 3
-    # For how many checks the alarm stays silent after being triggered; defaults to the same value as period.
-    silence-period: 10
-    # Specify whether the rule can send a notification or only serves as a condition of a composite rule
-    only-as-condition: false
-    tags:
-      level: WARNING
-  service_percent_rule:
-    metrics-name: service_percent
-    # [Optional] By default, matches all services for this metrics name
-    include-names:
-      - service_a
-      - service_b
-    exclude-names:
-      - service_c
-    # Single value metrics threshold.
-    threshold: 85
-    op: <
-    period: 10
-    count: 4
-    only-as-condition: false
-  service_resp_time_percentile_rule:
-    # Metrics value need to be long, double or int
-    metrics-name: service_percentile
-    op: ">"
-    # Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.
-    threshold: 1000,1000,1000,1000,1000
-    period: 10
-    count: 3
-    silence-period: 5
-    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
-    only-as-condition: false
-  meter_service_status_code_rule:
-    metrics-name: meter_status_code
-    exclude-labels:
-      - "200"
-    op: ">"
-    threshold: 10
-    period: 10
-    count: 3
-    silence-period: 5
-    message: The request number of entity {name} non-200 status is more than expected.
-    only-as-condition: false
-composite-rules:
-  comp_rule:
-    # Both the percent rule and the resp time rule must be satisfied
-    expression: service_percent_rule && service_resp_time_percentile_rule
-    message: Service {name} successful rate is less than 80% and P50 of response time is over 1000ms
-    tags:
-      level: CRITICAL
-```
-
-
-### Default alarm rules
-For convenience's sake, we have provided a default `alarm-setting.yml` in our release. It includes the following rules:
-1. Service average response time over 1s in the last 3 minutes.
-1. Service success rate lower than 80% in the last 2 minutes.
-1. Percentile of service response time over 1s in the last 3 minutes.
-1. Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
-1. Endpoint average response time over 1s in the last 2 minutes.
-1. Database access average response time over 1s in the last 2 minutes.
-1. Endpoint relation average response time over 1s in the last 2 minutes.
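-
-As a sketch, the first of these default rules could be expressed as follows. The names, thresholds, and message here are illustrative; see the shipped `alarm-settings.yml` for the exact defaults:
-```yaml
-rules:
-  service_resp_time_rule:
-    metrics-name: service_resp_time
-    op: ">"
-    threshold: 1000  # milliseconds
-    period: 10
-    count: 3
-    silence-period: 5
-    message: Response time of service {name} is more than 1s in 3 minutes of last 10 minutes.
-```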
-
-### List of all potential metrics names
-The metrics names are defined in the official [OAL scripts](../../guides/backend-oal-scripts.md) and
-[MAL scripts](../../concepts-and-designs/mal.md). The [Event](../../concepts-and-designs/event.md) names can also serve
-as metrics names; all possible event names can be found in [the Event doc](../../concepts-and-designs/event.md).
-
-Currently, metrics from the **Service**, **Service Instance**, **Endpoint**, **Service Relation**, **Service Instance Relation**, and **Endpoint Relation** scopes can be used in alarms. The **Database access** scope is the same as **Service**.
-
-Submit an issue or a pull request if you would like alarms to support any other scopes.
-
-## Webhook
-The Webhook requires the peer to be a web container. The alarm message will be sent via HTTP POST with the `application/json` content type. The JSON format is based on `List<org.apache.skywalking.oap.server.core.alarm.AlarmMessage>` with the following key information:
-- **scopeId**, **scope**. All scopes are defined in `org.apache.skywalking.oap.server.core.source.DefaultScopeDefine`.
-- **name**. Target scope entity name. Please follow the [entity name definitions](#entity-name).
-- **id0**. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID.
-- **id1**. When using the relation scope, it is the destination entity ID. Otherwise, it is empty.
-- **ruleName**. The rule name configured in `alarm-settings.yml`.
-- **alarmMessage**. The alarm text message.
-- **startTime**. The alarm time in milliseconds, i.e. the difference between the alarm time and midnight of January 1, 1970 UTC.
-- **tags**. The tags configured in `alarm-settings.yml`.
-
-See the following example:
-```json
-[{
-    "scopeId": 1,
-    "scope": "SERVICE",
-    "name": "serviceA",
-    "id0": "12",
-    "id1": "",
-    "ruleName": "service_resp_time_rule",
-    "alarmMessage": "alarmMessage xxxx",
-    "startTime": 1560524171000,
-    "tags": [{
-        "key": "level",
-        "value": "WARNING"
-    }]
-}, {
-    "scopeId": 1,
-    "scope": "SERVICE",
-    "name": "serviceB",
-    "id0": "23",
-    "id1": "",
-    "ruleName": "service_resp_time_rule",
-    "alarmMessage": "alarmMessage yyy",
-    "startTime": 1560524171000,
-    "tags": [{
-        "key": "level",
-        "value": "CRITICAL"
-    }]
-}]
-```
-
-## gRPCHook
-The alarm message will be sent through a remote gRPC method with the `Protobuf` content type.
-The message contains the key information defined in `oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto`.
-
-Part of the protocol looks like this:
-```protobuf
-message AlarmMessage {
-    int64 scopeId = 1;
-    string scope = 2;
-    string name = 3;
-    string id0 = 4;
-    string id1 = 5;
-    string ruleName = 6;
-    string alarmMessage = 7;
-    int64 startTime = 8;
-    AlarmTags tags = 9;
-}
-
-message AlarmTags {
-    // String key, String value pair.
-    repeated KeyStringValuePair data = 1;
-}
-
-message KeyStringValuePair {
-    string key = 1;
-    string value = 2;
-}
-```
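-
-To point the OAP at a remote gRPC endpoint, a section along the following lines can be added to `alarm-settings.yml`. This is a sketch based on the commented sample in the default distribution; verify the exact keys against your release:
-```yml
-gRPCHook:
-  target_host: 127.0.0.1
-  target_port: 9888
-```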
-
-## Slack Chat Hook
-Follow the [Getting Started with Incoming Webhooks guide](https://api.slack.com/messaging/webhooks) and create new Webhooks.
-
-The alarm message will be sent via HTTP POST with the `application/json` content type if you have configured Slack Incoming Webhooks as follows:
-```yml
-slackHooks:
-  textTemplate: |-
-    {
-      "type": "section",
-      "text": {
-        "type": "mrkdwn",
-        "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
-      }
-    }
-  webhooks:
-    - https://hooks.slack.com/services/x/y/z
-```
-
-## WeChat Hook
-Note that only the WeChat Company Edition (WeCom) supports webhooks. To use the WeChat webhook, follow the [WeChat Webhooks guide](https://work.weixin.qq.com/help?doc_id=13376).
-The alarm message will be sent via HTTP POST with the `application/json` content type after you have set up the WeChat webhooks as follows:
-```yml
-wechatHooks:
-  textTemplate: |-
-    {
-      "msgtype": "text",
-      "text": {
-        "content": "Apache SkyWalking Alarm: \n %s."
-      }
-    }
-  webhooks:
-    - https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_key
-```
-
-## Dingtalk Hook
-Follow the [Dingtalk Webhooks guide](https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq/uKPlK) and create new Webhooks.
-For security purposes, you can configure an optional secret for an individual webhook URL.
-The alarm message will be sent via HTTP POST with the `application/json` content type if you have configured Dingtalk Webhooks as follows:
-```yml
-dingtalkHooks:
-  textTemplate: |-
-    {
-      "msgtype": "text",
-      "text": {
-        "content": "Apache SkyWalking Alarm: \n %s."
-      }
-    }
-  webhooks:
-    - url: https://oapi.dingtalk.com/robot/send?access_token=dummy_token
-      secret: dummysecret
-```
-
-## Feishu Hook
-Follow the [Feishu Webhooks guide](https://www.feishu.cn/hc/zh-cn/articles/360024984973) and create new Webhooks.
-For security purposes, you can configure an optional secret for an individual webhook URL.
-If you would like to direct a message to particular users, configure `ats` with the Feishu user IDs, separated by commas.
-The alarm message will be sent via HTTP POST with the `application/json` content type if you have configured Feishu Webhooks as follows:
-```yml
-feishuHooks:
-  textTemplate: |-
-    {
-      "msg_type": "text",
-      "content": {
-        "text": "Apache SkyWalking Alarm: \n %s."
-      },
-      "ats":"feishu_user_id_1,feishu_user_id_2"
-    }
-  webhooks:
-    - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
-      secret: dummysecret
-```
-
-## WeLink Hook
-Follow the [WeLink Webhooks guide](https://open.welink.huaweicloud.com/apiexplorer/#/apiexplorer?type=internal&method=POST&path=/welinkim/v1/im-service/chat/group-chat) and create new Webhooks.
-The alarm message will be sent via HTTP POST with the `application/json` content type if you have configured WeLink Webhooks as follows:
-```yml
-welinkHooks:
-  textTemplate: "Apache SkyWalking Alarm: \n %s."
-  webhooks:
-    # You may find your own client_id and client_secret in your app; the values below are dummies and need to be changed.
-    - client_id: "dummy_client_id"
-      client_secret: dummy_secret_key
-      access_token_url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
-      message_url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
-      # To send to multiple groups at once, separate group_ids with commas, e.g. "123xx","456xx"
-      group_ids: "dummy_group_id"
-      # Choose a name for the robot; it will be displayed in the group
-      robot_name: robot
-```
-
-## Update the settings dynamically
-Since 6.5.0, the alarm settings can be updated dynamically at runtime by [Dynamic Configuration](dynamic-config.md),
-which will override the settings in `alarm-settings.yml`.
-
-In order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for
-each alarm rule. If any attribute (`metrics-name`, `op`, `threshold`, `period`, `count`, etc.) of a rule is changed,
-the sliding window will be destroyed and re-created, causing the alarm evaluation of this specific rule to restart.
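-
-For example, assuming the alarm rules are published under a dynamic configuration key such as `alarm.default.alarm-settings` (key name assumed here; check the Dynamic Configuration document for the authoritative key), the value would be the full rule set in the same YAML format:
-```yml
-rules:
-  service_resp_time_rule:
-    metrics-name: service_resp_time
-    op: ">"
-    threshold: 2000
-    period: 10
-    count: 3
-```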
diff --git a/docs/en/setup/backend/backend-cluster.md b/docs/en/setup/backend/backend-cluster.md
deleted file mode 100644
index dcd025a..0000000
--- a/docs/en/setup/backend/backend-cluster.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# Cluster Management
-In many production environments, the backend needs to support high throughput and provide HA to maintain robustness,
-so cluster management is always needed in a production environment.
-
-NOTICE: Cluster management does not provide a service discovery mechanism for agents and probes. We recommend that agents/probes
-access the OAP cluster through a gateway or load balancer.
-
-The core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.
- 
-There are various ways to manage the cluster in the backend. Choose the one that best suits your needs.
-
-- [Zookeeper coordinator](#zookeeper-coordinator). Use Zookeeper to let the backend instances detect and communicate
-with each other.
-- [Kubernetes](#kubernetes). When the backend clusters are deployed inside Kubernetes, you could make use of this method
-by using k8s native APIs to manage clusters.
-- [Consul](#consul). Use Consul as the backend cluster management implementor and coordinate backend instances.
-- [Etcd](#etcd). Use Etcd to coordinate backend instances.
-- [Nacos](#nacos). Use Nacos to coordinate backend instances.
-
-In the `application.yml` file, there are default configurations for the aforementioned coordinators under the section `cluster`.
-You can specify any of them in the `selector` property to enable it.
-
-## Zookeeper coordinator
-Zookeeper is a very common and widely used cluster coordinator. Set the **cluster/selector** to **zookeeper** in the yml to enable it.
-
-Required Zookeeper version: 3.5+
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:zookeeper}
-  # other configurations
-```
-
-- `hostPort` is the list of Zookeeper servers. The format is `IP1:PORT1,IP2:PORT2,...,IPn:PORTn`.
-- `enableACL` enables [Zookeeper ACL](https://zookeeper.apache.org/doc/r3.5.5/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) to control access to its znode.
-- `schema` is the Zookeeper ACL schema.
-- `expression` is an ACL expression. The format of the expression is specific to the [schema](https://zookeeper.apache.org/doc/r3.5.5/zookeeperProgrammers.html#sc_BuiltinACLSchemes). 
-- `hostPort`, `baseSleepTimeMs` and `maxRetries` are settings of the Zookeeper curator client.
-
-Note: 
-- If `Zookeeper ACL` is enabled and `/skywalking` exists, you must make sure that `SkyWalking` has `CREATE`, `READ` and `WRITE` permissions. If `/skywalking` does not exist, it will be created by SkyWalking, and all permissions will be granted to the specified user. At the same time, the znode grants the READ permission to anyone.
-- If you set `schema` as `digest`, the password of the expression is set in **clear text**. 
-
-In some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes.
-The following settings are provided to set the host and port manually, based on your own LAN env.
-- internalComHost: The registered host. Other OAP nodes use this to communicate with the current node.
-- internalComPort: The registered port. Other OAP nodes use this to communicate with the current node.
-
-```yaml
-zookeeper:
-  nameSpace: ${SW_NAMESPACE:""}
-  hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
-  #Retry Policy
-  baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-  maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-  internalComHost: 172.10.4.10
-  internalComPort: 11800
-  # Enable ACL
-  enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-  schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
-  expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
-``` 
-
-
-## Kubernetes
-This requires the backend cluster to be deployed inside Kubernetes. See the guides in [Deploy in kubernetes](backend-k8s.md).
-Set the selector to `kubernetes`.
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:kubernetes}
-  # other configurations
-```
-
-## Consul
-The Consul system has become increasingly popular, and many companies and developers now use Consul as
-their service discovery solution. Set the **cluster/selector** to **consul** in the yml to enable it.
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:consul}
-  # other configurations
-```
-
-As with the Zookeeper coordinator,
-in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes.
-The following settings are provided to set the host and port manually, based on your own LAN env.
-- internalComHost: The registered host. Other OAP nodes use this to communicate with the current node.
-- internalComPort: The registered port. Other OAP nodes use this to communicate with the current node.
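-
-A configuration sketch for the Consul coordinator, including the internal communication settings, may look as follows. The `serviceName`, `hostPort`, and `aclToken` keys reflect the default `application.yml` and should be double-checked against your version:
-```yaml
-consul:
-  serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-  # Consul cluster nodes, e.g. 10.0.0.1:8500,10.0.0.2:8500
-  hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
-  aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN:""}
-  internalComHost: 172.10.4.10
-  internalComPort: 11800
-```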
-
-
-## Etcd
-Set the **cluster/selector** to **etcd** in the yml to enable it. The Etcd client has been upgraded to the v3 protocol and now uses the official CoreOS library. **Since 8.7.0, only the v3 protocol is supported for Etcd.** 
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:etcd}
-  # other configurations
-  etcd:
-    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-    endpoints: ${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}
-    namespace: ${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}
-    serviceName: ${SW_SCLUSTER_ETCD_ERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    authentication: ${SW_CLUSTER_ETCD_AUTHENTICATION:false}
-    user: ${SW_SCLUSTER_ETCD_USER:}
-    password: ${SW_SCLUSTER_ETCD_PASSWORD:}
-```
-
-As with the Zookeeper coordinator,
-in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes.
-The following settings are provided to set the host and port manually, based on your own LAN env.
-- internalComHost: The registered host. Other OAP nodes use this to communicate with the current node.
-- internalComPort: The registered port. Other OAP nodes use this to communicate with the current node.
-
-## Nacos
-Set the **cluster/selector** to **nacos** in the yml to enable it.
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:nacos}
-  # other configurations
-```
-
-Nacos supports authentication by username or accessKey. Leaving these empty means that no authentication is needed. The extra config is as follows:
-```yaml
-nacos:
-  username:
-  password:
-  accessKey:
-  secretKey:
-```
-
-As with the Zookeeper coordinator,
-in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes.
-The following settings are provided to set the host and port manually, based on your own LAN env.
-- internalComHost: The registered host. Other OAP nodes use this to communicate with the current node.
-- internalComPort: The registered port. Other OAP nodes use this to communicate with the current node.
diff --git a/docs/en/setup/backend/backend-fetcher.md b/docs/en/setup/backend/backend-fetcher.md
deleted file mode 100644
index 717c55a..0000000
--- a/docs/en/setup/backend/backend-fetcher.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# Open Fetcher
-Fetcher is a concept in the SkyWalking backend. When reading data from target systems, the pull mode is more suitable than the [receiver](backend-receivers.md). This mode is typically found in metrics SDKs, such as Prometheus.
-
-## Prometheus Fetcher
-Suppose you want to enable some `metric-custom.yaml` rule files stored in `fetcher-prom-rules`; append their names to `enabledRules` of
-`prometheus-fetcher` as follows:
- 
-```yaml
-prometheus-fetcher:
-  selector: ${SW_PROMETHEUS_FETCHER:default}
-  default:
-    enabledRules: ${SW_PROMETHEUS_FETCHER_ENABLED_RULES:"self,metric-custom"}
-```
-
-### Configuration file
-Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching
- services and their instances, as well as which rule files to load.
-
-The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files
-are located at `$CLASSPATH/fetcher-prom-rules`.
-
-The file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.
-
-A full example can be found [here](../../../../oap-server/server-bootstrap/src/main/resources/fetcher-prom-rules/self.yaml).
-
-Generic placeholders are defined as follows:
-
- * `<duration>`: A duration parsed from its textual representation. The formats accepted are based on
-                 the ISO-8601 duration format `PnDTnHnMn.nS`, with days considered to be exactly 24 hours.
- * `<labelname>`: A string matching the regular expression \[a-zA-Z_\]\[a-zA-Z0-9_\]*.
- * `<labelvalue>`: A string of unicode characters.
- * `<host>`: A valid string consisting of a hostname or IP followed by an optional port number.
- * `<path>`: A valid URL path.
- * `<string>`: A regular string.
-
-```yaml
-# How frequently to fetch targets.
-fetcherInterval: <duration>
-# Per-fetch timeout when fetching this target.
-fetcherTimeout: <duration>
-# The HTTP resource path on which to fetch metrics from targets.
-metricsPath: <path>
-# Statically configured targets.
-staticConfig:
-  # The targets specified by the static config.
-  targets:
-    [ - <target> ]
-  # Labels assigned to all metrics fetched from the targets.
-  labels:
-    [ <labelname>: <labelvalue> ... ]
-# expSuffix is appended to all expressions in this file.
-expSuffix: <string>
-# insert metricPrefix into metric name:  <metricPrefix>_<raw_metric_name>
-metricPrefix: <string>
-# Metrics rules allow you to recompute queries.
-metricsRules:
-   [ - <metric_rules> ]
-```
-
-#### <target>
-
-```yaml
-# The URL of the target exporter. The format should comply with "java.net.URI".
-url: <string>
-# The path of root CA file.
-sslCaFilePath: <string>
-```
-
-#### <metric_rules>
-
-```yaml
-# The name of the rule, which is combined with the prefix 'meter_' to form the index/table name in storage.
-name: <string>
-# MAL expression.
-exp: <string>
-```
-
-To know more about MAL, please refer to [mal.md](../../concepts-and-designs/mal.md).
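-
-Putting the scheme together, a hypothetical `fetcher-prom-rules/metric-custom.yaml` might look like this. The target URL, label, and MAL expressions below are illustrative only:
-```yaml
-fetcherInterval: PT15S
-fetcherTimeout: PT10S
-metricsPath: /metrics
-staticConfig:
-  targets:
-    - url: http://localhost:9100
-  labels:
-    service: my-service
-expSuffix: service(['service'])
-metricPrefix: custom
-metricsRules:
-  # Stored as meter_custom_cpu_seconds in the storage.
-  - name: cpu_seconds
-    exp: node_cpu_seconds_total.sum(['service'])
-```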
-
-## Kafka Fetcher
-
-The Kafka Fetcher pulls messages from the Kafka Broker to consume the data delivered by agents. Check the agent documentation for details. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported. The Kafka Fetcher can work with gRPC/HTTP Receivers at the same time to support different transport protocols.
-
-Kafka Fetcher is disabled by default. To enable it, configure as follows.
-
-The namespace aims to isolate multiple OAP clusters that use the same Kafka cluster.
-If you set a namespace for the Kafka fetcher, the OAP will add it as a prefix to the topic names. You should also set the namespace in the property named `plugin.kafka.namespace` in `agent.config`.
-
-```yaml
-kafka-fetcher:
-  selector: ${SW_KAFKA_FETCHER:default}
-  default:
-    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
-    namespace: ${SW_NAMESPACE:""}
-```
-
-The `skywalking-segments`, `skywalking-metrics`, `skywalking-profilings`, `skywalking-managements`, `skywalking-meters`, `skywalking-logs`
-and `skywalking-logs-json` topics are required by `kafka-fetcher`.
-If they do not exist, the Kafka Fetcher will create them by default. You can also create them yourself before the OAP server starts.
-
-When using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:
-
-```yaml
-kafka-fetcher:
-  selector: ${SW_KAFKA_FETCHER:default}
-  default:
-    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
-    namespace: ${SW_NAMESPACE:""}
-    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
-    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
-    isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:false}
-    consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:""}
-```
-
-In cluster mode, all topics have the same number of partitions. Set `"isSharding"` to `"true"` and assign the partitions for each OAP server to consume. Use commas to separate multiple partitions for an OAP server.
-
-The Kafka Fetcher allows you to configure all the Kafka consumer settings listed [here](http://kafka.apache.org/24/documentation.html#consumerconfigs) in the property `kafkaConsumerConfig`. For example:
-```yaml
-kafka-fetcher:
-  selector: ${SW_KAFKA_FETCHER:default}
-  default:
-    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
-    namespace: ${SW_NAMESPACE:""}
-    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
-    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
-    isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:true}
-    consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}
-    kafkaConsumerConfig:
-      enable.auto.commit: true
-      ...
-```
-
-When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker [config](https://github.com/apache/kafka/tree/trunk/connect/mirror#remote-topics).
-```yaml
-kafka-fetcher:
-  selector: ${SW_KAFKA_FETCHER:default}
-  default:
-    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
-    namespace: ${SW_NAMESPACE:""}
-    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
-    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
-    isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:true}
-    consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}
-    mm2SourceAlias: ${SW_KAFKA_MM2_SOURCE_ALIAS:""}
-    mm2SourceSeparator: ${SW_KAFKA_MM2_SOURCE_SEPARATOR:""}
-    kafkaConsumerConfig:
-      enable.auto.commit: true
-      ...
-```
diff --git a/docs/en/setup/backend/backend-health-check.md b/docs/en/setup/backend/backend-health-check.md
deleted file mode 100644
index 85caae5..0000000
--- a/docs/en/setup/backend/backend-health-check.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Health Check
-
-Health check intends to provide a unified approach to checking the health status of the OAP server. It covers the health status
-of modules, as well as GraphQL and gRPC service readiness.
-
-> 0 means healthy, and more than 0 means unhealthy;
-> less than 0 means that the OAP has not started up.
-
-## Health Checker Module
-
-The Health Checker module helps observe the health status of modules. You may activate it as follows:
-```yaml
-health-checker:
-  selector: ${SW_HEALTH_CHECKER:default}
-  default:
-    checkIntervalSeconds: ${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}
-```
-Note: The `telemetry` module should be enabled at the same time. This means that the provider should not be `-` or `none`.
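-
-For instance, a minimal sketch enabling the self-observability telemetry provider could look like this (provider name assumed from the default distribution):
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:prometheus}
-```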
-
-After that, we can check the OAP server health status by querying GraphQL:
-
-```
-query{
-  checkHealth{
-    score
-    details
-  }
-}
-```
-
-If the OAP server is healthy, the response should be
-
-```json
-{
-  "data": {
-    "checkHealth": {
-      "score": 0,
-      "details": ""
-    }
-  }
-}
-```
-
-If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:
-
-```json
-{
-  "data": {
-    "checkHealth": {
-      "score": 1,
-      "details": "storage_h2,"
-    }
-  }
-}
-```
-Refer to [checkHealth query](https://github.com/apache/skywalking-query-protocol/blob/master/common.graphqls)
-for more details.
-
-## The readiness of GraphQL and gRPC
-
-Use the query above to check the readiness of GraphQL.
-
-OAP has implemented the [gRPC Health Checking Protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-You may use the [grpc-health-probe](https://github.com/grpc-ecosystem/grpc-health-probe) or any other tools to check the
-health of OAP gRPC services.
-
-## CLI tool
-Please follow the [CLI doc](https://github.com/apache/skywalking-cli#checkhealth) to get the health status score directly through the `checkhealth` command.
diff --git a/docs/en/setup/backend/backend-init-mode.md b/docs/en/setup/backend/backend-init-mode.md
deleted file mode 100644
index 480b43a..0000000
--- a/docs/en/setup/backend/backend-init-mode.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Init mode
-The SkyWalking backend supports multiple storage implementors. Most of them automatically initialize the storage,
-such as Elasticsearch or a database, when the backend starts up for the first time.
-
-But unexpected events may occur with the storage. For example, when multiple backend instances start up at the same time
-and create multiple Elasticsearch indexes concurrently, the Elasticsearch APIs may be blocked without reporting any exception.
-This often happens on container management platforms, such as k8s.
-
-This is where you need the **Init mode** startup.
-
-## Solution
-Only one single instance should run in the **Init mode** before other instances start up.
-This instance will exit gracefully after all initialization steps are done.
-
-Use `oapServiceInit.sh`/`oapServiceInit.bat` to start up the backend in this mode. You should see the following logs:
-> 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO  [] - OAP starts up in init mode successfully, exit now...
-
-## Kubernetes
-Initialization in this mode would be included in our Kubernetes scripts and Helm.
diff --git a/docs/en/setup/backend/backend-ip-port.md b/docs/en/setup/backend/backend-ip-port.md
deleted file mode 100644
index cd6363d..0000000
--- a/docs/en/setup/backend/backend-ip-port.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# IP and port setting
-The backend uses IP and port binding to support operating systems with multiple IPs.
-The binding/listening IP and port are specified by the core module
-```yaml
-core:
-  default:
-    restHost: 0.0.0.0
-    restPort: 12800
-    restContextPath: /
-    gRPCHost: 0.0.0.0
-    gRPCPort: 11800
-```
-There are two IP/port pairs for gRPC and HTTP REST services.
-
-- Most agents and probes use gRPC service for better performance and code readability.
-- Some agents use the REST service, because gRPC may not be supported in that language.
-- The UI uses REST service, but the data is always in GraphQL format.
-
-
-## Note
-### IP binding
-For users who are not familiar with IP binding, note that once an IP is bound, clients can only use that IP to access the service. For example, if `172.09.13.28` is bound, you must use
-`172.09.13.28`, rather than `127.0.0.1` or `localhost`, to access the service, even on the same machine.
-
-### Module provider specified IP and port
-The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.
-
-
diff --git a/docs/en/setup/backend/backend-k8s-monitoring.md b/docs/en/setup/backend/backend-k8s-monitoring.md
deleted file mode 100644
index f3e5eb1..0000000
--- a/docs/en/setup/backend/backend-k8s-monitoring.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# K8s monitoring 
-SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data from K8s, and leverages OpenTelemetry Collector to transfer the metrics to
-[OpenTelemetry receiver](backend-receivers.md#opentelemetry-receiver) and into the [Meter System](./../../concepts-and-designs/meter.md). This feature requires authorizing the OAP Server to access K8s's `API Server`.  
-We define the k8s-cluster as a `Service` in the OAP, and use `k8s-cluster::` as a prefix to identify it.  
-We define the k8s-node as an `Instance` in the OAP, and set its name as the K8s `node name`.  
-We define the k8s-service as an `Endpoint` in the OAP, and set its name as `$serviceName.$namespace`.  
-
-## Data flow
-1. K8s kube-state-metrics and cAdvisor collect metrics data from K8s.
-2. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter.
-3. The SkyWalking OAP Server accesses K8s's `API Server` to get meta info, and parses the expressions with [MAL](../../concepts-and-designs/mal.md) to filter/calculate/aggregate and store the results. 
-
-## Setup 
-1. Setup [kube-state-metric](https://github.com/kubernetes/kube-state-metrics#kubernetes-deployment).
-2. cAdvisor is integrated into `kubelet` by default.
-3. Set up [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/getting-started/#kubernetes). For details on the Prometheus Receiver in OpenTelemetry Collector for K8s, refer to [here](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus-kubernetes.yml). For a quick start, we have provided a full example of the OpenTelemetry Collector configuration: [otel-collector-config.yaml](otel-collector-config.yaml).
-4. Config SkyWalking [OpenTelemetry receiver](backend-receivers.md#opentelemetry-receiver).
-
-## Supported Metrics
-To monitor K8s from different points of view, there are 3 kinds of metrics: [Cluster](#cluster) / [Node](#node) / [Service](#service).
-
-### Cluster 
-These metrics are related to the selected cluster (`Current Service in the dashboard`).
-
-| Monitoring Panel | Unit | Metric Name | Description | Data Source |
-|-----|-----|-----|-----|-----|
-| Node Total |  | k8s_cluster_node_total | The number of nodes | K8s kube-state-metrics|
-| Namespace Total |  | k8s_cluster_namespace_total | The number of namespaces | K8s kube-state-metrics|
-| Deployment Total |  | k8s_cluster_deployment_total | The number of deployments | K8s kube-state-metrics|
-| Service Total |  | k8s_cluster_service_total | The number of services | K8s kube-state-metrics|
-| Pod Total |  | k8s_cluster_pod_total | The number of pods | K8s kube-state-metrics|
-| Container Total |  | k8s_cluster_container_total | The number of containers | K8s kube-state-metrics|
-| CPU Resources | m | k8s_cluster_cpu_cores<br />k8s_cluster_cpu_cores_requests<br />k8s_cluster_cpu_cores_limits<br />k8s_cluster_cpu_cores_allocatable | The capacity and the Requests / Limits / Allocatable of the CPU | K8s kube-state-metrics|
-| Memory Resources | GB | k8s_cluster_memory_total<br />k8s_cluster_memory_requests<br />k8s_cluster_memory_limits<br />k8s_cluster_memory_allocatable | The capacity and the Requests / Limits / Allocatable of the memory | K8s kube-state-metrics|
-| Storage Resources | GB | k8s_cluster_storage_total<br />k8s_cluster_storage_allocatable | The capacity and allocatable of the storage | K8s kube-state-metrics|
-| Node Status |  | k8s_cluster_node_status | The current status of the nodes | K8s kube-state-metrics|
-| Deployment Status |  | k8s_cluster_deployment_status | The current status of the deployment | K8s kube-state-metrics|
-| Deployment Spec Replicas |  | k8s_cluster_deployment_spec_replicas | The number of desired pods for a deployment | K8s kube-state-metrics|
-| Service Status |  | k8s_cluster_service_pod_status | The services current status, depending on the related pods' status | K8s kube-state-metrics|
-| Pod Status Not Running |  | k8s_cluster_pod_status_not_running | The pods which are not running in the current phase | K8s kube-state-metrics|
-| Pod Status Waiting |  | k8s_cluster_pod_status_waiting | The pods and containers which are currently in the waiting status, with reasons shown | K8s kube-state-metrics|
-| Pod Status Terminated |  | k8s_cluster_container_status_terminated | The pods and containers which are currently in the terminated status, with reasons shown | K8s kube-state-metrics|
-
-### Node
-These metrics are related to the selected node (`Current Instance in the dashboard`).
-
-| Monitoring Panel | Unit | Metric Name | Description | Data Source |
-|-----|-----|-----|-----|-----|
-| Pod Total |  | k8s_node_pod_total | The number of pods in this node | K8s kube-state-metrics |
-| Node Status |  | k8s_node_node_status | The current status of this node | K8s kube-state-metrics |
-| CPU Resources | m | k8s_node_cpu_cores<br />k8s_node_cpu_cores_allocatable<br />k8s_node_cpu_cores_requests<br />k8s_node_cpu_cores_limits | The capacity and the Requests / Limits / Allocatable of the CPU | K8s kube-state-metrics |
-| Memory Resources | GB | k8s_node_memory_total<br />k8s_node_memory_allocatable<br />k8s_node_memory_requests<br />k8s_node_memory_limits | The capacity and the Requests / Limits / Allocatable of the memory | K8s kube-state-metrics |
-| Storage Resources | GB | k8s_node_storage_total<br />k8s_node_storage_allocatable | The capacity and allocatable of the storage | K8s kube-state-metrics |
-| CPU Usage | m | k8s_node_cpu_usage | The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m | cAdvisor |
-| Memory Usage | GB | k8s_node_memory_usage | The total memory usage | cAdvisor |
-| Network I/O| KB/s | k8s_node_network_receive<br />k8s_node_network_transmit | The network receive and transmit | cAdvisor |
-
-### Service
-In these metrics, the pods are related to the selected service (`Current Endpoint in the dashboard`).
-
-| Monitoring Panel | Unit | Metric Name | Description | Data Source |
-|-----|-----|-----|-----|-----|
-| Service Pod Total |  | k8s_service_pod_total | The number of pods | K8s kube-state-metrics |
-| Service Pod Status |  | k8s_service_pod_status | The current status of pods | K8s kube-state-metrics |
-| Service CPU Resources | m | k8s_service_cpu_cores_requests<br />k8s_service_cpu_cores_limits | The CPU resources requests / Limits of this service | K8s kube-state-metrics |
-| Service Memory Resources | MB | k8s_service_memory_requests<br />k8s_service_memory_limits | The memory resources requests / Limits of this service | K8s kube-state-metrics |
-| Pod CPU Usage | m | k8s_service_pod_cpu_usage | The CPU resources total usage of pods | cAdvisor |
-| Pod Memory Usage | MB | k8s_service_pod_memory_usage | The memory resources total usage of pods | cAdvisor |
-| Pod Waiting |  | k8s_service_pod_status_waiting | The pods and containers which are currently in the waiting status, with reasons shown | K8s kube-state-metrics |
-| Pod Terminated |  | k8s_service_pod_status_terminated | The pods and containers which are currently in the terminated status, with reasons shown | K8s kube-state-metrics |
-| Pod Restarts |  | k8s_service_pod_status_restarts_total | The number of per container restarts related to the pods | K8s kube-state-metrics |
-| Pod Network Receive | KB/s | k8s_service_pod_network_receive | The network receive of the pods | cAdvisor |
-| Pod Network Transmit | KB/s | k8s_service_pod_network_transmit | The network transmit of the pods  | cAdvisor |
-| Pod Storage Usage | MB | k8s_service_pod_fs_usage | The storage resources total usage of pods related to this service | cAdvisor |
-
-## Customizing 
-You can customize your own metrics/expression/dashboard panel.   
-The metrics definitions and expression rules are found in `/config/otel-oc-rules/k8s-cluster.yaml`, `/config/otel-oc-rules/k8s-node.yaml`, and `/config/otel-oc-rules/k8s-service.yaml`.  
-The dashboard panel configurations are found in `/config/ui-initialized-templates/k8s.yml`.
diff --git a/docs/en/setup/backend/backend-k8s.md b/docs/en/setup/backend/backend-k8s.md
deleted file mode 100644
index 561d638..0000000
--- a/docs/en/setup/backend/backend-k8s.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Deploy SkyWalking backend and UI in kubernetes
-
-Before you read the Kubernetes deployment guidance, please make sure you have read the `Quick Start` and `Advanced Setup` documents.
-Most SkyWalking OAP settings are controlled through system environment variables when applying the Helm deployment.
-
-Follow the instructions in [deploying SkyWalking backend to Kubernetes cluster](https://github.com/apache/skywalking-kubernetes)
-to deploy the OAP and UI to a Kubernetes cluster.
-
-Please read the Readme file.
\ No newline at end of file
diff --git a/docs/en/setup/backend/backend-meter.md b/docs/en/setup/backend/backend-meter.md
deleted file mode 100644
index 178c043..0000000
--- a/docs/en/setup/backend/backend-meter.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Meter receiver
-The meter receiver accepts the metrics of [meter protocol](https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Meter.proto) into the [meter system](./../../concepts-and-designs/meter.md).
-
-## Module definition
-```yaml
-receiver-meter:
-  selector: ${SW_RECEIVER_METER:default}
-  default:
-
-```
-
-If you use the Kafka Fetcher instead, apply the following configuration to enable it.
-```yaml
-kafka-fetcher:
-  selector: ${SW_KAFKA_FETCHER:default}
-  default:
-    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
-```
-
-## Configuration file
-The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving 
- from agents, as well as which rule files to load.
- 
-The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files
-are located at `$CLASSPATH/meter-analyzer-config`.
-
-The file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.
-
-An example can be found [here](../../../../oap-server/server-bootstrap/src/main/resources/meter-analyzer-config/spring-sleuth.yaml).
-If you're using Spring Sleuth, see [Spring Sleuth Setup](spring-sleuth-setup.md).
-
-| Rule Name | Description | Configuration File | Data Source |
-|-----|-----|-----|-----|
-|spring-sleuth| Metrics of Spring Sleuth Application | meter-analyzer-config/spring-sleuth.yaml | Spring Sleuth Application --meter format--> SkyWalking OAP Server |
-
-### Meters configuration
-
-```yaml
-# expSuffix is appended to all expressions in this file.
-expSuffix: <string>
-# insert metricPrefix into metric name:  <metricPrefix>_<raw_metric_name>
-metricPrefix: <string>
-# Metrics rules allow you to recompute queries.
-metricsRules:
-  # The name of the rule, which is combined with the prefix 'meter_' to form the index/table name in storage.
-  name: <string>
-  # MAL expression.
-  exp: <string>
-```
-
-For more information on MAL, please refer to [mal.md](../../concepts-and-designs/mal.md)
-
-#### `rate`, `irate`, and `increase`
-
-Although we support the `rate`, `irate`, and `increase` functions in the backend, we still recommend that users consider using client-side APIs to run these functions. The reasons are as follows:
-1. The OAP has to set up caches to calculate the values.
-1. Once the agent reconnects to another OAP instance, the time windows of the rate calculation break, leading to inaccurate results.
diff --git a/docs/en/setup/backend/backend-receivers.md b/docs/en/setup/backend/backend-receivers.md
deleted file mode 100644
index 0e59e24..0000000
--- a/docs/en/setup/backend/backend-receivers.md
+++ /dev/null
@@ -1,194 +0,0 @@
-
-# Choosing a receiver
-Receiver is a defined concept in SkyWalking's backend. All modules that are responsible for receiving telemetry
-or tracing data from other systems being monitored are called **receivers**. If you are looking for the pull mode,
-take a look at the [fetcher document](backend-fetcher.md).
-
-We have the following receivers, and `default` implementors are provided in our Apache distribution.
-1. **receiver-trace**. gRPC and HTTPRestful services that accept SkyWalking format traces.
-1. **receiver-register**. gRPC and HTTPRestful services that provide service, service instance and endpoint register.
-1. **service-mesh**. gRPC services that accept data from inbound mesh probes.
-1. **receiver-jvm**. gRPC services that accept JVM metrics data.
-1. **envoy-metric**. Envoy `metrics_service` and `ALS(access log service)` are supported by this receiver. The OAL script supports all GAUGE type metrics.
-1. **receiver-profile**. gRPC services that accept profile task status and snapshot reporter.
-1. **receiver-otel**. See [details](#opentelemetry-receiver). A receiver for analyzing metrics data from OpenTelemetry.
-1. **receiver-meter**. See [details](backend-meter.md). A receiver for analyzing metrics in SkyWalking native meter format.
-1. **receiver-browser**. gRPC services that accept browser performance data and error log.
-1. **receiver-log**. A receiver for native log format. See [Log Analyzer](log-analyzer.md) for advanced features. 
-1. **configuration-discovery**. gRPC services that handle configurationDiscovery.
-1. **receiver-event**. gRPC services that handle events data.
-1. **receiver-zabbix**. See [details](backend-zabbix.md).
-1. Experimental receivers. 
-    1. **receiver_zipkin**. See [details](#zipkin-receiver).
-
-The sample settings of these receivers are by default included in `application.yml`, and also listed here:
-```yaml
-receiver-register:
-  selector: ${SW_RECEIVER_REGISTER:default}
-  default:
-
-receiver-trace:
-  selector: ${SW_RECEIVER_TRACE:default}
-  default:
-
-receiver-jvm:
-  selector: ${SW_RECEIVER_JVM:default}
-  default:
-
-service-mesh:
-  selector: ${SW_SERVICE_MESH:default}
-  default:
-
-envoy-metric:
-  selector: ${SW_ENVOY_METRIC:default}
-  default:
-    acceptMetricsService: ${SW_ENVOY_METRIC_SERVICE:true}
-    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}
-
-receiver_zipkin:
-  selector: ${SW_RECEIVER_ZIPKIN:-}
-  default:
-    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
-    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-    jettyMinThreads: ${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}
-    jettyMaxThreads: ${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}
-    jettyIdleTimeOut: ${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}
-    jettyAcceptorPriorityDelta: ${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}
-    jettyAcceptQueueSize: ${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}
-
-receiver-profile:
-  selector: ${SW_RECEIVER_PROFILE:default}
-  default:
-
-receiver-browser:
-  selector: ${SW_RECEIVER_BROWSER:default}
-  default:
-    sampleRate: ${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}
-
-log-analyzer:
-   selector: ${SW_LOG_ANALYZER:default}
-   default:
-      lalFiles: ${SW_LOG_LAL_FILES:default}
-      malFiles: ${SW_LOG_MAL_FILES:""}
-  
-configuration-discovery:
-  selector: ${SW_CONFIGURATION_DISCOVERY:default}
-  default:
-
-receiver-event:
-   selector: ${SW_RECEIVER_EVENT:default}
-   default:
-
-```
-
-## gRPC/HTTP server for receiver
-By default, all gRPC/HTTP services should be served at `core/gRPC` and `core/rest`.
-But the `receiver-sharing-server` module allows all receivers to be served at
-different ip:port, if you set them explicitly. 
-```yaml
-receiver-sharing-server:
-  selector: ${SW_RECEIVER_SHARING_SERVER:default}
-  default:
-    host: ${SW_RECEIVER_JETTY_HOST:0.0.0.0}
-    contextPath: ${SW_RECEIVER_JETTY_CONTEXT_PATH:/}
-    authentication: ${SW_AUTHENTICATION:""}
-    jettyMinThreads: ${SW_RECEIVER_SHARING_JETTY_MIN_THREADS:1}
-    jettyMaxThreads: ${SW_RECEIVER_SHARING_JETTY_MAX_THREADS:200}
-    jettyIdleTimeOut: ${SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT:30000}
-    jettyAcceptorPriorityDelta: ${SW_RECEIVER_SHARING_JETTY_DELTA:0}
-    jettyAcceptQueueSize: ${SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE:0}
-```
-
-Note: If you add these settings, make sure that they are not the same as the core module. This is because gRPC/HTTP servers of the core are still used for UI and OAP internal communications.
-
-## OpenTelemetry receiver
-
-The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. 
-If the new configuration is not well-formed, the OAP may fail to start up. The files are located at `$CLASSPATH/otel-<handler>-rules`.
-E.g. The `oc` handler loads rules from `$CLASSPATH/otel-oc-rules`.
-
-Supported handlers:
-
-* `oc`: [OpenCensus](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/a08903f05d3a544f548535c222b1c205b9f5a154/exporter/opencensusexporter/README.md) gRPC service handler.
-
-**Notice:**  Set `SW_OTEL_RECEIVER=default` through system environment or change `receiver-otel/selector=${SW_OTEL_RECEIVER:default}` to activate the OpenTelemetry receiver.
-
-The rule file should be in YAML format, defined by the scheme described in [prometheus-fetcher](./backend-fetcher.md).
-Note: `receiver-otel` only supports the `group`, `defaultMetricLevel`, and `metricsRules` nodes of the scheme due to its push mode.
-
-To activate the `oc` handler and relevant rules of `istio`:
-
-```yaml
-receiver-otel:
-  # Change the selector value to default to activate the otel receiver.
-  selector: ${SW_OTEL_RECEIVER:default}
-  default:
-    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"oc"}
-    enabledOcRules: ${SW_OTEL_RECEIVER_ENABLED_OC_RULES:"istio-controlplane"}
-```
-The receiver adds labels with `key = node_identifier_host_name` and `key = node_identifier_pid` to the collected data samples,
-and values from `Node.identifier.host_name` and `Node.identifier.pid` defined in OpenCensus Agent Proto,
-for identification of the metric data.
-
-| Rule Name | Description | Configuration File | Data Source |
-|----|----|-----|----|
-|istio-controlplane| Metrics of Istio control plane | otel-oc-rules/istio-controlplane.yaml | Istio Control Plane -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-|oap| Metrics of SkyWalking OAP server itself | otel-oc-rules/oap.yaml | SkyWalking OAP Server(SelfObservability) -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-|vm| Metrics of VMs | otel-oc-rules/vm.yaml | Prometheus node-exporter(VMs) -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-|k8s-cluster| Metrics of K8s cluster | otel-oc-rules/k8s-cluster.yaml | K8s kube-state-metrics -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-|k8s-node| Metrics of K8s nodes | otel-oc-rules/k8s-node.yaml | cAdvisor & K8s kube-state-metrics -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-|k8s-service| Metrics of K8s services | otel-oc-rules/k8s-service.yaml | cAdvisor & K8s kube-state-metrics -> OpenTelemetry Collector --OC format--> SkyWalking OAP Server |
-
-## Meter receiver
-
-The meter receiver supports accepting metrics into the meter system. The OAP can load the configuration at bootstrap. 
-
-The file is written in YAML format, defined by the scheme described in [backend-meter](./backend-meter.md).
-
-To activate the `default` implementation:
-```yaml
-receiver-meter:
-  selector: ${SW_RECEIVER_METER:default}
-  default:
-```
-
-To activate the meter rule files:
-
-Put your customized meter file `xxx.yaml` (in [MAL](../../concepts-and-designs/mal.md) format) in the `config/meter-analyzer-config` directory and configure `meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:xxx}`:
-
-```yaml
-agent-analyzer:
-  selector: ${SW_AGENT_ANALYZER:default}
-  default:
-    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:} # Which files could be meter analyzed, files split by ","
-```
-
-The receiver adds labels with `key = service` and `key = instance` to the collected data samples,
-and values from service and service instance name defined in SkyWalking Agent,
-for identification of the metric data.
-
-## Zipkin receiver
-The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation. It supports Zipkin v1/v2 formats through the HTTP service.
-Make sure you use this with the `SW_STORAGE=zipkin-elasticsearch7` option to activate the Zipkin storage implementation.
-Once this receiver and storage are activated, SkyWalking's native traces will be ignored, and SkyWalking won't analyze topology, metrics, and endpoint
-dependencies from Zipkin's traces. 
-
-Use the following config to activate it.
-```yaml
-receiver_zipkin:
-  selector: ${SW_RECEIVER_ZIPKIN:-}
-  default:
-    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
-    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-    jettyMinThreads: ${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}
-    jettyMaxThreads: ${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}
-    jettyIdleTimeOut: ${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}
-    jettyAcceptorPriorityDelta: ${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}
-    jettyAcceptQueueSize: ${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}
-```
-
-NOTE: Zipkin receiver is only provided in `apache-skywalking-apm-es7-x.y.z.tar.gz` tar.
-This requires `zipkin-elasticsearch7` storage implementation to be activated.
-Read [this](backend-storage.md#elasticsearch-7-with-zipkin-trace-extension) doc to learn about Zipkin as a storage option.
diff --git a/docs/en/setup/backend/backend-setting-override.md b/docs/en/setup/backend/backend-setting-override.md
deleted file mode 100644
index 2e95e63..0000000
--- a/docs/en/setup/backend/backend-setting-override.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# Setting Override
-The SkyWalking backend supports overriding settings through system properties and system environment variables.
-You may override the settings in `application.yml`.
-
-## System properties key rule
-**ModuleName**.**ProviderName**.**SettingKey**.
-
-- Example
-
-  Override `restHost` in this setting segment
-  
-```yaml
-core:
-  default:
-    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
-    restPort: ${SW_CORE_REST_PORT:12800}
-    restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
-    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
-```
-
-Use the command argument
-```
--Dcore.default.restHost=172.0.4.12
-```
-
-## System environment variables
-- Example
-
-  Override `restHost` in this setting segment through environment variables
-  
-```yaml
-core:
-  default:
-    restHost: ${REST_HOST:0.0.0.0}
-    restPort: ${SW_CORE_REST_PORT:12800}
-    restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
-    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
-```
-
-If the `REST_HOST` environment variable exists in your operating system and its value is `172.0.4.12`,
-then the value of `restHost` here will be overwritten to `172.0.4.12`; otherwise, it will be set to `0.0.0.0`.
-
-Placeholder nesting is also supported, like `${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}`.
-In this case, if the `REST_HOST` environment variable does not exist, but the `ANOTHER_REST_HOST`
-environment variable exists and its value is `172.0.4.12`, then the value of `restHost` here will be overwritten to `172.0.4.12`;
-otherwise, it will be set to `127.0.0.1`.
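-
-For instance, the nested placeholder can be written directly in `application.yml`:
-```yaml
-core:
-  default:
-    restHost: ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}
-```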
-
-
-
-
diff --git a/docs/en/setup/backend/backend-setup.md b/docs/en/setup/backend/backend-setup.md
deleted file mode 100755
index 543392f..0000000
--- a/docs/en/setup/backend/backend-setup.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# Backend setup
-SkyWalking's backend distribution package consists of the following parts:
-
-1. **bin/cmd scripts**: Located in the `/bin` folder. Includes Linux shell and Windows cmd startup scripts for the backend
-   server and the UI.
-
-2. **Backend config**: Located in the `/config` folder. Includes settings files of the backend, which are:
-    * `application.yml`
-    * `log4j.xml`
-    * `alarm-settings.yml`
-
-3. **Libraries of backend**: Located in the `/oap-libs` folder. All dependencies of the backend can be found in it.
-
-4. **Webapp env**: Located in the `webapp` folder. UI frontend jar file can be found here, together with its `webapp.yml` setting file.
-
-## Requirements and default settings
-
-Requirement: **JDK8 to JDK12 are tested**. Other versions are not tested and may or may not work.
-
-Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.
-
-For production/QA/tests environments, see [Backend and UI deployment documents](#deploy-backend-and-ui).
-
-You can use `bin/startup.sh` (or cmd) to start up the backend and UI with their default settings, set out as follows:
-
-- Backend storage uses **H2 by default** (for an easier start)
-- Backend listens on `0.0.0.0/11800` for gRPC APIs and `0.0.0.0/12800` for HTTP REST APIs.
-
-In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to `ip/host:11800`, where ip/host is where your backend is.
-- UI listens on port `8080` and requests `127.0.0.1/12800` to run GraphQL queries.
-
-### Interaction
-
-Before deploying SkyWalking in your distributed environment, you should learn how agents/probes, the backend, and the UI communicate with each other:
-
-<img src="https://skywalking.apache.org/doc-graph/communication-net.png"/>
-
-- All native agents and probes, either language based or mesh probe, use the gRPC service (`core/default/gRPC*` in `application.yml`) to report data to the backend. Also, the Jetty service is supported in JSON format.
-- UI uses GraphQL (HTTP) query to access the backend also in Jetty service (`core/default/rest*` in `application.yml`).
-
-
-## Startup script
-The default startup scripts are `/bin/oapService.sh`(.bat). 
-Read the [start up mode](backend-start-up-mode.md) document to learn about other ways to start up the backend.
-
-
-## application.yml
-SkyWalking backend startup behaviours are driven by `config/application.yml`.
-Understanding the setting file will help you read this document.
-The core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. 
-End users can switch or assemble the collector features according to their own requirements.
-
-In `application.yml`, there are three levels.
-1. **Level 1**: Module name. This means that this module is active in running mode.
-1. **Level 2**: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If there is only one provider listed, the `selector` is optional and can be omitted.
-1. **Level 3**: Settings of the provider.
-
-Example:
-
-```yaml
-storage:
-  selector: mysql # the mysql storage will actually be activated, while the h2 storage takes no effect
-  h2:
-    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-    user: ${SW_STORAGE_H2_USER:sa}
-    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-  mysql:
-    properties:
-      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-  # other configurations
-```
-
-1. **`storage`** is the module.
-1. **`selector`** selects one out of all providers listed below. The unselected ones take no effect as if they were deleted.
-1. **`default`** is the common name of a module's default implementor; in this example, `h2` and `mysql` are the implementors of the storage module.
-1. `driver`, `url`, ... `metadataQueryMaxSize` are all setting items of the implementor.
-
-At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. 
-Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have
-a provider implementation called `none`, meaning that it only provides a shell with no actual logic; the telemetry module is a typical example.
-Setting `-` to the `selector` means that this whole module will be excluded at runtime.
-We advise against trying to change the APIs of those modules, unless you understand the SkyWalking project and its codes very well.
-
-The required modules are listed here:
-1. **Core**. Provides the basic and major skeleton of all data analysis and stream dispatch.
-1. **Cluster**. Manages multiple backend instances in a cluster, which could provide high throughput process
-capabilities. See [**Cluster Management**](backend-cluster.md) for more details.
-1. **Storage**. Makes the analysis result persistent. See [**Choose storage**](backend-storage.md) for more details.
-1. **Query**. Provides query interfaces to UI.
-1. **Receiver** and **Fetcher**. Expose the service to the agents and probes, or read telemetry data from a channel. 
-See [Receiver](backend-receivers.md) and [Fetcher](backend-fetcher.md) documents for more details.
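-
-As an abridged sketch (not a complete configuration; see the `application.yml` shipped in the distribution for the full set of settings), the required modules sit side by side like this:
-
-```yaml
-cluster:
-  selector: ${SW_CLUSTER:standalone}
-  standalone:
-core:
-  selector: ${SW_CORE:default}
-  default:
-    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
-    restPort: ${SW_CORE_REST_PORT:12800}
-    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
-storage:
-  selector: ${SW_STORAGE:h2}
-  h2:
-    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-```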
-
-## FAQs
-#### Why do we need to set the timezone? And when do we do it?
-SkyWalking provides downsampled time series metrics.
-Queries and storage at each time dimension (minute, hour, day, and month metrics indexes)
-depend on the timezone used when formatting the time.
-
-For example, in the minute dimension, the metrics time is formatted as YYYYMMDDHHmm,
-which is timezone-dependent.
-  
-By default, SkyWalking's OAP backend chooses the OS default timezone.
-If you want to override it, please follow the Java and OS documents.
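-
-For example, one common approach (a sketch, assuming the startup scripts honor the standard `JAVA_OPTS` environment variable) is to set the JVM default timezone explicitly:
-
-```shell
-# Force the OAP JVM to use UTC instead of the OS default timezone
-export JAVA_OPTS="-Duser.timezone=UTC"
-bin/oapService.sh
-```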
-
-#### How to query the storage directly from a 3rd party tool?
-SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data 
-directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.
-
-By default, in order to reduce memory, network and storage space usage, SkyWalking saves only base64-encoded ID(s) in metrics entities.
-But these tools usually don't support nested queries and are not convenient to work with. For this reason,
-SkyWalking provides a config to add all necessary name column(s), alongside the IDs, into the final metrics entities as a trade-off.
-
-Take a look at the `core/default/activeExtraModelColumns` config in `application.yml`, and set it to `true` to enable this feature.
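-
-For example (a minimal fragment):
-
-```yaml
-core:
-  default:
-    # Append entity names (e.g. service names) into metrics entities
-    activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:true}
-```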
-
-Note that this feature is simply for 3rd party integration and doesn't provide any new features to native SkyWalking use cases.
diff --git a/docs/en/setup/backend/backend-start-up-mode.md b/docs/en/setup/backend/backend-start-up-mode.md
deleted file mode 100644
index 50d5b1b..0000000
--- a/docs/en/setup/backend/backend-start-up-mode.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Start up mode
-Different deployment tools, such as k8s, may require different startup modes.
-We provide two optional startup modes in addition to the default.
-
-## Default mode
-The default mode carries out tasks to initialize as necessary, starts listening, and provides services.
-
-Run `/bin/oapService.sh`(.bat) to start in this mode. This is also applicable when you're using `startup.sh`(.bat) to start.
-
-## Init mode
-In this mode, the OAP server starts up to carry out initialization, and then exits.
-You could use this mode to initialize your storage (such as ElasticSearch indexes, or MySQL and TiDB tables),
-as well as your data.
-
-Run `/bin/oapServiceInit.sh`(.bat) to start in this mode.
-
-## No-init mode
-In this mode, the OAP server starts up without carrying out initialization. Rather, it waits for the ElasticSearch indexes or MySQL and TiDB tables to be ready,
-then starts listening and provides services. In other words, the OAP server expects another OAP server to carry out the initialization.
-
-Run `/bin/oapServiceNoInit.sh`(.bat) to start in this mode.
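-
-These modes combine naturally in orchestrated deployments. As a hypothetical sketch (the image reference and script path are assumptions), a k8s Job could run init mode once, while the regular OAP Deployment starts in no-init mode and waits for the initialized storage:
-
-```yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: oap-init
-spec:
-  template:
-    spec:
-      containers:
-        - name: oap-init
-          image: apache/skywalking-oap-server # hypothetical image reference
-          command: ["bin/oapServiceInit.sh"]  # initializes storage, then exits
-      restartPolicy: OnFailure
-```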
diff --git a/docs/en/setup/backend/backend-storage.md b/docs/en/setup/backend/backend-storage.md
deleted file mode 100644
index e750e0d..0000000
--- a/docs/en/setup/backend/backend-storage.md
+++ /dev/null
@@ -1,279 +0,0 @@
-# Backend storage
-The SkyWalking storage is pluggable. We have provided the following storage solutions, allowing you to easily
-use one of them by specifying it as the `selector` in `application.yml`:
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:elasticsearch7}
-```
-
-Natively supported storage:
-- H2
-- OpenSearch
-- ElasticSearch 6, 7
-- MySQL
-- TiDB
-- InfluxDB
-- PostgreSQL
-
-
-## H2
-To activate H2 as storage, set the storage provider to **H2** in-memory database. This is the default in the distribution package.
-Please read `Database URL Overview` in [H2 official document](http://www.h2database.com/html/features.html).
-You can set the target to H2 in **Embedded**, **Server** and **Mixed** modes.
-
-Setting fragment example
-```yaml
-storage:
-  selector: ${SW_STORAGE:h2}
-  h2:
-    driver: org.h2.jdbcx.JdbcDataSource
-    url: jdbc:h2:mem:skywalking-oap-db
-    user: sa
-```
-
-## OpenSearch
-
-OpenSearch storage shares the same configurations as ElasticSearch 7.
-To activate OpenSearch as storage, set the storage provider to **elasticsearch7**.
-Please download `apache-skywalking-bin-es7.tar.gz` if you want to use OpenSearch as storage.
-
-## ElasticSearch
-
-**NOTE:** Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public
-License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch
-version 7.11. So please choose the suitable ElasticSearch version according to your usage.
-
-- In order to activate ElasticSearch 6 as storage, set storage provider to **elasticsearch**
-- In order to activate ElasticSearch 7 as storage, set storage provider to **elasticsearch7**
-
-**ElasticSearch 6.3.2 or higher is required. The HTTP RestHighLevelClient is used to connect to the server.**
-
-- For ElasticSearch 6.3.2 ~ 7.0.0 (excluded), please download `apache-skywalking-bin.tar.gz`.
-- For ElasticSearch 7.0.0 ~ 8.0.0 (excluded), please download `apache-skywalking-bin-es7.tar.gz`.
-
-For now, ElasticSearch 6 and ElasticSearch 7 share the same configurations as follows:
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:elasticsearch}
-  elasticsearch:
-    nameSpace: ${SW_NAMESPACE:""}
-    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-    trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:""}
-    trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
-    user: ${SW_ES_USER:""}
-    password: ${SW_ES_PASSWORD:""}
-    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
-    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
-    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1} # Shard number of new indexes
-    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1} # Replicas number of new indexes
-    # Super datasets are defined in the code, such as trace segments. The following 3 configs improve ES performance when storing super-size data in ES.
-    superDatasetDayStep: ${SW_SUPERDATASET_STORAGE_DAY_STEP:-1} # Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0
-    superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5} # This factor provides more shards for the super dataset, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor affects Zipkin and Jaeger traces.
-    superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0} # Represent the replicas number in the super size dataset record index, the default value is 0.
-    indexTemplateOrder: ${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0} # the order of index template
-    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requests
-    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds, regardless of the number of requests
-    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
-    oapAnalyzer: ${SW_STORAGE_ES_OAP_ANALYZER:"{\"analyzer\":{\"oap_analyzer\":{\"type\":\"stop\"}}}"} # the oap analyzer.
-    oapLogAnalyzer: ${SW_STORAGE_ES_OAP_LOG_ANALYZER:"{\"analyzer\":{\"oap_log_analyzer\":{\"type\":\"standard\"}}}"} # the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese logs, Japanese logs, etc.
-    advanced: ${SW_STORAGE_ES_ADVANCED:""}
-```
-
-### ElasticSearch 6 With HTTPS SSL Encrypted Communications
-
-Example: 
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:elasticsearch}
-  elasticsearch:
-    # nameSpace: ${SW_NAMESPACE:""}
-    user: ${SW_ES_USER:""} # User needs to be set when Http Basic authentication is enabled
-    password: ${SW_ES_PASSWORD:""} # Password to be set when Http Basic authentication is enabled
-    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}
-    trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-    trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
-    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"https"}
-    ...
-```
-- File at `trustStorePath` is being monitored. Once it is changed, the ElasticSearch client will reconnect.
-- `trustStorePass` could be changed in the runtime through [**Secrets Management File Of ElasticSearch Authentication**](#secrets-management-file-of-elasticsearch-authentication).
-
-### Daily Index Step
-Daily index step (`storage/elasticsearch/dayStep`, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.
-
-In most cases, users don't need to change the value manually, as SkyWalking is designed to observe large scale distributed systems.
-But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment.
-This value could be increased to 5 (or more), if users can ensure that a single index can support the metrics and traces for these days (5 in this case).
-
-For example, if dayStep == 11, 
-1. Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101.
-1. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.
-
-`storage/elasticsearch/superDatasetDayStep` overrides the `storage/elasticsearch/dayStep` if the value is positive.
-This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.
- 
-NOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.
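-
-As a sketch of that rule in configuration (both settings are documented in the configuration vocabulary):
-
-```yaml
-core:
-  default:
-    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:40} # desired 30 days + dayStep 10
-storage:
-  elasticsearch:
-    dayStep: ${SW_STORAGE_DAY_STEP:10}
-```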
-
-### Secrets Management File Of ElasticSearch Authentication
-The value of `secretsManagementFile` should point to the secrets management file absolute path. 
-The file includes username, password, and JKS password of the ElasticSearch server in the properties format.
-```properties
-user=xxx
-password=yyy
-trustStorePass=zzz
-```
-
-The major difference from using the `user`, `password`, and `trustStorePass` configs in the `application.yml` file is that the **Secrets Management File** is watched by the OAP server.
-Once it is changed manually or through a 3rd party tool, such as [Vault](https://github.com/hashicorp/vault),
-the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file,
-the `user/password` will be overridden.
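-
-For example (a minimal fragment; the file path is only an illustration):
-
-```yaml
-storage:
-  elasticsearch:
-    # Absolute path of the secrets file watched by the OAP server
-    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:"/skywalking/secrets/es.properties"}
-```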
-
-### Advanced Configurations For Elasticsearch Index
-You can add advanced configurations in `JSON` format to set `ElasticSearch index settings` by following the [ElasticSearch doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html).
-
-For example, set [translog](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-translog.html) settings:
-
-```yaml
-storage:
-  elasticsearch:
-    # ......
-    advanced: ${SW_STORAGE_ES_ADVANCED:"{\"index.translog.durability\":\"request\",\"index.translog.sync_interval\":\"5s\"}"}
-```
-
-### Recommended ElasticSearch server-side configurations
-You could add the following configuration to `elasticsearch.yml`, and set the value based on your environment.
-
-```yml
-# In the tracing scenario, consider setting at least this value.
-thread_pool.index.queue_size: 1000 # Only suitable for ElasticSearch 6
-thread_pool.write.queue_size: 1000 # Suitable for ElasticSearch 6 and 7
-
-# When you face query errors on the trace page, remember to check this.
-index.max_result_window: 1000000
-```
-
-We strongly recommend that you read more about these configurations from ElasticSearch's official document, since they have a direct impact on the performance of ElasticSearch.
-
-
-### ElasticSearch 7 with Zipkin trace extension
-This implementation is very similar to `elasticsearch7`, except that it extends to support Zipkin span storage.
-The configurations are largely the same.
-```yaml
-storage:
-  selector: ${SW_STORAGE:zipkin-elasticsearch7}
-  zipkin-elasticsearch7:
-    nameSpace: ${SW_NAMESPACE:""}
-    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-    user: ${SW_ES_USER:""}
-    password: ${SW_ES_PASSWORD:""}
-    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:2000} # Execute the bulk every 2000 requests
-    bulkSize: ${SW_STORAGE_ES_BULK_SIZE:20} # flush the bulk every 20mb
-    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds, regardless of the number of requests
-    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-```
-
-### About Namespace
-When a namespace is set, all index names in ElasticSearch will use it as the prefix.
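-
-For example, a minimal sketch (assuming the underscore-joined index naming convention):
-
-```yaml
-storage:
-  elasticsearch:
-    nameSpace: ${SW_NAMESPACE:"production"} # e.g. the `segment` index becomes `production_segment`
-```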
-
-## MySQL
-To activate MySQL as storage, set the storage provider to **mysql**.
-
-**NOTE:** MySQL driver is NOT allowed in Apache official distribution and source codes. 
-Please download MySQL driver on your own. Copy the connection driver jar to `oap-libs`.
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:mysql}
-  mysql:
-    properties:
-      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-```
-All connection-related settings, including the URL, username, and password, are found in `application.yml`.
-Only part of the settings are listed here. See the [HikariCP](https://github.com/brettwooldridge/HikariCP) connection pool document for full settings.
-
-## TiDB
-TiDB Server 4.0.8 and MySQL Client driver 8.0.13 have been tested and are currently available.
-To activate TiDB as storage, set the storage provider to **tidb**.
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:tidb}
-  tidb:
-    properties:
-      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:4000/swtest"}
-      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:""}
-      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-      dataSource.useAffectedRows: ${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}
-    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-    maxSizeOfArrayColumn: ${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}
-    numOfSearchableValuesPerTag: ${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}
-```
-All connection-related settings, including the URL, username, and password, are found in `application.yml`.
-For details on settings, refer to the configuration of *MySQL* above.
-
-## InfluxDB
-InfluxDB storage provides a time-series database as a new storage option.
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:influxdb}
-  influxdb:
-    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-    user: ${SW_STORAGE_INFLUXDB_USER:root}
-    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
-    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
-```
-All connection-related settings, including the URL, username, and password, are found in `application.yml`. For metadata storage provider settings, refer to the configurations of **H2/MySQL** above.
-
-## PostgreSQL
-The PostgreSQL JDBC driver version 42.2.18 is used. It supports PostgreSQL 8.2 or newer.
-To activate PostgreSQL as storage, set the storage provider to **postgresql**.
-
-```yaml
-storage:
-  selector: ${SW_STORAGE:postgresql}
-  postgresql:
-    properties:
-      jdbcUrl: ${SW_JDBC_URL:"jdbc:postgresql://localhost:5432/skywalking"}
-      dataSource.user: ${SW_DATA_SOURCE_USER:postgres}
-      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:123456}
-      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-    maxSizeOfArrayColumn: ${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}
-    numOfSearchableValuesPerTag: ${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}
-```
-All connection-related settings, including the URL, username, and password, are found in `application.yml`.
-Only part of the settings are listed here. See the [HikariCP](https://github.com/brettwooldridge/HikariCP) connection pool document for full settings.
-
-## More storage extension solutions
-Follow the [Storage extension development guide](../../guides/storage-extention.md) 
-in the [Project Extensions document](../../guides/README.md#project-extensions).
diff --git a/docs/en/setup/backend/backend-telemetry.md b/docs/en/setup/backend/backend-telemetry.md
deleted file mode 100644
index 871f24e..0000000
--- a/docs/en/setup/backend/backend-telemetry.md
+++ /dev/null
@@ -1,193 +0,0 @@
-# Telemetry for backend
-The OAP backend cluster itself is a distributed streaming processing system. To assist the Ops team,
-we provide the telemetry for the OAP backend itself. 
-
-By default, the telemetry is disabled by setting `selector` to `none`, like this:
-
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:none}
-  none:
-  prometheus:
-    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-    sslEnabled: ${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}
-    sslKeyPath: ${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:""}
-    sslCertChainPath: ${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:""}
-```
-
-You may set the `selector` to `prometheus` to enable it. For more information, refer to the details below.
-
-## Self Observability
-### Static IP or hostname
-SkyWalking supports collecting telemetry data into the OAP backend directly. Users can check it out through the UI or
-GraphQL API.
-
-Add the following configuration to enable self-observability related modules.
-
-1. Set up Prometheus telemetry.
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:prometheus}
-  prometheus:
-    host: 127.0.0.1
-    port: 1543
-```
-
-2. Set up the Prometheus fetcher.
-
-```yaml
-prometheus-fetcher:
-  selector: ${SW_PROMETHEUS_FETCHER:default}
-  default:
-    enabledRules: ${SW_PROMETHEUS_FETCHER_ENABLED_RULES:"self"}
-``` 
-
-3. Make sure `config/fetcher-prom-rules/self.yaml` exists. 
-
-Once you deploy an oap-server cluster, the target host should be replaced with a dedicated IP or hostname. For instance,
-suppose there are three OAP servers in your cluster, whose hosts are `service1`, `service2`, and `service3` respectively. You should
-update each `self.yaml` to switch the target host.
-
-service1: 
-```yaml
-fetcherInterval: PT15S
-fetcherTimeout: PT10S
-metricsPath: /metrics
-staticConfig:
-  # targets will be labeled as "instance"
-  targets:
-    - service1:1234
-  labels:
-    service: oap-server
-...
-```
-
-service2: 
-```yaml
-fetcherInterval: PT15S
-fetcherTimeout: PT10S
-metricsPath: /metrics
-staticConfig:
-  # targets will be labeled as "instance"
-  targets:
-    - service2:1234
-  labels:
-    service: oap-server
-...
-```
-
-service3: 
-```yaml
-fetcherInterval: PT15S
-fetcherTimeout: PT10S
-metricsPath: /metrics
-staticConfig:
-  # targets will be labeled as "instance"
-  targets:
-    - service3:1234
-  labels:
-    service: oap-server
-...
-```
-### Service discovery (k8s)
-If you deploy an oap-server cluster on k8s, the oap-server instance (pod) would not have a static IP or hostname. We can leverage [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/getting-started/#kubernetes) to discover the oap-server instance, and scrape & transfer the metrics to OAP [OpenTelemetry receiver](backend-receivers.md#opentelemetry-receiver). 
-
-On how to install SkyWalking on k8s, you can refer to [Apache SkyWalking Kubernetes](https://github.com/apache/skywalking-kubernetes).
-
-Set this up following these steps:
-
-1. Set up oap-server.
-- Set the metrics port.
-  ```
-  prometheus-port: 1234
-  ```
-- Set environment variables.
-  ```
-  SW_TELEMETRY=prometheus 
-  SW_OTEL_RECEIVER=default 
-  SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap
-  ```
-
-  Here is an example of installation using Apache SkyWalking Kubernetes:
-  ```
-  helm -n istio-system install skywalking skywalking \
-               --set elasticsearch.replicas=1 \
-               --set elasticsearch.minimumMasterNodes=1 \
-               --set elasticsearch.imageTag=7.5.1 \
-               --set oap.replicas=2 \
-               --set ui.image.repository=$HUB/skywalking-ui \
-               --set ui.image.tag=$TAG \
-               --set oap.image.tag=$TAG \
-               --set oap.image.repository=$HUB/skywalking-oap \
-               --set oap.storageType=elasticsearch7 \
-               --set oap.ports.prometheus-port=1234 \ # <<< Expose self observability metrics port
-               --set oap.env.SW_TELEMETRY=prometheus \
-               --set oap.env.SW_OTEL_RECEIVER=default \ # <<< Enable Otel receiver
-               --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # <<< Add oap analyzer for Otel metrics
-  ```
-2. Set up the OpenTelemetry Collector and configure a scrape job:
-``` yaml
-- job_name: 'skywalking'
-  metrics_path: '/metrics'
-  kubernetes_sd_configs:
-  - role: pod
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]
-    action: keep
-    regex: oap;prometheus-port  
-  - source_labels: []
-    target_label: service
-    replacement: oap-server
-  - source_labels: [__meta_kubernetes_pod_name]
-    target_label: host_name
-    regex: (.+)
-    replacement: $$1 
-```
-For the full example of the OpenTelemetry Collector configuration and the recommended version, refer to [otel-collector-oap.yaml](otel-collector-oap.yaml).
-
-
-
-___
-
-**NOTE**: Since Apr 21, 2021, the **Grafana** project has been relicensed to **AGPL-v3**, and is no longer licensed under Apache 2.0. Check the LICENSE details.
-The following Prometheus + Grafana solution is optional, rather than recommended.
-
-## Prometheus
-Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking's backend.
-
-Set the provider to `prometheus`. The endpoint opens at `http://0.0.0.0:1234/` and `http://0.0.0.0:1234/metrics`.
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:prometheus}
-  prometheus:
-```
-
-Set host and port if needed.
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:prometheus}
-  prometheus:
-    host: 127.0.0.1
-    port: 1543
-```
-
-Set the relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file are reloaded once
-changes are applied to them.
-```yaml
-telemetry:
-  selector: ${SW_TELEMETRY:prometheus}
-  prometheus:
-    host: 127.0.0.1
-    port: 1543
-    sslEnabled: true
-    sslKeyPath: /etc/ssl/key.pem
-    sslCertChainPath: /etc/ssl/cert-chain.pem
-```
-
-### Grafana Visualization
-The Grafana dashboard settings are provided below.
-Check [SkyWalking OAP Cluster Monitor Dashboard](grafana-cluster.json) config and [SkyWalking OAP Instance Monitor Dashboard](grafana-instance.json) config.
-
-
-
diff --git a/docs/en/setup/backend/backend-token-auth.md b/docs/en/setup/backend/backend-token-auth.md
deleted file mode 100644
index 78c62b5..0000000
--- a/docs/en/setup/backend/backend-token-auth.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Token Authentication
-## Supported version
-7.0.0+
-
-## Why do we need token authentication after TLS?
-TLS is about transport security, which makes sure that a network can be trusted. 
-On the other hand, token authentication is about verifying **whether application data can be trusted**.
-
-## Token 
-In the current version, token is considered a simple string.
-
-### Set Token
-1. Set token in agent.config file
-```properties
-# Authentication active is based on backend setting, see application.yml for more details.
-agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx}
-```
-
-2. Set token in `application.yml` file
-```yaml
-······
-receiver-sharing-server:
-  default:
-    authentication: ${SW_AUTHENTICATION:""}
-······
-```
-
-## Authentication failure
-The SkyWalking OAP verifies every request from the agent, and only allows requests whose token matches the one configured in `application.yml` to pass through.
-
-If the token does not match, you will see the following log in the agent:
-```
-org.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED
-```
-
-## FAQ
-### Can I use token authentication instead of TLS?
-No, you shouldn't. Of course it's technically possible, but token and TLS are used for untrusted network environments. In these circumstances,
-TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.
-
-### Do you support other authentication mechanisms, such as ak/sk?
-Not for now. But we welcome contributions on this feature. 
diff --git a/docs/en/setup/backend/backend-vm-monitoring.md b/docs/en/setup/backend/backend-vm-monitoring.md
deleted file mode 100644
index 57e19e7..0000000
--- a/docs/en/setup/backend/backend-vm-monitoring.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# VMs monitoring 
-SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, and leverages OpenTelemetry Collector to transfer the metrics to
-[OpenTelemetry receiver](backend-receivers.md#opentelemetry-receiver) and into the [Meter System](./../../concepts-and-designs/meter.md).  
-We define the VM entity as a `Service` in OAP, and use `vm::` as a prefix to identify it.  
-
-## Data flow
-1. The Prometheus node-exporter collects metrics data from the VMs.
-2. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter.
-3. The SkyWalking OAP Server parses the expression with [MAL](../../concepts-and-designs/mal.md) to filter/calculate/aggregate and store the results. 
-
-
-## Setup
-
-1. Set up [Prometheus node-exporter](https://prometheus.io/docs/guides/node-exporter/).
-2. Set up the [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/). Here is an example OpenTelemetry Collector configuration: [otel-collector-config.yaml](../../../../test/e2e/e2e-test/docker/promOtelVM/otel-collector-config.yaml).
-3. Configure the SkyWalking [OpenTelemetry receiver](backend-receivers.md#opentelemetry-receiver).
-   
-## Supported Metrics
-
-| Monitoring Panel | Unit | Metric Name | Description | Data Source |
-|-----|-----|-----|-----|-----|
-| CPU Usage | % | cpu_total_percentage | The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. | Prometheus node-exporter |
-| Memory RAM Usage | MB | meter_vm_memory_used | The total RAM usage | Prometheus node-exporter |
-| Memory Swap Usage | % | meter_vm_memory_swap_percentage | The percentage usage of swap memory | Prometheus node-exporter |
-| CPU Average Used | % | meter_vm_cpu_average_used | The percentage usage of the CPU core in each mode | Prometheus node-exporter |
-| CPU Load |  | meter_vm_cpu_load1<br />meter_vm_cpu_load5<br />meter_vm_cpu_load15 | The CPU 1m / 5m / 15m average load | Prometheus node-exporter |
-| Memory RAM | MB | meter_vm_memory_total<br />meter_vm_memory_available<br />meter_vm_memory_used | The RAM statistics, including Total / Available / Used | Prometheus node-exporter |
-| Memory Swap | MB | meter_vm_memory_swap_free<br />meter_vm_memory_swap_total | Swap memory statistics, including Free / Total | Prometheus node-exporter |
-| File System Mountpoint Usage | % | meter_vm_filesystem_percentage | The percentage usage of the file system at each mount point | Prometheus node-exporter |
-| Disk R/W | KB/s | meter_vm_disk_read<br />meter_vm_disk_written | The disk read and write rates | Prometheus node-exporter |
-| Network Bandwidth Usage | KB/s | meter_vm_network_receive<br />meter_vm_network_transmit | The network receive and transmit | Prometheus node-exporter |
-| Network Status |  | meter_vm_tcp_curr_estab<br />meter_vm_tcp_tw<br />meter_vm_tcp_alloc<br />meter_vm_sockets_used<br />meter_vm_udp_inuse | The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use | Prometheus node-exporter |
-| Filefd Allocated |  | meter_vm_filefd_allocated | The number of file descriptors allocated | Prometheus node-exporter |
-
-## Customizing 
-You can customize your own metrics/expression/dashboard panel.   
-The metrics definition and expression rules are found in `/config/otel-oc-rules/vm.yaml`.  
-The dashboard panel configurations are found in `/config/ui-initialized-templates/vm.yml`.
-
-## Blog
-For more details, see blog article [SkyWalking 8.4 provides infrastructure monitoring](https://skywalking.apache.org/blog/2021-02-07-infrastructure-monitoring/).
diff --git a/docs/en/setup/backend/backend-zabbix.md b/docs/en/setup/backend/backend-zabbix.md
deleted file mode 100644
index 97d0b19..0000000
--- a/docs/en/setup/backend/backend-zabbix.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Zabbix Receiver
-The Zabbix receiver accepts metrics in the [Zabbix Agent Active Checks protocol](https://www.zabbix.com/documentation/current/manual/appendix/items/activepassive#active_checks) format into the [Meter System](./../../concepts-and-designs/meter.md).
-Zabbix Agent is based on the GPL-2.0 License.
-
-## Module definition
-```yaml
-receiver-zabbix:
-  selector: ${SW_RECEIVER_ZABBIX:default}
-  default:
-    # Exported TCP port, to which the Zabbix agent can connect and transport data
-    port: 10051
-    # Bind to host
-    host: 0.0.0.0
-    # Config files to enable when receiving agent requests
-    activeFiles: agent
-```
-
-## Configuration file
-The Zabbix receiver is configured via a configuration file that defines everything related to receiving 
- from agents, as well as which rule files to load.
- 
-The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files
-are located at `$CLASSPATH/zabbix-rules`.
-
-The file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.
-
-An example of Zabbix agent configuration can be found [here](../../../../test/e2e/e2e-test/docker/zabbix/zabbix_agentd.conf).
-You could find details on Zabbix agent items from [Zabbix Agent documentation](https://www.zabbix.com/documentation/current/manual/config/items/itemtypes/zabbix_agent).
-
-### Configuration file
-
-```yaml
-# insert metricPrefix into metric name:  <metricPrefix>_<raw_metric_name>
-metricPrefix: <string>
-# expSuffix is appended to all expressions in this file.
-expSuffix: <string>
-# Datasource from Zabbix Item keys.
-requiredZabbixItemKeys:
- - <zabbix item keys>
-# Supported agent entity information.
-entities:
-  # Allowed hostname patterns for building metrics.
-  hostPatterns:
-    - <regex string>
-  # Customized metric labels, applied before parsing to the meter system.
-  labels:
-    [- <labels> ]
-# Metrics rules allow you to recompute queries.
-metrics:
-  [ - <metrics_rules> ]
-```
-
-#### <labels>
-
-```yaml
-# Define the label name. The label value must be queried from the `value` or `fromItem` attribute.
-name: <string>
-# Assign a value to the label.
-[value: <string>]
-# Query label value from Zabbix Agent Item key.
-[fromItem: <string>]
-```
-
-#### <metrics_rules>
-
-```yaml
-# The name of the rule, which is combined with the prefix 'meter_' to form the index/table name in storage.
-name: <string>
-# MAL expression.
-exp: <string>
-```
-
-For more on MAL, please refer to [mal.md](../../concepts-and-designs/mal.md).
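-
-Putting the schema together, a hypothetical rule file could look like the following; the item key, host pattern, label, and expression are illustrative only, not shipped defaults:
-
-```yaml
-metricPrefix: meter_vm
-expSuffix: instance(['service'], ['host'])
-requiredZabbixItemKeys:
-  - system.cpu.load[all,avg1]
-entities:
-  hostPatterns:
-    - ^vm-.*$
-  labels:
-    - name: service
-      value: zabbix-vm
-metrics:
-  - name: cpu_load1
-    exp: system_cpu_load_all_avg1.avg(['service'])
-```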
diff --git a/docs/en/setup/backend/configuration-vocabulary.md b/docs/en/setup/backend/configuration-vocabulary.md
deleted file mode 100644
index f6db25e..0000000
--- a/docs/en/setup/backend/configuration-vocabulary.md
+++ /dev/null
@@ -1,297 +0,0 @@
-# Configuration Vocabulary
-The Configuration Vocabulary lists all available configurations provided by `application.yml`.
-
-Module | Provider | Settings | Value(s) and Explanation | System Environment Variable¹ | Default |
------------ | ---------- | --------- | --------- |--------- |--------- |
-core|default|role|Option values: `Mixed/Receiver/Aggregator`. **Receiver** mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. **Aggregator** mode OAP receives data from Mixed and Receiver role OAP nodes, and performs 2nd level aggregation. **Mixed** means both Receiver and Aggregator. |SW_CORE_ROLE|Mixed|
-| - | - | restHost| Binding IP of RESTful services. Services include GraphQL query and HTTP data report. |SW_CORE_REST_HOST|0.0.0.0|
-| - | - | restPort | Binding port of RESTful services. | SW_CORE_REST_PORT|12800|
-| - | - | restContextPath| Web context path of RESTful services. | SW_CORE_REST_CONTEXT_PATH|/|
-| - | - | restMinThreads| Minimum thread number of RESTful services. | SW_CORE_REST_JETTY_MIN_THREADS|1|
-| - | - | restMaxThreads| Maximum thread number of RESTful services. | SW_CORE_REST_JETTY_MAX_THREADS|200|
-| - | - | restIdleTimeOut| Connector idle timeout of RESTful services (in milliseconds). | SW_CORE_REST_JETTY_IDLE_TIMEOUT|30000|
-| - | - | restAcceptorPriorityDelta| Thread priority delta to give to acceptor threads of RESTful services. | SW_CORE_REST_JETTY_DELTA|0|
-| - | - | restAcceptQueueSize| ServerSocketChannel Backlog of RESTful services. | SW_CORE_REST_JETTY_QUEUE_SIZE|0|
-| - | - | httpMaxRequestHeaderSize| Maximum request header size accepted. | SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE|8192|
-| - | - | gRPCHost| Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. |SW_CORE_GRPC_HOST|0.0.0.0|
-| - | - | gRPCPort| Binding port of gRPC services. | SW_CORE_GRPC_PORT|11800|
-| - | - | gRPCSslEnabled| Activates SSL for gRPC services. | SW_CORE_GRPC_SSL_ENABLED|false|
-| - | - | gRPCSslKeyPath| File path of gRPC SSL key. | SW_CORE_GRPC_SSL_KEY_PATH| - |
-| - | - | gRPCSslCertChainPath| File path of gRPC SSL cert chain. | SW_CORE_GRPC_SSL_CERT_CHAIN_PATH| - |
-| - | - | gRPCSslTrustedCAPath| File path of gRPC trusted CA. | SW_CORE_GRPC_SSL_TRUSTED_CA_PATH| - |
-| - | - | downsampling| Activated level of down sampling aggregation. | | Hour,Day|
-| - | - | persistentPeriod| Execution period of the persistent timer (in seconds). | | 25 |
-| - | - | enableDataKeeperExecutor| Controller of TTL scheduler. Once disabled, TTL wouldn't work. |SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR|true|
-| - | - | dataKeeperExecutePeriod| Execution period of TTL scheduler (in minutes). Execution doesn't mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this.|SW_CORE_DATA_KEEPER_EXECUTE_PERIOD|5|
-| - | - | recordDataTTL| The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. |SW_CORE_RECORD_DATA_TTL|3|
-| - | - | metricsDataTTL| The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL >= recordDataTTL. Minimum value is 2. | SW_CORE_METRICS_DATA_TTL|7|
-| - | - | l1FlushPeriod| The period of L1 aggregation flush to L2 aggregation (in milliseconds). | SW_CORE_L1_AGGREGATION_FLUSH_PERIOD | 500 |
-| - | - | storageSessionTimeout| The threshold of session time (in milliseconds). Default value is 70000. | SW_CORE_STORAGE_SESSION_TIMEOUT | 70000 |
-| - | - | enableDatabaseSession| Cache metrics data for 1 minute to reduce database queries; if the OAP cluster changes within that minute, the metrics may be inaccurate for that minute. |SW_CORE_ENABLE_DATABASE_SESSION|true|
-| - | - | topNReportPeriod|The execution period (in minutes) of top N sampler, which saves sampled data into the storage. |SW_CORE_TOPN_REPORT_PERIOD|10|
-| - | - | activeExtraModelColumns|Appends entity names (e.g. service names) into metrics storage entities. |SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS|false|
-| - | - | serviceNameMaxLength| Maximum length limit of service names. |SW_SERVICE_NAME_MAX_LENGTH|70|
-| - | - | instanceNameMaxLength| Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200.|SW_INSTANCE_NAME_MAX_LENGTH|70|
-| - | - | endpointNameMaxLength| Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240.|SW_ENDPOINT_NAME_MAX_LENGTH|150|
-| - | - | searchableTracesTags | Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. | SW_SEARCHABLE_TAG_KEYS | http.method,status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker|
-| - | - | searchableLogsTags | Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. | SW_SEARCHABLE_LOGS_TAG_KEYS | level |
-| - | - | searchableAlarmTags | Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. | SW_SEARCHABLE_ALARM_TAG_KEYS | level |
-| - | - | gRPCThreadPoolSize| Pool size of gRPC server. | SW_CORE_GRPC_THREAD_POOL_SIZE | CPU core * 4|
-| - | - | gRPCThreadPoolQueueSize| Queue size of gRPC server. | SW_CORE_GRPC_POOL_QUEUE_SIZE | 10000|
-| - | - | maxConcurrentCallsPerConnection | The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. | SW_CORE_GRPC_MAX_CONCURRENT_CALL | - |
-| - | - | maxMessageSize | Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. | SW_CORE_GRPC_MAX_MESSAGE_SIZE | 4M(based on Netty) |
-| - | - | remoteTimeout | Timeout for cluster internal communication (in seconds). | - |20|
-| - | - | maxSizeOfNetworkAddressAlias| The maximum size of network address detected in the system being monitored. | - | 1_000_000|
-| - | - | maxPageSizeOfQueryProfileSnapshot| The maximum size for snapshot analysis in an OAP query. | - | 500 |
-| - | - | maxSizeOfAnalyzeProfileSnapshot| The maximum number of snapshots analyzed by the OAP. | - | 12000 |
-| - | - | prepareThreads| The number of threads used to prepare metrics data to the storage. | SW_CORE_PREPARE_THREADS | 2 |
-| - | - | enableEndpointNameGroupingByOpenapi | Automatically groups endpoints by the given OpenAPI definitions. | SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI | true |
-|cluster|standalone| - | Standalone mode runs the OAP on a single node only, and is not suitable for cluster deployments. No configuration available. | - | - |
-| - | zookeeper|nameSpace| The namespace, represented by root path, isolates the configurations in Zookeeper.|SW_NAMESPACE| `/`, root path|
-| - | - | hostPort| Hosts and ports of Zookeeper Cluster. |SW_CLUSTER_ZK_HOST_PORT| localhost:2181|
-| - | - | baseSleepTimeMs| The period of Zookeeper client between two retries (in milliseconds). |SW_CLUSTER_ZK_SLEEP_TIME|1000|
-| - | - | maxRetries| The maximum retry time. |SW_CLUSTER_ZK_MAX_RETRIES|3|
-| - | - | enableACL| Opens ACL using `schema` and `expression`. |SW_ZK_ENABLE_ACL| false|
-| - | - | schema | Schema for the authorization. |SW_ZK_SCHEMA|digest|
-| - | - | expression | Expression for the authorization. |SW_ZK_EXPRESSION|skywalking:skywalking|
-| - | - | internalComHost| The hostname registered in Zookeeper for the internal communication of OAP cluster. | - | -|
-| - | - | internalComPort| The port registered in Zookeeper for the internal communication of OAP cluster. | - | -1|
-| - | kubernetes| namespace| Namespace deployed by SkyWalking in k8s. |SW_CLUSTER_K8S_NAMESPACE|default|
-| - | - | labelSelector| Labels used for filtering OAP deployment in k8s. |SW_CLUSTER_K8S_LABEL| app=collector,release=skywalking|
-| - | - | uidEnvName| Environment variable name for reading uid. | SW_CLUSTER_K8S_UID|SKYWALKING_COLLECTOR_UID|
-| - | consul| serviceName| Service name for SkyWalking cluster. |SW_SERVICE_NAME|SkyWalking_OAP_Cluster|
-| - | - | hostPort| Hosts and ports for Consul cluster.| SW_CLUSTER_CONSUL_HOST_PORT|localhost:8500|
-| - | - | aclToken| ACL Token of Consul. Empty string means `without ACL token`. | SW_CLUSTER_CONSUL_ACLTOKEN | - |
-| - | - | internalComHost| The hostname registered in Consul for internal communications of the OAP cluster. | - | -|
-| - | - | internalComPort| The port registered in Consul for internal communications of the OAP cluster. | - | -1|
-| - | etcd| serviceName| Service name for SkyWalking cluster. |SW_CLUSTER_ETCD_SERVICE_NAME|SkyWalking_OAP_Cluster|
-| - | - | endpoints| Hosts and ports for etcd cluster. | SW_CLUSTER_ETCD_ENDPOINTS|localhost:2379|
-| - | - | namespace | Namespace for SkyWalking cluster. |SW_CLUSTER_ETCD_NAMESPACE | /skywalking |
-| - | - | authentication | Indicates whether there is authentication. | SW_CLUSTER_ETCD_AUTHENTICATION | false |
-| - | - | user | Etcd auth username. | SW_CLUSTER_ETCD_USER | |
-| - | - | password | Etcd auth password. | SW_CLUSTER_ETCD_PASSWORD | |
-| - | Nacos| serviceName| Service name for SkyWalking cluster. |SW_SERVICE_NAME|SkyWalking_OAP_Cluster|
-| - | - | hostPort| Hosts and ports for Nacos cluster.| SW_CLUSTER_NACOS_HOST_PORT|localhost:8848|
-| - | - | namespace| Namespace used by SkyWalking node coordination. | SW_CLUSTER_NACOS_NAMESPACE|public|
-| - | - | internalComHost| The hostname registered in Nacos for internal communications of the OAP cluster. | - | -|
-| - | - | internalComPort| The port registered in Nacos for internal communications of the OAP cluster. | - | -1|
-| - | - | username | Nacos Auth username. | SW_CLUSTER_NACOS_USERNAME | - |
-| - | - | password | Nacos Auth password. | SW_CLUSTER_NACOS_PASSWORD | - |
-| - | - | accessKey | Nacos Auth accessKey. | SW_CLUSTER_NACOS_ACCESSKEY | - |
-| - | - | secretKey | Nacos Auth secretKey.  | SW_CLUSTER_NACOS_SECRETKEY | - |
-| storage|elasticsearch| - | ElasticSearch 6 storage implementation. | - | - |
-| - | - | nameSpace | Prefix of indexes created and used by SkyWalking. | SW_NAMESPACE | - |
-| - | - | clusterNodes | ElasticSearch cluster nodes for client connection.| SW_STORAGE_ES_CLUSTER_NODES |localhost|
-| - | - | protocol | HTTP or HTTPs. | SW_STORAGE_ES_HTTP_PROTOCOL | HTTP|
-| - | - | connectTimeout | Connect timeout of ElasticSearch client (in milliseconds). | SW_STORAGE_ES_CONNECT_TIMEOUT | 500|
-| - | - | socketTimeout | Socket timeout of ElasticSearch client (in milliseconds). | SW_STORAGE_ES_SOCKET_TIMEOUT | 30000|
-| - | - | user| Username of ElasticSearch cluster. | SW_ES_USER | - |
-| - | - | password | Password of ElasticSearch cluster. | SW_ES_PASSWORD | - |
-| - | - | trustStorePath | Trust JKS file path. Only works when username and password are enabled. | SW_STORAGE_ES_SSL_JKS_PATH | - |
-| - | - | trustStorePass | Trust JKS file password. Only works when username and password are enabled. | SW_STORAGE_ES_SSL_JKS_PASS | - |
-| - | - | secretsManagementFile| Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. |SW_ES_SECRETS_MANAGEMENT_FILE | - |
-| - | - | dayStep| Represents the number of days in the one-minute/hour/day index. | SW_STORAGE_DAY_STEP | 1|
-| - | - | indexShardsNumber | Shard number of new indexes. | SW_STORAGE_ES_INDEX_SHARDS_NUMBER | 1 |
-| - | - | indexReplicasNumber | Replicas number of new indexes. | SW_STORAGE_ES_INDEX_REPLICAS_NUMBER | 0 |
-| - | - | superDatasetDayStep | Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. |SW_SUPERDATASET_STORAGE_DAY_STEP|-1 |
-| - | - | superDatasetIndexShardsFactor | Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. |SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR|5 |
-| - | - | superDatasetIndexReplicasNumber | Represents the replicas number in the super size dataset record index. |SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER|0 |
-| - | - | indexTemplateOrder| The order of index template. | SW_STORAGE_ES_INDEX_TEMPLATE_ORDER| 0|
-| - | - | bulkActions| Async bulk size of the record data batch execution. | SW_STORAGE_ES_BULK_ACTIONS| 5000|
-| - | - | flushInterval| Period of flush (in seconds). Does not matter whether `bulkActions` is reached or not. INT(flushInterval * 2/3) is used for index refresh period. | SW_STORAGE_ES_FLUSH_INTERVAL | 15 (index refresh period = 10)|
-| - | - | concurrentRequests| The number of concurrent requests allowed to be executed. | SW_STORAGE_ES_CONCURRENT_REQUESTS| 2 |
-| - | - | resultWindowMaxSize | The maximum size of dataset when the OAP loads cache, such as network aliases. | SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE | 10000|
-| - | - | metadataQueryMaxSize | The maximum size of metadata per query. | SW_STORAGE_ES_QUERY_MAX_SIZE | 5000 |
-| - | - | segmentQueryMaxSize | The maximum size of trace segments per query. | SW_STORAGE_ES_QUERY_SEGMENT_SIZE | 200|
-| - | - | profileTaskQueryMaxSize | The maximum size of profile task per query. | SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE | 200|
-| - | - | advanced | All settings of ElasticSearch index creation. The value should be in JSON format. | SW_STORAGE_ES_ADVANCED | - |
-| - |elasticsearch7| - | ElasticSearch 7 storage implementation. | - | - |
-| - | - | nameSpace | Prefix of indexes created and used by SkyWalking. | SW_NAMESPACE | - |
-| - | - | clusterNodes | ElasticSearch cluster nodes for client connection.| SW_STORAGE_ES_CLUSTER_NODES |localhost|
-| - | - | protocol | HTTP or HTTPs. | SW_STORAGE_ES_HTTP_PROTOCOL | HTTP|
-| - | - | connectTimeout | Connect timeout of ElasticSearch client (in milliseconds). | SW_STORAGE_ES_CONNECT_TIMEOUT | 500|
-| - | - | socketTimeout | Socket timeout of ElasticSearch client (in milliseconds). | SW_STORAGE_ES_SOCKET_TIMEOUT | 30000|
-| - | - | user| Username of ElasticSearch cluster.| SW_ES_USER | - |
-| - | - | password | Password of ElasticSearch cluster. | SW_ES_PASSWORD | - |
-| - | - | trustStorePath | Trust JKS file path. Only works when username and password are enabled. | SW_STORAGE_ES_SSL_JKS_PATH | - |
-| - | - | trustStorePass | Trust JKS file password. Only works when username and password are enabled. | SW_STORAGE_ES_SSL_JKS_PASS | - |
-| - | - | secretsManagementFile| Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated at runtime. |SW_ES_SECRETS_MANAGEMENT_FILE | - |
-| - | - | dayStep| Represents the number of days in the one-minute/hour/day index. | SW_STORAGE_DAY_STEP | 1|
-| - | - | indexShardsNumber | Shard number of new indexes. | SW_STORAGE_ES_INDEX_SHARDS_NUMBER | 1 |
-| - | - | indexReplicasNumber | Replicas number of new indexes. | SW_STORAGE_ES_INDEX_REPLICAS_NUMBER | 0 |
-| - | - | superDatasetDayStep | Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. |SW_SUPERDATASET_STORAGE_DAY_STEP|-1 |
-| - | - | superDatasetIndexShardsFactor | Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. |SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR|5 |
-| - | - | superDatasetIndexReplicasNumber | Represents the replicas number in the super size dataset record index. |SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER|0 |
-| - | - | indexTemplateOrder| The order of index template. | SW_STORAGE_ES_INDEX_TEMPLATE_ORDER| 0|
-| - | - | bulkActions| Async bulk size of data batch execution. | SW_STORAGE_ES_BULK_ACTIONS| 5000|
-| - | - | flushInterval| Period of flush (in seconds). Does not matter whether `bulkActions` is reached or not. INT(flushInterval * 2/3) is used for index refresh period. | SW_STORAGE_ES_FLUSH_INTERVAL | 15 (index refresh period = 10)|
-| - | - | concurrentRequests| The number of concurrent requests allowed to be executed. | SW_STORAGE_ES_CONCURRENT_REQUESTS| 2 |
-| - | - | resultWindowMaxSize | The maximum size of dataset when the OAP loads cache, such as network aliases. | SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE | 10000|
-| - | - | metadataQueryMaxSize | The maximum size of metadata per query. | SW_STORAGE_ES_QUERY_MAX_SIZE | 5000 |
-| - | - | segmentQueryMaxSize | The maximum size of trace segments per query. | SW_STORAGE_ES_QUERY_SEGMENT_SIZE | 200|
-| - | - | profileTaskQueryMaxSize | The maximum size of profile task per query. | SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE | 200|
-| - | - | advanced | All settings of ElasticSearch index creation. The value should be in JSON format. | SW_STORAGE_ES_ADVANCED | - |
-| - |h2| - |  H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. | - | - |
-| - | - | driver | H2 JDBC driver. | SW_STORAGE_H2_DRIVER | org.h2.jdbcx.JdbcDataSource|
-| - | - | url | H2 connection URL. Defaults to H2 memory mode. | SW_STORAGE_H2_URL | jdbc:h2:mem:skywalking-oap-db |
-| - | - | user | Username of H2 database. | SW_STORAGE_H2_USER | sa |
-| - | - | password | Password of H2 database. | - | - | 
-| - | - | metadataQueryMaxSize | The maximum size of metadata per query. | SW_STORAGE_H2_QUERY_MAX_SIZE | 5000 |
-| - | - | maxSizeOfArrayColumn | Some entities (e.g. trace segments) include the logic column with multiple values. In H2, we use multiple physical columns to host the values: e.g. change column_a with values [1,2,3,4,5] to `column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5`. | SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN | 20 |
-| - | - | numOfSearchableValuesPerTag | In a trace segment, this includes multiple spans with multiple tags. Different spans may have the same tag key, e.g. multiple HTTP exit spans all have their own `http.method` tags. This configuration sets the limit on the maximum number of values for the same tag key. | SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG | 2 |
-| - |mysql| - | MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. | - | - |
-| - | - | properties | Hikari connection pool configurations. | - | Listed in the `application.yml`. |
-| - | - | metadataQueryMaxSize | The maximum size of metadata per query. | SW_STORAGE_MYSQL_QUERY_MAX_SIZE | 5000 |
-| - | - | maxSizeOfArrayColumn | Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to `column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5`. | SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN | 20 |
-| - | - | numOfSearchableValuesPerTag | In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own `http.method` tags. This configuration sets the limit on the maximum number of values for the same tag key. | SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG | 2 |
-| - |postgresql| - | PostgreSQL storage. | - | - |
-| - | - | properties | Hikari connection pool configurations. | - | Listed in the `application.yml`. |
-| - | - | metadataQueryMaxSize | The maximum size of metadata per query. | SW_STORAGE_MYSQL_QUERY_MAX_SIZE | 5000 |
-| - | - | maxSizeOfArrayColumn | Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to `column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5` | SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN | 20 |
-| - | - | numOfSearchableValuesPerTag | In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own `http.method` tags. This configuration sets the limit on the maximum number of values for the same tag key. | SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG | 2 |
-| - |influxdb| - | InfluxDB storage. |- | - |
-| - | - | url| InfluxDB connection URL. | SW_STORAGE_INFLUXDB_URL | http://localhost:8086|
-| - | - | user | User name of InfluxDB. | SW_STORAGE_INFLUXDB_USER | root|
-| - | - | password | Password of InfluxDB. | SW_STORAGE_INFLUXDB_PASSWORD | -|
-| - | - | database | Database of InfluxDB. | SW_STORAGE_INFLUXDB_DATABASE | skywalking |
-| - | - | actions | The number of actions to collect. | SW_STORAGE_INFLUXDB_ACTIONS | 1000 |
-| - | - | duration | The maximum waiting time (in milliseconds). | SW_STORAGE_INFLUXDB_DURATION | 1000|
-| - | - | batchEnabled | If true, write points with batch API. | SW_STORAGE_INFLUXDB_BATCH_ENABLED | true|
-| - | - | fetchTaskLogMaxSize | The maximum number of fetch task log in a request. | SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE | 5000|
-| - | - | connectionResponseFormat | The response format of connection to influxDB. It can only be MSGPACK or JSON. | SW_STORAGE_INFLUXDB_CONNECTION_RESPONSE_FORMAT | MSGPACK |
-| agent-analyzer | default | - | Agent Analyzer. | SW_AGENT_ANALYZER | default |
-| - | -| sampleRate| Sampling rate for receiving trace. Precise to 1/10000. 10000 means a sampling rate of 100% by default.|SW_TRACE_SAMPLE_RATE|10000|
-| - | - |slowDBAccessThreshold| The slow database access threshold (in milliseconds). |SW_SLOW_DB_THRESHOLD|default:200,mongodb:100|
-| - | - |forceSampleErrorSegment| When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. |SW_FORCE_SAMPLE_ERROR_SEGMENT|true|
-| - | - |segmentStatusAnalysisStrategy| Determines the final segment status from span status. Available values are `FROM_SPAN_STATUS` , `FROM_ENTRY_SPAN`, and `FROM_FIRST_SPAN`. `FROM_SPAN_STATUS` indicates that the segment status would be error if any span has an error status. `FROM_ENTRY_SPAN` means that the segment status would only be determined by the status of entry spans. `FROM_FIRST_SPAN` means that the segment status would only be determined by the status of the first span. |SW_ [...]
-| - | - |noUpstreamRealAddressAgents| Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can't collect the real peer IP address. |SW_NO_UPSTREAM_REAL_ADDRESS|6000,9000|
-| - | - |slowTraceSegmentThreshold| Setting this threshold on latency (in milliseconds) would cause the slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is `-1`, which means that slow traces would not be sampled. |SW_SLOW_TRACE_SEGMENT_THRESHOLD|-1|
-| - | - |meterAnalyzerActiveFiles| Indicates which files could be instrumented and analyzed. Multiple files are split by ",". |SW_METER_ANALYZER_ACTIVE_FILES||
-| receiver-sharing-server|default| Sharing server provides new gRPC and restful servers for data collection, and designates that servers in the core module are to be used for internal communication only. | - | - |
-| - | - | restHost| Binding IP of RESTful services. Services include GraphQL query and HTTP data report. | SW_RECEIVER_SHARING_REST_HOST | - |
-| - | - | restPort | Binding port of RESTful services. | SW_RECEIVER_SHARING_REST_PORT | - |
-| - | - | restContextPath| Web context path of RESTful services. | SW_RECEIVER_SHARING_REST_CONTEXT_PATH | - |
-| - | - | restMinThreads| Minimum thread number of RESTful services. | SW_RECEIVER_SHARING_JETTY_MIN_THREADS|1|
-| - | - | restMaxThreads| Maximum thread number of RESTful services. | SW_RECEIVER_SHARING_JETTY_MAX_THREADS|200|
-| - | - | restIdleTimeOut| Connector idle timeout of RESTful services (in milliseconds). | SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT|30000|
-| - | - | restAcceptorPriorityDelta| Thread priority delta to give to acceptor threads of RESTful services. | SW_RECEIVER_SHARING_JETTY_DELTA|0|
-| - | - | restAcceptQueueSize| ServerSocketChannel backlog of RESTful services. | SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE|0|
-| - | - | httpMaxRequestHeaderSize| Maximum request header size accepted. | SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE|8192|
-| - | - | gRPCHost| Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. | SW_RECEIVER_GRPC_HOST | 0.0.0.0. Not Activated |
-| - | - | gRPCPort| Binding port of gRPC services. | SW_RECEIVER_GRPC_PORT | Not Activated |
-| - | - | gRPCThreadPoolSize| Pool size of gRPC server. | SW_RECEIVER_GRPC_THREAD_POOL_SIZE | CPU core * 4|
-| - | - | gRPCThreadPoolQueueSize| Queue size of gRPC server. | SW_RECEIVER_GRPC_POOL_QUEUE_SIZE | 10000|
-| - | - | gRPCSslEnabled| Activates SSL for gRPC services. | SW_RECEIVER_GRPC_SSL_ENABLED | false |
-| - | - | gRPCSslKeyPath| File path of gRPC SSL key. | SW_RECEIVER_GRPC_SSL_KEY_PATH | - |
-| - | - | gRPCSslCertChainPath| File path of gRPC SSL cert chain. | SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH | - |
-| - | - | maxConcurrentCallsPerConnection | The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. | SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL | - |
-| - | - | authentication | The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. | SW_AUTHENTICATION | - |
-| log-analyzer | default | Log Analyzer. | SW_LOG_ANALYZER | default |
-| - | - | lalFiles | The LAL configuration file names (without file extension) to be activated. Read [LAL](../../concepts-and-designs/lal.md) for more details. | SW_LOG_LAL_FILES | default |
-| - | - | malFiles | The MAL configuration file names (without file extension) to be activated. Read [MAL](../../concepts-and-designs/mal.md) for more details. | SW_LOG_MAL_FILES | "" |
-| event-analyzer | default | Event Analyzer. | SW_EVENT_ANALYZER | default |
-| receiver-register|default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| receiver-trace|default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| receiver-jvm| default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| receiver-clr| default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| receiver-profile| default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| receiver-zabbix| default| Read [receiver doc](backend-zabbix.md) for more details. | - | - |
-| - | - | port| Exported TCP port. Zabbix agent could connect and transport data. | SW_RECEIVER_ZABBIX_PORT | 10051 |
-| - | - | host| Binds to host. | SW_RECEIVER_ZABBIX_HOST | 0.0.0.0 |
-| - | - | activeFiles| Enables config when agent request is received. | SW_RECEIVER_ZABBIX_ACTIVE_FILES | agent |
-| service-mesh| default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| envoy-metric| default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| - | - | acceptMetricsService | Starts Envoy Metrics Service analysis. | SW_ENVOY_METRIC_SERVICE | true|
-| - | - | alsHTTPAnalysis | Starts Envoy HTTP Access Log Service analysis. Value = `k8s-mesh` means starting the analysis. | SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS | - |
-| - | - | alsTCPAnalysis | Starts Envoy TCP Access Log Service analysis. Value = `k8s-mesh` means starting the analysis. | SW_ENVOY_METRIC_ALS_TCP_ANALYSIS | - |
-| - | - | k8sServiceNameRule | `k8sServiceNameRule` allows you to customize the service name in ALS via Kubernetes metadata. The available variables are `pod` and `service`. E.g. you can use `${service.metadata.name}-${pod.metadata.labels.version}` to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes (`''`) to avoid evaluation by the shell. | - | - |
-| receiver-otel | default | Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| - | - | enabledHandlers| Enabled handlers for otel. | SW_OTEL_RECEIVER_ENABLED_HANDLERS | - |
-| - | - | enabledOcRules| Enabled metric rules for OC handler. | SW_OTEL_RECEIVER_ENABLED_OC_RULES | - |
-| receiver_zipkin |default| Read [receiver doc](backend-receivers.md). | - | - |
-| - | - | restHost| Binding IP of RESTful services. |SW_RECEIVER_ZIPKIN_HOST|0.0.0.0|
-| - | - | restPort | Binding port of RESTful services. | SW_RECEIVER_ZIPKIN_PORT|9411|
-| - | - | restContextPath| Web context path of RESTful services. | SW_RECEIVER_ZIPKIN_CONTEXT_PATH|/|
-| receiver_jaeger | default| Read [receiver doc](backend-receivers.md). | - | - |
-| - | - | gRPCHost|Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. | SW_RECEIVER_JAEGER_HOST | - |
-| - | - | gRPCPort| Binding port of gRPC services. | SW_RECEIVER_JAEGER_PORT | - |
-| - | - | gRPCThreadPoolSize| Pool size of gRPC server. | - | CPU core * 4|
-| - | - | gRPCThreadPoolQueueSize| Queue size of gRPC server. | - | 10000|
-| - | - | maxConcurrentCallsPerConnection | The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. | - | - |
-| - | - | maxMessageSize | Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. | - | 4M(based on Netty) |
-| prometheus-fetcher | default | Read [fetcher doc](backend-fetcher.md) for more details. | - | - |
-| - | - | enabledRules | Enabled rules. | SW_PROMETHEUS_FETCHER_ENABLED_RULES | self |
-| - | - | maxConvertWorker | The maximum number of meter converter workers. | SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER | -1 (by default, half the number of CPU cores) |
-| kafka-fetcher | default | Read [fetcher doc](backend-fetcher.md) for more details. | - | - |
-| - | - | bootstrapServers | A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. | SW_KAFKA_FETCHER_SERVERS | localhost:9092 |
-| - | - | namespace | The namespace isolates multiple OAP clusters that use the same Kafka cluster. If you set a namespace for the Kafka fetcher, OAP will add it as a prefix to the topic names. You should also set the namespace in `agent.config`; the property is named `plugin.kafka.namespace`. | SW_NAMESPACE | - |
-| - | - | groupId | A unique string that identifies the consumer group to which this consumer belongs.| - | skywalking-consumer |
-| - | - | consumePartitions | Indicates which PartitionId(s) of the topics is/are assigned to the OAP server. Separated by commas if multiple. | SW_KAFKA_FETCHER_CONSUME_PARTITIONS | - |
-| - | - | isSharding | True when OAP Server is in cluster. | SW_KAFKA_FETCHER_IS_SHARDING | false |
-| - | - | createTopicIfNotExist | If true, this creates Kafka topic (if it does not already exist). | - | true |
-| - | - | partitions | The number of partitions for the topic being created. | SW_KAFKA_FETCHER_PARTITIONS | 3 |
-| - | - | enableNativeProtoLog | Enables fetching and handling native proto log data. | SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG | false |
-| - | - | enableNativeJsonLog | Enables fetching and handling native json log data. | SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG | false |
-| - | - | replicationFactor | The replication factor for each partition in the topic being created. | SW_KAFKA_FETCHER_PARTITIONS_FACTOR | 2 |
-| - | - | kafkaHandlerThreadPoolSize | Pool size of Kafka message handler executor. | SW_KAFKA_HANDLER_THREAD_POOL_SIZE | CPU core * 2 |
-| - | - | kafkaHandlerThreadPoolQueueSize | Queue size of Kafka message handler executor. | SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE | 10000 |
-| - | - | topicNameOfMeters | Kafka topic name for meter system data. | - | skywalking-meters |
-| - | - | topicNameOfMetrics | Kafka topic name for JVM metrics data. | - | skywalking-metrics |
-| - | - | topicNameOfProfiling | Kafka topic name for profiling data. | - | skywalking-profilings |
-| - | - | topicNameOfTracingSegments | Kafka topic name for tracing data. | - | skywalking-segments |
-| - | - | topicNameOfManagements | Kafka topic name for service instance reporting and registration. | - | skywalking-managements |
-| - | - | topicNameOfLogs | Kafka topic name for native proto log data. | - | skywalking-logs |
-| - | - | topicNameOfJsonLogs | Kafka topic name for native json log data. | - | skywalking-logs-json |
-| receiver-browser | default | Read [receiver doc](backend-receivers.md) for more details. | - | - |
-| - | - | sampleRate | Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. | SW_RECEIVER_BROWSER_SAMPLE_RATE | 10000 |
-| query | graphql | - | GraphQL query implementation. | - |
-| - | - | path | Root path of GraphQL query and mutation. | SW_QUERY_GRAPHQL_PATH | /graphql|
-| - | - | enableLogTestTool | Enable the log testing API to test the LAL. **NOTE**: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. | SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL | false |
-| alarm | default | - | Read [alarm doc](backend-alarm.md) for more details. | - |
-| telemetry | - | - | Read [telemetry doc](backend-telemetry.md) for more details. | - |
-| - | none| - | No op implementation. | - |
-| - | prometheus| host | Binding host for Prometheus server fetching data. | SW_TELEMETRY_PROMETHEUS_HOST|0.0.0.0|
-| - | - | port|  Binding port for Prometheus server fetching data. |SW_TELEMETRY_PROMETHEUS_PORT|1234|
-| configuration | - | - | Read [dynamic configuration doc](dynamic-config.md) for more details. | - |
-| - | grpc| host | DCS server binding hostname. | SW_DCS_SERVER_HOST | - |
-| - | - | port | DCS server binding port. | SW_DCS_SERVER_PORT | 80 |
-| - | - | clusterName | Cluster name when reading the latest configuration from the DCS server. | SW_DCS_CLUSTER_NAME | SkyWalking|
-| - | - | period | The period of reading data from the DCS server by the OAP (in seconds). | SW_DCS_PERIOD | 20 |
-| - | apollo| apolloMeta| `apollo.meta` in Apollo. | SW_CONFIG_APOLLO | http://106.12.25.204:8080 | 
-| - | - | apolloCluster | `apollo.cluster` in Apollo. | SW_CONFIG_APOLLO_CLUSTER | default|
-| - | - | apolloEnv | `env` in Apollo. | SW_CONFIG_APOLLO_ENV | - |
-| - | - | appId | `app.id` in Apollo. | SW_CONFIG_APOLLO_APP_ID | skywalking |
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_APOLLO_PERIOD | 60 |
-| - | zookeeper|nameSpace| The namespace (represented by root path) that isolates the configurations in the Zookeeper. |SW_CONFIG_ZK_NAMESPACE| `/`, root path|
-| - | - | hostPort| Hosts and ports of Zookeeper Cluster. |SW_CONFIG_ZK_HOST_PORT| localhost:2181|
-| - | - | baseSleepTimeMs|The period of Zookeeper client between two retries (in milliseconds). |SW_CONFIG_ZK_BASE_SLEEP_TIME_MS|1000|
-| - | - | maxRetries| The maximum retry time. |SW_CONFIG_ZK_MAX_RETRIES|3|
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_ZK_PERIOD | 60 |
-| - | etcd| endpoints | Hosts and ports for etcd cluster (separated by commas if multiple). | SW_CONFIG_ETCD_ENDPOINTS | localhost:2379 | 
-| - | - | namespace | Namespace for SkyWalking cluster. |SW_CONFIG_ETCD_NAMESPACE | /skywalking |
-| - | - | authentication | Indicates whether there is authentication. | SW_CONFIG_ETCD_AUTHENTICATION | false |
-| - | - | user | Etcd auth username. | SW_CONFIG_ETCD_USER | |
-| - | - | password | Etcd auth password. | SW_CONFIG_ETCD_PASSWORD | |
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_ETCD_PERIOD | 60 |
-| - | consul | hostPort| Hosts and ports for Consul cluster.| SW_CONFIG_CONSUL_HOST_AND_PORTS|localhost:8500|
-| - | - | aclToken| ACL Token of Consul. Empty string means `without ACL token`.| SW_CONFIG_CONSUL_ACL_TOKEN | - |
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_CONSUL_PERIOD | 60 |
-| - | k8s-configmap | namespace | Deployment namespace of the config map. |SW_CLUSTER_K8S_NAMESPACE|default|
-| - | - | labelSelector| Labels for locating configmap. |SW_CLUSTER_K8S_LABEL|app=collector,release=skywalking|
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_CONFIGMAP_PERIOD | 60 |
-| - | nacos | serverAddr | Nacos Server Host. | SW_CONFIG_NACOS_SERVER_ADDR | 127.0.0.1|
-| - | - | port | Nacos Server Port. | SW_CONFIG_NACOS_SERVER_PORT | 8848 |
-| - | - | group | Nacos configuration group. | SW_CONFIG_NACOS_SERVER_GROUP | skywalking |
-| - | - | namespace | Nacos configuration namespace. | SW_CONFIG_NACOS_SERVER_NAMESPACE | - |
-| - | - | period | The period of data sync (in seconds). | SW_CONFIG_NACOS_PERIOD | 60 |
-| - | - | username | Nacos Auth username. | SW_CONFIG_NACOS_USERNAME | - |
-| - | - | password | Nacos Auth password. | SW_CONFIG_NACOS_PASSWORD | - |
-| - | - | accessKey | Nacos Auth accessKey. | SW_CONFIG_NACOS_ACCESSKEY | - |
-| - | - | secretKey | Nacos Auth secretKey.  | SW_CONFIG_NACOS_SECRETKEY | - |
-| exporter | grpc | targetHost | The host of target gRPC server for receiving export data. | SW_EXPORTER_GRPC_HOST | 127.0.0.1 |
-| - | - | targetPort | The port of target gRPC server for receiving export data. | SW_EXPORTER_GRPC_PORT | 9870 |
-| health-checker | default | checkIntervalSeconds | The period of checking OAP internal health status (in seconds). | SW_HEALTH_CHECKER_INTERVAL_SECONDS | 5 |
-| configuration-discovery | default | disableMessageDigest | If true, the agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. | SW_DISABLE_MESSAGE_DIGEST | false |
-| receiver-event|default| Read [receiver doc](backend-receivers.md) for more details. | - | - |
-
-## Note
-¹ System environment variable names can be declared and changed in `application.yml`. The names listed here are simply those provided in the default `application.yml` file.
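-
-As a quick orientation, the table above maps onto `application.yml` as follows: each top-level key is a module, `selector` picks a provider, and the provider block holds the settings. Below is a minimal sketch using the `health-checker` module documented above; the selector variable name `SW_HEALTH_CHECKER` is an assumption following the usual naming convention, not taken from the table.
-
-```yaml
-health-checker:
-  selector: ${SW_HEALTH_CHECKER:default}  # assumed selector variable name
-  default:
-    # The period of checking OAP internal health status (in seconds).
-    checkIntervalSeconds: ${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}
-```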
diff --git a/docs/en/setup/backend/dynamic-config.md b/docs/en/setup/backend/dynamic-config.md
deleted file mode 100755
index 3e7e073..0000000
--- a/docs/en/setup/backend/dynamic-config.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Dynamic Configuration
-SkyWalking configurations are mostly set through `application.yml` and OS environment variables.
-At the same time, some of them support dynamic updates from an upstream management system.
-
-Currently, SkyWalking supports the following dynamic configurations.
-
-| Config Key | Value Description | Value Format Example |
-|:----:|:----:|:----:|
-|agent-analyzer.default.slowDBAccessThreshold| Threshold of slow database statements. Overrides `receiver-trace/default/slowDBAccessThreshold` of `application.yml`. | default:200,mongodb:50|
-|agent-analyzer.default.uninstrumentedGateways| The uninstrumented gateways. Overrides `gateways.yml`. | Same as [`gateways.yml`](uninstrumented-gateways.md#configuration-format). |
-|alarm.default.alarm-settings| The alarm settings. Overrides `alarm-settings.yml`. | Same as [`alarm-settings.yml`](backend-alarm.md). |
-|core.default.apdexThreshold| The apdex threshold settings. Overrides `service-apdex-threshold.yml`. | Same as [`service-apdex-threshold.yml`](apdex-threshold.md). |
-|core.default.endpoint-name-grouping| The endpoint name grouping setting. Overrides `endpoint-name-grouping.yml`. | Same as [`endpoint-name-grouping.yml`](endpoint-grouping-rules.md). |
-|core.default.log4j-xml| The log4j xml configuration. Overrides `log4j2.xml`. | Same as [`log4j2.xml`](dynamical-logging.md). |
-|agent-analyzer.default.sampleRate| Trace sampling. Overrides `receiver-trace/default/sampleRate` of `application.yml`. | 10000 |
-|agent-analyzer.default.slowTraceSegmentThreshold| Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is `-1`, which means slow traces will not be sampled. Overrides `receiver-trace/default/slowTraceSegmentThreshold` of `application.yml`. | -1 |
-|configuration-discovery.default.agentConfigurations| The ConfigurationDiscovery settings. | See [`configuration-discovery.md`](../service-agent/java-agent/configuration-discovery.md). |
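-
-For example, a sketch of the value for `agent-analyzer.default.uninstrumentedGateways` (the format follows `gateways.yml`; the gateway name, host, and port below are illustrative placeholders):
-
-```yaml
-gateways:
-  - name: proxy0
-    instances:
-      - host: 127.0.0.1 # the host/IP of this gateway instance
-        port: 9099      # the port of this gateway instance
-```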
-
-This feature depends on upstream service, so it is **DISABLED** by default.
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:none}
-  none:
-  grpc:
-    host: ${SW_DCS_SERVER_HOST:""}
-    port: ${SW_DCS_SERVER_PORT:80}
-    clusterName: ${SW_DCS_CLUSTER_NAME:SkyWalking}
-    period: ${SW_DCS_PERIOD:20}
-  # ... other implementations
-```
-
-## Dynamic Configuration Service, DCS
-[Dynamic Configuration Service](../../../../oap-server/server-configuration/grpc-configuration-sync/src/main/proto/configuration-service.proto) 
-is a gRPC service which requires implementation of the upstream system.
-The SkyWalking OAP fetches the configuration from the implementation (any system) after you enable the implementation like this:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:grpc}
-  grpc:
-    host: ${SW_DCS_SERVER_HOST:""}
-    port: ${SW_DCS_SERVER_PORT:80}
-    clusterName: ${SW_DCS_CLUSTER_NAME:SkyWalking}
-    period: ${SW_DCS_PERIOD:20}
-```
-
-## Dynamic Configuration Zookeeper Implementation
-[Zookeeper](https://github.com/apache/zookeeper) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:zookeeper}
-  zookeeper:
-    period: ${SW_CONFIG_ZK_PERIOD:60} # Unit seconds, sync period. Default fetch every 60 seconds.
-    nameSpace: ${SW_CONFIG_ZK_NAMESPACE:/default}
-    hostPort: ${SW_CONFIG_ZK_HOST_PORT:localhost:2181}
-    # Retry Policy
-    baseSleepTimeMs: ${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000} # initial amount of time to wait between retries
-    maxRetries: ${SW_CONFIG_ZK_MAX_RETRIES:3} # max number of times to retry
-```
-
-The **nameSpace** is the ZooKeeper path. Config keys and values are stored as properties of the `nameSpace` node, as sketched below.
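-
-As an illustration (a sketch of the logical layout, not a file you create), the entries under the namespace node are plain key/value pairs, using config keys from the dynamic configuration table above:
-
-```yaml
-# znode name (config key): znode data (config value)
-agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50
-agent-analyzer.default.sampleRate: 10000
-```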
-
-## Dynamic Configuration Etcd Implementation
-
-[Etcd](https://github.com/etcd-io/etcd) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:etcd}
-  etcd:
-    period: ${SW_CONFIG_ETCD_PERIOD:60} # Unit seconds, sync period. Default fetch every 60 seconds.
-    endpoints: ${SW_CONFIG_ETCD_ENDPOINTS:localhost:2379}
-    namespace: ${SW_CONFIG_ETCD_NAMESPACE:/skywalking}
-    authentication: ${SW_CONFIG_ETCD_AUTHENTICATION:false}
-    user: ${SW_CONFIG_ETCD_USER:}
-    password: ${SW_CONFIG_ETCD_password:}
-```
-
-**NOTE**: Only the v3 protocol is supported since 8.7.0. 
-
-## Dynamic Configuration Consul Implementation
-
-[Consul](https://github.com/rickfast/consul-client) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:consul}
-  consul:
-    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-    hostAndPorts: ${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}
-    # Sync period in seconds. Defaults to 60 seconds.
-    period: ${SW_CONFIG_CONSUL_PERIOD:60}
-    # Consul aclToken
-    aclToken: ${SW_CONFIG_CONSUL_ACL_TOKEN:""}
-```
-
-## Dynamic Configuration Apollo Implementation
-
-[Apollo](https://github.com/ctripcorp/apollo/) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:apollo}
-  apollo:
-    apolloMeta: ${SW_CONFIG_APOLLO:http://106.12.25.204:8080}
-    apolloCluster: ${SW_CONFIG_APOLLO_CLUSTER:default}
-    apolloEnv: ${SW_CONFIG_APOLLO_ENV:""}
-    appId: ${SW_CONFIG_APOLLO_APP_ID:skywalking}
-    period: ${SW_CONFIG_APOLLO_PERIOD:5}
-```
-
-## Dynamic Configuration Kubernetes Configmap Implementation
-
-[configmap](https://kubernetes.io/docs/concepts/configuration/configmap/) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:k8s-configmap}
-  # [example](../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)
-  k8s-configmap:
-      # Sync period in seconds. Defaults to 60 seconds.
-      period: ${SW_CONFIG_CONFIGMAP_PERIOD:60}
-      # The namespace in which the configmap is deployed.
-      namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
-      # The label selector used to locate the specific configmap.
-      labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-```
-
-## Dynamic Configuration Nacos Implementation
-
-[Nacos](https://github.com/alibaba/nacos) is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:nacos}
-  nacos:
-    # Nacos Server Host
-    serverAddr: ${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}
-    # Nacos Server Port
-    port: ${SW_CONFIG_NACOS_SERVER_PORT:8848}
-    # Nacos Configuration Group
-    group: ${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}
-    # Nacos Configuration namespace
-    namespace: ${SW_CONFIG_NACOS_SERVER_NAMESPACE:}
-    # Unit seconds, sync period. Default fetch every 60 seconds.
-    period: ${SW_CONFIG_NACOS_PERIOD:60}
-    # The name of the current cluster. Set it if you want the upstream system to know it.
-    clusterName: ${SW_CONFIG_NACOS_CLUSTER_NAME:default}
-```
diff --git a/docs/en/setup/backend/dynamical-logging.md b/docs/en/setup/backend/dynamical-logging.md
deleted file mode 100644
index 50766a2..0000000
--- a/docs/en/setup/backend/dynamical-logging.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Dynamical Logging
-
-The OAP server leverages `log4j2` to manage the logging system. `log4j2` supports changing the configuration 
-at runtime, but you have to update the XML configuration file manually, which can be time-consuming and prone to human error.
-
-Dynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP `log4j` 
-configurations through a single operation.
-
-The key of the configuration item is `core.default.log4j-xml`, and you can select any of the configuration implementations
-to store the content of `log4j.xml`. In the booting phase, once the core module gets started, `core.default.log4j-xml`
-would be loaded into the OAP log4j context.
-
-If the configuration is changed after the OAP has started, you have to wait for a while for the changes to be applied. 
-The default value is `60` seconds, which you can change through `configuration.<configuration implementation>.period` in `application.yaml`.
-
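-For instance, a hedged sketch of shortening the sync period for the `k8s-configmap` implementation, reusing the settings documented in the dynamic configuration section (the `30` here is only an example value):
-
-```yaml
-configuration:
-  selector: ${SW_CONFIGURATION:k8s-configmap}
-  k8s-configmap:
-    # Apply `core.default.log4j-xml` changes within roughly 30 seconds.
-    period: ${SW_CONFIG_CONFIGMAP_PERIOD:30}
-    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
-    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-```
-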
-If you remove `core.default.log4j-xml` from the configuration center or disable the configuration module, `log4j.xml` in the `config` directory would take effect again.
-
-> Caveat: The OAP only supports the XML configuration format.
-
-This is an example of how to configure dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration
-centers following the same procedure.
-
-```yaml
-apiVersion: v1
-data:
-  core.default.log4j-xml: |-
-    <Configuration status="WARN">
-       <Appenders>
-         <Console name="Console" target="SYSTEM_OUT">
-           <PatternLayout charset="UTF-8" pattern="%d - %c - %L [%t] %-5p %x - %m%n"/>
-         </Console>
-       </Appenders>
-       <Loggers>
-         <logger name="io.grpc.netty" level="INFO"/>
-         <logger name="org.apache.skywalking.oap.server.configuration.api" level="TRACE"/>
-         <logger name="org.apache.skywalking.oap.server.configuration.configmap" level="DEBUG"/>
-         <Root level="WARN">
-           <AppenderRef ref="Console"/>
-         </Root>
-        </Loggers>
-    </Configuration>
-kind: ConfigMap
-metadata:
-  labels:
-    app: collector
-    release: skywalking
-  name: skywalking-oap
-  namespace: default
-```
-
diff --git a/docs/en/setup/backend/endpoint-grouping-rules.md b/docs/en/setup/backend/endpoint-grouping-rules.md
deleted file mode 100644
index f79be96..0000000
--- a/docs/en/setup/backend/endpoint-grouping-rules.md
+++ /dev/null
@@ -1,304 +0,0 @@
-# Group Parameterized Endpoints
-In most cases, endpoints are detected automatically through language agents, service mesh observability solutions,
-or meter system configurations.
-
-There are some special cases, especially when a REST style URI is used, where the application code includes the parameter in the endpoint name,
-such as putting the order ID in the URI. Examples are `/prod/ORDER123` and `/prod/ORDER456`. But logically, most would expect to
-have an endpoint name like `/prod/{order-id}`. Parameterized endpoint grouping is designed specifically for this.
-
-If the incoming endpoint name matches the rules, SkyWalking will group the endpoint accordingly.
-
-There are two approaches in which SkyWalking supports endpoint grouping:
-1. Endpoint name grouping by OpenAPI definitions.
-2. Endpoint name grouping by custom configurations.
-
-Both grouping approaches can work together in sequence.
-
-## Endpoint name grouping by OpenAPI definitions
-The OpenAPI definitions are documents based on the [OpenAPI Specification (OAS)](https://www.openapis.org/), which is used to define a standard, language-agnostic interface for HTTP APIs.
-
-SkyWalking now supports `OAS v2.0+`. It can parse the documents (YAML) and build grouping rules from them automatically.
-
-
-### How to use
-1. Add `Specification Extensions` for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:<br />
-   `${METHOD}` is a reserved placeholder which represents the HTTP method, e.g. `POST/GET...`.<br />
-   `${PATH}` is a reserved placeholder which represents the path, e.g. `/products/{id}`.
-
-   | Extension Name | Required | Description | Default Value |
-   |-----|-----|-----|-----|
-   | x-sw-service-name | false | The service name to which these endpoints belong. | The directory name to which the OpenAPI definition documents belong. |
-   | x-sw-endpoint-name-match-rule | false | The rule used to match the endpoint. | `${METHOD}:${PATH}` |
-   | x-sw-endpoint-name-format | false | The endpoint name after grouping. | `${METHOD}:${PATH}` |
-
-   These extensions are under `OpenAPI Object`. For example, the document below has a full custom config:
-
-``` yaml
-openapi: 3.0.0
-x-sw-service-name: serviceB
-x-sw-endpoint-name-match-rule: "${METHOD}:${PATH}"
-x-sw-endpoint-name-format: "${METHOD}:${PATH}"
-
-info:
-  description: OpenAPI definition for SkyWalking test.
-  version: v2
-  title: Product API
-  ...
-```
-
-   We highly recommend using the default config. The custom config (`x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format`) is considered part of the match rules (regex pattern).
-   We have provided some use cases in `org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest`. You may validate your custom config as well.
-
-2. All OpenAPI definition documents are located in the `openapi-definitions` directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set `x-sw-service-name`. For example:
-  ```
-├── openapi-definitions
-│   ├── serviceA
-│   │   ├── customerAPI-v1.yaml
-│   │   └── productAPI-v1.yaml
-│   └── serviceB
-│       └── productAPI-v2.yaml
-```
-3. The feature is enabled by default. You can disable it by setting the `Core Module` configuration `${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}`.
-
-### Rules match priority 
-We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:
-1. The exact path being matched. 
-   E.g. `/products or /products/inventory`
-2. The path which has fewer variables.
-   E.g. In the case of `/products/{var1}/{var2} and /products/{var1}/abc`, endpoint name `/products/123/abc` will match the second one.
-3. If the paths have the same number of variables, the longest path is matched, with each variable counted as length `1`.
-   E.g. In the case of `/products/abc/{var1} and products/{var12345}/ef`, endpoint name `/products/abc/ef` will match the first one, because `length("abc") = 3` is larger than `length("ef") = 2`.
-### Examples
-If we have an OpenAPI definition doc `productAPI-v2.yaml` in directory `serviceB`, it will look like this:
-```yaml
-
-openapi: 3.0.0
-
-info:
-  description: OpenAPI definition for SkyWalking test.
-  version: v2
-  title: Product API
-
-tags:
-  - name: product
-    description: product
-  - name: relatedProducts
-    description: Related Products
-
-paths:
-  /products:
-    get:
-      tags:
-        - product
-      summary: Get all products list
-      description: Get all products list.
-      operationId: getProducts
-      responses:
-        "200":
-          description: Success
-          content:
-            application/json:
-              schema:
-                type: array
-                items:
-                  $ref: "#/components/schemas/Product"
-  /products/{region}/{country}:
-    get:
-      tags:
-        - product
-      summary: Get products regional
-      description: Get products regional with the given id.
-      operationId: getProductRegional
-      parameters:
-        - name: region
-          in: path
-          description: Products region
-          required: true
-          schema:
-            type: string
-        - name: country
-          in: path
-          description: Products country
-          required: true
-          schema:
-            type: string
-      responses:
-        "200":
-          description: successful operation
-          content:
-            application/json:
-              schema:
-                $ref: "#/components/schemas/Product"
-        "400":
-          description: Invalid parameters supplied
-  /products/{id}:
-    get:
-      tags:
-        - product
-      summary: Get product details
-      description: Get product details with the given id.
-      operationId: getProduct
-      parameters:
-        - name: id
-          in: path
-          description: Product id
-          required: true
-          schema:
-            type: integer
-            format: int64
-      responses:
-        "200":
-          description: successful operation
-          content:
-            application/json:
-              schema:
-                $ref: "#/components/schemas/ProductDetails"
-        "400":
-          description: Invalid product id
-    post:
-      tags:
-        - product
-      summary: Update product details
-      description: Update product details with the given id.
-      operationId: updateProduct
-      parameters:
-        - name: id
-          in: path
-          description: Product id
-          required: true
-          schema:
-            type: integer
-            format: int64
-        - name: name
-          in: query
-          description: Product name
-          required: true
-          schema:
-            type: string
-      responses:
-        "200":
-          description: successful operation
-    delete:
-      tags:
-        - product
-      summary: Delete product details
-      description: Delete product details with the given id.
-      operationId: deleteProduct
-      parameters:
-        - name: id
-          in: path
-          description: Product id
-          required: true
-          schema:
-            type: integer
-            format: int64
-      responses:
-        "200":
-          description: successful operation
-  /products/{id}/relatedProducts:
-    get:
-      tags:
-        - relatedProducts
-      summary: Get related products
-      description: Get related products with the given product id.
-      operationId: getRelatedProducts
-      parameters:
-        - name: id
-          in: path
-          description: Product id
-          required: true
-          schema:
-            type: integer
-            format: int64
-      responses:
-        "200":
-          description: successful operation
-          content:
-            application/json:
-              schema:
-                $ref: "#/components/schemas/RelatedProducts"
-        "400":
-          description: Invalid product id
-
-components:
-  schemas:
-    Product:
-      type: object
-      description: Product id and name
-      properties:
-        id:
-          type: integer
-          format: int64
-          description: Product id
-        name:
-          type: string
-          description: Product name
-      required:
-        - id
-        - name
-    ProductDetails:
-      type: object
-      description: Product details
-      properties:
-        id:
-          type: integer
-          format: int64
-          description: Product id
-        name:
-          type: string
-          description: Product name
-        description:
-          type: string
-          description: Product description
-      required:
-        - id
-        - name
-    RelatedProducts:
-      type: object
-      description: Related Products
-      properties:
-        id:
-          type: integer
-          format: int32
-          description: Product id
-        relatedProducts:
-          type: array
-          description: List of related products
-          items:
-            $ref: "#/components/schemas/Product"
-
-
-```
-
-Here are some use cases:
-
-   | Incoming Endpoint | Incoming Service | x-sw-service-name | x-sw-endpoint-name-match-rule | x-sw-endpoint-name-format | Matched | Grouping Result |
-   |-----|-----|-----|-----|-----|-----|-----|
-   | `GET:/products` | serviceB | default | default | default | true | `GET:/products` |
-   | `GET:/products/123` | serviceB | default | default | default |  true | `GET:/products/{id}` |
-   | `GET:/products/asia/cn` | serviceB | default | default | default | true | `GET:/products/{region}/{country}` |
-   | `GET:/products/123/abc/efg` | serviceB | default | default | default |  false | `GET:/products/123/abc/efg` | 
-   | `<GET>:/products/123` | serviceB | default | default | default | false | `<GET>:/products/123`|
-   | `GET:/products/123` | serviceC | default | default | default | false | `GET:/products/123` |
-   | `GET:/products/123` | serviceC | serviceC | default | default | true | `GET:/products/123` |
-   | `<GET>:/products/123` | serviceB | default | `<${METHOD}>:${PATH}` | `<${METHOD}>:${PATH}` | true | `<GET>:/products/{id}` |
-   | `GET:/products/123` | serviceB | default | default | `${PATH}:<${METHOD}>` | true | `/products/{id}:<GET>` |
-   | `/products/123:<GET>` | serviceB | default | `${PATH}:<${METHOD}>` | default | true | `GET:/products/{id}` |
-
-
-## Endpoint name grouping by custom configuration
-Currently, a user could set up grouping rules through the static YAML file named `endpoint-name-grouping.yml`,
-or use [Dynamic Configuration](dynamic-config.md) to initialize and update endpoint grouping rules.
-
-### Configuration Format
-Both the static local file and dynamic configuration value share the same YAML format.
-
-```yaml
-grouping:
-  # Endpoints of the service would follow the rules below
-  - service-name: serviceA
-    rules:
-      # Logical name used when the regex expression matches.
-      - endpoint-name: /prod/{id}
-        regex: \/prod\/.+
-```
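-
-A service may carry several rules. Here is a hedged sketch extending the example above with a second, hypothetical endpoint pattern (the `/users/...` rule is illustrative, not from the original docs):
-
-```yaml
-grouping:
-  - service-name: serviceA
-    rules:
-      - endpoint-name: /prod/{id}
-        regex: \/prod\/.+
-      # Hypothetical second rule: group order listings per user.
-      - endpoint-name: /users/{name}/orders
-        regex: \/users\/.+\/orders
-```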
diff --git a/docs/en/setup/backend/grafana-cluster.json b/docs/en/setup/backend/grafana-cluster.json
deleted file mode 100644
index ba99a90..0000000
--- a/docs/en/setup/backend/grafana-cluster.json
+++ /dev/null
@@ -1,4453 +0,0 @@
-{
-  "__inputs": [
-    {
-      "name": "DS_PROMETHEUS-SW",
-      "label": "prometheus-sw",
-      "description": "",
-      "type": "datasource",
-      "pluginId": "prometheus",
-      "pluginName": "Prometheus"
-    }
-  ],
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "${DS_PROMETHEUS-SW}",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "description": "SkyWalking OAP Cluster Monitor Dashboard",
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 0,
-  "id": null,
-  "iteration": 1615788933524,
-  "links": [],
-  "panels": [
-    {
-      "datasource": null,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 0
-      },
-      "id": 37,
-      "title": "Basic Info",
-      "type": "row"
-    },
-    {
-      "cacheTimeout": null,
-      "colorBackground": false,
-      "colorValue": false,
-      "colors": [
-        "#299c46",
-        "rgba(237, 129, 40, 0.89)",
-        "#d44a3a"
-      ],
-      "datasource": "$datasource",
-      "format": "none",
-      "gauge": {
-        "maxValue": 100,
-        "minValue": 0,
-        "show": false,
-        "thresholdLabels": false,
-        "thresholdMarkers": true
-      },
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 1
-      },
-      "id": 13,
-      "interval": null,
-      "links": [],
-      "mappingType": 1,
-      "mappingTypes": [
-        {
-          "name": "value to text",
-          "value": 1
-        },
-        {
-          "name": "range to text",
-          "value": 2
-        }
-      ],
-      "maxDataPoints": 100,
-      "nullPointMode": "connected",
-      "nullText": null,
-      "options": {},
-      "postfix": "",
-      "postfixFontSize": "50%",
-      "prefix": "",
-      "prefixFontSize": "50%",
-      "rangeMaps": [
-        {
-          "from": "null",
-          "text": "N/A",
-          "to": "null"
-        }
-      ],
-      "sparkline": {
-        "fillColor": "rgba(31, 118, 189, 0.18)",
-        "full": false,
-        "lineColor": "rgb(31, 120, 193)",
-        "show": false
-      },
-      "tableColumn": "",
-      "targets": [
-        {
-          "expr": "count(uptime{job=\"$job\"})",
-          "refId": "A"
-        }
-      ],
-      "thresholds": "",
-      "title": "Instances Number",
-      "type": "singlestat",
-      "valueFontSize": "80%",
-      "valueMaps": [
-        {
-          "op": "=",
-          "text": "N/A",
-          "value": "null"
-        }
-      ],
-      "valueName": "current"
-    },
-    {
-      "cacheTimeout": null,
-      "columns": [
-        {
-          "text": "Current",
-          "value": "current"
-        }
-      ],
-      "datasource": "$datasource",
-      "fontSize": "100%",
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 1
-      },
-      "id": 38,
-      "links": [],
-      "options": {},
-      "pageSize": null,
-      "showHeader": true,
-      "sort": {
-        "col": 1,
-        "desc": false
-      },
-      "styles": [
-        {
-          "alias": "",
-          "colorMode": null,
-          "colors": [
-            "rgba(245, 54, 54, 0.9)",
-            "rgba(237, 129, 40, 0.89)",
-            "rgba(50, 172, 45, 0.97)"
-          ],
-          "dateFormat": "YYYY-MM-DD HH:mm:ss",
-          "decimals": 2,
-          "mappingType": 1,
-          "pattern": "Metric",
-          "thresholds": [],
-          "type": "string",
-          "unit": "short"
-        },
-        {
-          "alias": "",
-          "colorMode": null,
-          "colors": [
-            "rgba(245, 54, 54, 0.9)",
-            "rgba(237, 129, 40, 0.89)",
-            "rgba(50, 172, 45, 0.97)"
-          ],
-          "dateFormat": "YYYY-MM-DD HH:mm:ss",
-          "decimals": 2,
-          "mappingType": 1,
-          "pattern": "Current",
-          "thresholds": [],
-          "type": "date",
-          "unit": "short"
-        }
-      ],
-      "targets": [
-        {
-          "expr": "process_start_time_seconds{job=\"$job\"}*1000",
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "title": "Instances Start Time",
-      "transform": "timeseries_aggregations",
-      "type": "table"
-    },
-    {
-      "cacheTimeout": null,
-      "columns": [
-        {
-          "text": "Current",
-          "value": "current"
-        }
-      ],
-      "datasource": "$datasource",
-      "fontSize": "100%",
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 16,
-        "y": 1
-      },
-      "id": 39,
-      "links": [],
-      "options": {},
-      "pageSize": null,
-      "showHeader": true,
-      "sort": {
-        "col": 1,
-        "desc": false
-      },
-      "styles": [
-        {
-          "alias": "",
-          "colorMode": null,
-          "colors": [
-            "rgba(245, 54, 54, 0.9)",
-            "rgba(237, 129, 40, 0.89)",
-            "rgba(50, 172, 45, 0.97)"
-          ],
-          "dateFormat": "YYYY-MM-DD HH:mm:ss",
-          "decimals": 2,
-          "mappingType": 1,
-          "pattern": "Metric",
-          "thresholds": [],
-          "type": "string",
-          "unit": "short"
-        },
-        {
-          "alias": "",
-          "colorMode": null,
-          "colors": [
-            "rgba(245, 54, 54, 0.9)",
-            "rgba(237, 129, 40, 0.89)",
-            "rgba(50, 172, 45, 0.97)"
-          ],
-          "dateFormat": "YYYY-MM-DD HH:mm:ss",
-          "decimals": 2,
-          "mappingType": 1,
-          "pattern": "Current",
-          "thresholds": [],
-          "type": "number",
-          "unit": "s"
-        }
-      ],
-      "targets": [
-        {
-          "expr": "time() - process_start_time_seconds{job=\"$job\"}",
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "title": "Instances Run Time",
-      "transform": "timeseries_aggregations",
-      "type": "table"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 7
-      },
-      "hiddenSeries": false,
-      "id": 20,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(process_cpu_seconds_total{job=\"$job\"}[1m]) * 100 / 60",
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "CPU * Cores number",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "percent",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 7
-      },
-      "hiddenSeries": false,
-      "id": 22,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_memory_bytes_used{job=\"$job\",area=\"heap\"}",
-          "legendFormat": "{{instance}}-used",
-          "refId": "A"
-        },
-        {
-          "expr": "jvm_memory_bytes_committed{job=\"$job\",area=\"heap\"}",
-          "legendFormat": "{{instance}}-committed",
-          "refId": "B"
-        },
-        {
-          "expr": "jvm_memory_bytes_max{job=\"$job\",area=\"heap\"}",
-          "legendFormat": "{{instance}}-max",
-          "refId": "C"
-        },
-        {
-          "expr": "jvm_memory_bytes_init{job=\"$job\",area=\"heap\"}",
-          "legendFormat": "{{instance}}-init",
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Heap Memory",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "decbytes",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 16,
-        "y": 7
-      },
-      "hiddenSeries": false,
-      "id": 21,
-      "interval": "1m",
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(jvm_gc_collection_seconds_sum{job=\"$job\"}[1m])",
-          "interval": "1m",
-          "legendFormat": "{{instance}}-{{gc}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "GC Time (Increment) / Minute",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 13
-      },
-      "hiddenSeries": false,
-      "id": 27,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_memory_pool_bytes_used{job=\"$job\"}",
-          "legendFormat": "{{instance}}-{{pool}}-used",
-          "refId": "A"
-        },
-        {
-          "expr": "jvm_memory_pool_bytes_committed{job=\"$job\"}",
-          "legendFormat": "{{instance}}-{{pool}}-committed",
-          "refId": "B"
-        },
-        {
-          "expr": "jvm_memory_pool_bytes_max{job=\"$job\"}",
-          "legendFormat": "{{instance}}-{{pool}}-max",
-          "refId": "C"
-        },
-        {
-          "expr": "jvm_memory_pool_bytes_init{job=\"$job\"}",
-          "legendFormat": "{{instance}}-{{pool}}-init",
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "JVM Memory Pool",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "decbytes",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 13
-      },
-      "hiddenSeries": false,
-      "id": 26,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_memory_bytes_used{job=\"$job\",area=\"nonheap\"}",
-          "legendFormat": "{{instance}}-used",
-          "refId": "A"
-        },
-        {
-          "expr": "jvm_memory_bytes_committed{job=\"$job\",area=\"nonheap\"}",
-          "legendFormat": "{{instance}}-committed",
-          "refId": "B"
-        },
-        {
-          "expr": "jvm_memory_bytes_max{job=\"$job\",area=\"nonheap\"}",
-          "legendFormat": "{{instance}}-max",
-          "refId": "C"
-        },
-        {
-          "expr": "jvm_memory_bytes_init{job=\"$job\",area=\"nonheap\"}",
-          "legendFormat": "{{instance}}-init",
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Nonheap Memory",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "decbytes",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 16,
-        "y": 13
-      },
-      "hiddenSeries": false,
-      "id": 33,
-      "interval": "1m",
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(jvm_gc_collection_seconds_count{job=\"$job\"}[1m])",
-          "interval": "1m",
-          "legendFormat": "{{instance}}-{{gc}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "GC Count (Increment) / Minute",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 19
-      },
-      "hiddenSeries": false,
-      "id": 40,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_threads_current{job=\"$job\"}",
-          "legendFormat": "{{instance}}-Current",
-          "refId": "A"
-        },
-        {
-          "expr": "jvm_threads_daemon{job=\"$job\"}",
-          "legendFormat": "{{instance}}-Daemon",
-          "refId": "B"
-        },
-        {
-          "expr": "jvm_threads_peak{job=\"$job\"}",
-          "legendFormat": "{{instance}}-Peak",
-          "refId": "C"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "JVM Thread Count",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 19
-      },
-      "hiddenSeries": false,
-      "id": 41,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_threads_state{job=\"$job\"}",
-          "legendFormat": "{{instance}}-{{state}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "JVM Thread State",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 16,
-        "y": 19
-      },
-      "hiddenSeries": false,
-      "id": 42,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "jvm_classes_loaded{job=\"$job\"}",
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Class Loaded",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "collapsed": false,
-      "datasource": null,
-      "gridPos": {
-        "h": 1,
-        "w": 24,
-        "x": 0,
-        "y": 25
-      },
-      "id": 35,
-      "panels": [],
-      "title": "OAP Info",
-      "type": "row"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 2,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "sum(rate(trace_in_latency_count{job=\"$job\"}[1m]))",
-          "legendFormat": "total",
-          "refId": "B"
-        },
-        {
-          "expr": "rate(trace_in_latency_count{job=\"$job\"}[1m])",
-          "legendFormat": "{{instance}}-{{protocol}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Trace Segment Received Count / Second",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 6,
-      "interval": "1m",
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(trace_in_latency_sum{job=\"$job\"}[1m])",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "1m",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}}-{{protocol}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Trace Segment Analysis Time (Increment) / Minute",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 16,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 11,
-      "interval": "1m",
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(trace_analysis_error_count{job=\"$job\"}[1m]) ",
-          "interval": "1m",
-          "legendFormat": "{{instance}}-{{protocol}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Trace Segment Analysis Error Count (Increment) / Minute",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 0,
-        "y": 32
-      },
-      "hiddenSeries": false,
-      "id": 24,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "sum(rate(mesh_analysis_latency_count{job=\"$job\"}[1m]))",
-          "legendFormat": "total",
-          "refId": "A"
-        },
-        {
-          "expr": "rate(mesh_analysis_latency_count{job=\"$job\"}[1m])",
-          "legendFormat": "{{instance}}",
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Mesh Received Count / Second",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "$datasource",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 6,
-        "w": 8,
-        "x": 8,
-        "y": 32
-      },
-      "hiddenSeries": false,
-      "id": 25,
-      "interval": "1m",
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "increase(mesh_analysis_latency_sum{job=\"$job\"}[1m])",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "1m",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Mesh Analysis Time (Increment) / Minute",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "s",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
... 11104 lines suppressed ...