You are viewing a plain text version of this content. The canonical link for it is here.
Posted to notifications@skywalking.apache.org by wu...@apache.org on 2020/03/15 15:28:29 UTC

[skywalking] branch master updated: Add selector property in application.yml (#4514)

This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/skywalking.git


The following commit(s) were added to refs/heads/master by this push:
     new 69e3d80  Add selector property in application.yml (#4514)
69e3d80 is described below

commit 69e3d80e24b203bef4d86bfce2ef201a961373b0
Author: kezhenxu94 <ke...@apache.org>
AuthorDate: Sun Mar 15 23:28:14 2020 +0800

    Add selector property in application.yml (#4514)
    
    ### Motivation
    
     Ease the effort of modifying configurations in automated workflows and scripts, like Docker and Helm charts.
    
     ### Modification
    
     Add a `selector` property into `application.yml` to enable switching configurations simply via environment variables or system properties, filtering out the unselected options and leaving only the selected one.
    
     ### Result
    
     - We can switch a configuration via environment variables or system properties
    
     - Closes #4511
---
 .github/workflows/e2e.yaml                         |   8 +-
 dist-material/application.yml                      | 378 ++++++++++-------
 docker/oap-es7/docker-entrypoint.sh                | 468 +-------------------
 docker/oap/docker-entrypoint.sh                    | 471 +--------------------
 docs/en/setup/backend/backend-cluster.md           |  60 +--
 docs/en/setup/backend/backend-setup.md             |  43 +-
 docs/en/setup/backend/backend-storage.md           |  19 +-
 docs/en/setup/backend/backend-telemetry.md         |  22 +-
 docs/en/setup/backend/dynamic-config.md            |  32 +-
 .../starter/config/ApplicationConfigLoader.java    |  56 ++-
 .../src/main/resources/application.yml             | 362 +++++++++-------
 .../config/ApplicationConfigLoaderTestCase.java    |   1 +
 .../src/test/resources/application.yml             | 181 --------
 .../library/module/ApplicationConfiguration.java   |   4 +-
 .../library/module/ModuleConfigException.java      |   4 +
 .../util/PropertyPlaceholderHelperTest.java        |  20 +-
 .../src/test/resources/application.yml             |  41 +-
 .../plugin/influxdb/InfluxStorageProvider.java     |   2 +-
 .../e2e-cluster-with-gateway-test-runner/pom.xml   |   2 +
 .../src/docker/clusterize.awk                      |  96 -----
 .../src/docker/rc.d/rc0-prepare.sh                 |   2 -
 .../e2e-cluster/e2e-cluster-test-runner/pom.xml    |   2 +
 .../src/docker/clusterize.awk                      | 101 -----
 .../src/docker/rc.d/rc0-prepare.sh                 |   2 -
 test/e2e/e2e-influxdb/src/docker/application.yml   |   2 +-
 .../src/docker/adapt_storage.awk                   |   2 +-
 test/e2e/e2e-ttl/e2e-ttl-es/pom.xml                |   1 +
 .../e2e-ttl/e2e-ttl-es/src/docker/es_storage.awk   |  68 ---
 .../e2e-ttl-es/src/docker/rc.d/rc0-prepare.sh      |   2 -
 test/e2e/e2e-ttl/e2e-ttl-influxdb/pom.xml          |   1 +
 .../e2e-ttl-influxdb/src/docker/influx_storage.awk |  64 ---
 .../src/docker/rc.d/rc0-prepare.sh                 |   2 -
 test/e2e/run.sh                                    |   6 +
 tools/profile-exporter/application.yml             | 133 +++---
 34 files changed, 717 insertions(+), 1941 deletions(-)

diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index e8add44..eaf082c 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -94,15 +94,15 @@ jobs:
       - name: Cluster Tests (InfluxDB/ZK/JDK8)
         run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner --storage=influxdb
       - name: Cluster With Gateway Tests (ES6/ZK/JDK8)
-        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner
+        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner --storage=elasticsearch
       - name: Cluster Tests (ES7/ZK/JDK8)
         run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner --storage=elasticsearch
       - name: TTL ES Tests(JDK8)
-        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
+        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es --storage=elasticsearch
       - name: TTL ES7 Tests(JDK8)
-        run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
+        run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es --storage=elasticsearch7
       - name: TTL InfluxDB Tests(JDK8)
-        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-influxdb
+        run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-influxdb --storage=influxdb
 
   Compatibilities:
     runs-on: ubuntu-latest
diff --git a/dist-material/application.yml b/dist-material/application.yml
index 404a9d6..c692829 100644
--- a/dist-material/application.yml
+++ b/dist-material/application.yml
@@ -15,40 +15,42 @@
 # limitations under the License.
 
 cluster:
+  selector: ${SW_CLUSTER:standalone}
   standalone:
   # Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
   # library the oap-libs folder with your ZooKeeper 3.4.x library.
-#  zookeeper:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
-#    #Retry Policy
-#    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-#    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-#    # Enable ACL
-#    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-#    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
-#    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
-#  kubernetes:
-#    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
-#    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
-#    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-#    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
-#  consul:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-#    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
-#    # Consul aclToken
-#    #aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN}
-#  nacos:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
-#  # Nacos Configuration namespace
-#    namespace: 'public'
-#  etcd:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-#    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
+  zookeeper:
+    nameSpace: ${SW_NAMESPACE:""}
+    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
+    # Retry Policy
+    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
+    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
+    # Enable ACL
+    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
+    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
+    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
+  kubernetes:
+    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
+    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
+    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
+    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
+  consul:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
+    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
+    aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN:""}
+  nacos:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
+    # Nacos Configuration namespace
+    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
+  etcd:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
+    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
+
 core:
+  selector: ${SW_CORE:default}
   default:
     # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
     # Receiver: Receive agent data, Level 1 aggregate
@@ -83,102 +85,112 @@ core:
     # and it will cause more load for memory, network of OAP and storage.
     # But, being activated, user could see the name in the storage entities, which make users easier to use 3rd party tool, such as Kibana->ES, to query the data by themselves.
     activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:false}
+
 storage:
-#  elasticsearch:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-#    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-#    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-#    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
-#    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
-#    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
-#    user: ${SW_ES_USER:""}
-#    password: ${SW_ES_PASSWORD:""}
-#    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
-#    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-#    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-#    # Those data TTL settings will override the same settings in core module.
-#    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-#    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-#    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-#    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-#    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
-#    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-#    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-#    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-#    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-#    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-#    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
-#  elasticsearch7:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-#    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-#    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-#    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
-#    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
-#    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
-#    user: ${SW_ES_USER:""}
-#    password: ${SW_ES_PASSWORD:""}
-#    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
-#    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-#    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-#    # Those data TTL settings will override the same settings in core module.
-#    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-#    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-#    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-#    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-#    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
-#    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-#    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-#    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-#    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-#    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-#    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
+  selector: ${SW_STORAGE:h2}
+  elasticsearch:
+    nameSpace: ${SW_NAMESPACE:""}
+    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
+    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
+    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
+    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
+    user: ${SW_ES_USER:""}
+    password: ${SW_ES_PASSWORD:""}
+    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
+    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
+    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
+    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
+    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
+    # Those data TTL settings will override the same settings in core module.
+    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
+    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
+    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
+    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
+    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
+    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
+    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
+    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
+    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
+    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
+    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
+    advanced: ${SW_STORAGE_ES_ADVANCED:""}
+  elasticsearch7:
+    nameSpace: ${SW_NAMESPACE:""}
+    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
+    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
+    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
+    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
+    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
+    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
+    user: ${SW_ES_USER:""}
+    password: ${SW_ES_PASSWORD:""}
+    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
+    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
+    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
+    # Those data TTL settings will override the same settings in core module.
+    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
+    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
+    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
+    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
+    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
+    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
+    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
+    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
+    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
+    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
+    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
+    advanced: ${SW_STORAGE_ES_ADVANCED:""}
   h2:
     driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
     url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
     user: ${SW_STORAGE_H2_USER:sa}
     metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-#  mysql:
-#    properties:
-#      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-#      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-#  influx:
-#    # Metadata storage provider configuration
-#    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
-#    h2Props:
-#      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
-#      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
-#    mysqlProps:
-#      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
-#    # InfluxDB configuration
-#    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-#    user: ${SW_STORAGE_INFLUXDB_USER:root}
-#    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
-#    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-#    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-#    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-#    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
+  mysql:
+    properties:
+      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_DATA_SOURCE_USER:root}
+      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
+  influxdb:
+    # Metadata storage provider configuration
+    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
+    h2Props:
+      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
+      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
+    mysqlProps:
+      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
+    # InfluxDB configuration
+    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
+    user: ${SW_STORAGE_INFLUXDB_USER:root}
+    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
+    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
+    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
+    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
+    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
+
 receiver-sharing-server:
+  selector: ${SW_RECEIVER_SHARING_SERVER:default}
   default:
+    authentication: ${SW_AUTHENTICATION:""}
 receiver-register:
+  selector: ${SW_RECEIVER_REGISTER:default}
   default:
+
 receiver-trace:
+  selector: ${SW_RECEIVER_TRACE:default}
   default:
     bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
@@ -186,77 +198,117 @@ receiver-trace:
     bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
     sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
     slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
+
 receiver-jvm:
+  selector: ${SW_RECEIVER_JVM:default}
   default:
+
 receiver-clr:
+  selector: ${SW_RECEIVER_CLR:default}
   default:
+
 receiver-profile:
+  selector: ${SW_RECEIVER_PROFILE:default}
   default:
+
 service-mesh:
+  selector: ${SW_SERVICE_MESH:default}
   default:
     bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
     bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
     bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
+
 istio-telemetry:
+  selector: ${SW_ISTIO_TELEMETRY:default}
   default:
+
 envoy-metric:
+  selector: ${SW_ENVOY_METRIC:default}
   default:
-#    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
-#receiver_zipkin:
-#  default:
-#    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-#    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
-#    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
+    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}
+
+receiver_zipkin:
+  selector: ${SW_RECEIVER_ZIPKIN:-}
+  default:
+    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
+    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
+    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
+
+receiver_jaeger:
+  selector: ${SW_RECEIVER_JAEGER:-}
+  default:
+    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
+    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
+
 query:
+  selector: ${SW_QUERY:graphql}
   graphql:
     path: ${SW_QUERY_GRAPHQL_PATH:/graphql}
+
 alarm:
+  selector: ${SW_ALARM:default}
   default:
+
 telemetry:
+  selector: ${SW_TELEMETRY:none}
   none:
+  prometheus:
+    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+  so11y:
+    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
+    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+
+receiver-so11y:
+  selector: ${SW_RECEIVER_SO11Y:-}
+  default:
+
 configuration:
+  selector: ${SW_CONFIGURATION:none}
   none:
-#  apollo:
-#    apolloMeta: http://106.12.25.204:8080
-#    apolloCluster: default
-#    # apolloEnv: # defaults to null
-#    appId: skywalking
-#    period: 5
-#  nacos:
-#    # Nacos Server Host
-#    serverAddr: 127.0.0.1
-#    # Nacos Server Port
-#    port: 8848
-#    # Nacos Configuration Group
-#    group: 'skywalking'
-#    # Nacos Configuration namespace
-#    namespace: ''
-#    # Unit seconds, sync period. Default fetch every 60 seconds.
-#    period : 60
-#    # the name of current cluster, set the name if you want to upstream system known.
-#    clusterName: "default"
-#  zookeeper:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    nameSpace: /default
-#    hostPort: localhost:2181
-#    #Retry Policy
-#    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
-#    maxRetries: 3 # max number of times to retry
-#  etcd:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    group :  'skywalking'
-#    serverAddr: localhost:2379
-#    clusterName: "default"
-#  consul:
-#    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-#    hostAndPorts: ${consul.address}
-#    # Sync period in seconds. Defaults to 60 seconds.
-#    period: 1
-#    # Consul aclToken
-#    #aclToken: ${consul.aclToken}
+  apollo:
+    apolloMeta: http://106.12.25.204:8080
+    apolloCluster: default
+    apolloEnv: ""
+    appId: skywalking
+    period: 5
+  nacos:
+    # Nacos Server Host
+    serverAddr: 127.0.0.1
+    # Nacos Server Port
+    port: 8848
+    # Nacos Configuration Group
+    group: 'skywalking'
+    # Nacos Configuration namespace
+    namespace: ''
+    # Unit seconds, sync period. Default fetch every 60 seconds.
+    period : 60
+    # the name of current cluster, set the name if you want to upstream system known.
+    clusterName: "default"
+  zookeeper:
+    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
+    nameSpace: /default
+    hostPort: localhost:2181
+    # Retry Policy
+    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
+    maxRetries: 3 # max number of times to retry
+  etcd:
+    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
+    group :  'skywalking'
+    serverAddr: localhost:2379
+    clusterName: "default"
+  consul:
+    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
+    hostAndPorts: ${consul.address}
+    # Sync period in seconds. Defaults to 60 seconds.
+    period: 1
+    # Consul aclToken
+    #aclToken: ${consul.aclToken}
 
-#exporter:
-#  grpc:
-#    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
-#    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
+exporter:
+  selector: ${SW_EXPORTER:-}
+  grpc:
+    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
+    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
diff --git a/docker/oap-es7/docker-entrypoint.sh b/docker/oap-es7/docker-entrypoint.sh
index 18619d3..ae6a44b 100755
--- a/docker/oap-es7/docker-entrypoint.sh
+++ b/docker/oap-es7/docker-entrypoint.sh
@@ -18,478 +18,18 @@
 
 set -e
 
-var_application_file="config/application.yml"
-
-generateClusterStandalone() {
-    echo "cluster:" >> ${var_application_file}
-    echo "  standalone:" >> ${var_application_file}
-}
-
-generateClusterZookeeper() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  zookeeper:
-    nameSpace: \${SW_NAMESPACE:""}
-    hostPort: \${SW_CLUSTER_ZK_HOST_PORT:zookeeper:2181}
-    #Retry Policy
-    baseSleepTimeMs: \${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-    maxRetries: \${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-    # Enable ACL
-    enableACL: \${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-    schema: \${SW_ZK_SCHEMA:digest} # only support digest schema
-    expression: \${SW_ZK_EXPRESSION:skywalking:skywalking}
-EOT
-}
-
-generateClusterK8s() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  kubernetes:
-    watchTimeoutSeconds: \${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
-    namespace: \${SW_CLUSTER_K8S_NAMESPACE:default}
-    labelSelector: \${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-    uidEnvName: \${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
-EOT
-}
-
-generateClusterConsul() {
-     cat <<EOT >> ${var_application_file}
-cluster:
-  consul:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-    hostPort: \${SW_CLUSTER_CONSUL_HOST_PORT:consul:8500}
-EOT
-}
-
-generateClusterEtcd() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  etcd:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-    hostPort: \${SW_CLUSTER_ETCD_HOST_PORT:etcd:2379}
-EOT
-}
-
-generateClusterNacos() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  nacos:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    namespace: \${SW_CLUSTER_NACOS_NAMESPACE:""}
-    hostPort: \${SW_CLUSTER_NACOS_HOST_PORT:nacos:8848}
-EOT
-}
+echo "[Entrypoint] Apache SkyWalking Docker Image"
 
-generateStorageElastisearch() {
-if [[ "$SW_RECEIVER_ZIPKIN_ENABLED" = "true" ]]; then
-    cat <<EOT >> ${var_application_file}
-storage:
-  zipkin-elasticsearch:
-EOT
-elif [[ "$SW_RECEIVER_JAEGER_ENABLED" = "true" ]]; then
-    cat <<EOT >> ${var_application_file}
-storage:
-  jaeger-elasticsearch:
-EOT
-else
-    cat <<EOT >> ${var_application_file}
-storage:
-  elasticsearch7:
-EOT
+if [[ "$SW_TELEMETRY" = "so11y" ]]; then
+    export SW_RECEIVER_SO11Y=default
+    echo "Set SW_RECEIVER_SO11Y to ${SW_RECEIVER_SO11Y}"
 fi
-cat <<EOT >> ${var_application_file}
-    nameSpace: \${SW_NAMESPACE:""}
-    clusterNodes: \${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-    protocol: \${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-    user: \${SW_ES_USER:""}
-    password: \${SW_ES_PASSWORD:""}
-    indexShardsNumber: \${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-    indexReplicasNumber: \${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-    # Those data TTL settings will override the same settings in core module.
-    recordDataTTL: \${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-    otherMetricsDataTTL: \${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-    monthMetricsDataTTL: \${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-    bulkActions: \${SW_STORAGE_ES_BULK_ACTIONS:2000} # Execute the bulk every 2000 requests
-    bulkSize: \${SW_STORAGE_ES_BULK_SIZE:20} # flush the bulk every 20mb
-    flushInterval: \${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-    concurrentRequests: \${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-    resultWindowMaxSize: \${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-    metadataQueryMaxSize: \${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-    segmentQueryMaxSize: \${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-EOT
-}
-
-generateStorageH2() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  h2:
-    driver: \${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-    url: \${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-    user: \${SW_STORAGE_H2_USER:sa}
-    metadataQueryMaxSize: \${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-EOT
-}
-
-generateStorageMySQL() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  mysql:
-    properties:
-        jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-        dataSource.user: ${SW_DATA_SOURCE_USER:root}
-        dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-        dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-        dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-        dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-        dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: \${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-EOT
-}
-
-generateStorageInfluxDB() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  influx:
-    # Metadata storage provider configuration
-    metabaseType: \${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
-    h2Props:
-      dataSourceClassName: \${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
-      dataSource.url: \${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
-      dataSource.user: \${SW_STORAGE_METABASE_USER:sa}
-      dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:}
-    mysqlProps:
-      jdbcUrl: \${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
-      dataSource.user: \${SW_STORAGE_METABASE_USER:root}
-      dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:root@1234}
-      dataSource.cachePrepStmts: \${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: \${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: \${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
-    # InfluxDB configuration
-    url: \${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-    user: \${SW_STORAGE_INFLUXDB_USER:root}
-    password: \${SW_STORAGE_INFLUXDB_PASSWORD:}
-    database: \${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-    actions: \${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-    duration: \${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-    fetchTaskLogMaxSize: \${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
-EOT
-}
-
-generateConfigurationNone() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  none:
-EOT
-}
-
-generateConfigurationApollo() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  apollo:
-    apolloMeta: \${SW_CONFIGURATION_APOLLO_META:http://apollo:8080}
-    apolloCluster: \${SW_CONFIGURATION_APOLLO_CLUSTER:default}
-    apolloEnv: \${SW_CONFIGURATION_APOLLO_ENV:""}
-    appId: \${SW_CONFIGURATION_APOLLO_APP_ID:skywalking}
-    period: \${SW_CONFIGURATION_APOLLO_PERIOD:5}
-EOT
-}
-
-generateConfigurationNacos() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  nacos:
-    # Nacos Server Host
-    serverAddr: \${SW_CONFIGURATION_NACOS_SERVER_ADDR:nacos}
-    # Nacos Server Port
-    port: \${SW_CONFIGURATION_NACOS_PORT:8848}
-    # Nacos Configuration Group
-    group: \${SW_CONFIGURATION_NACOS_GROUP:skywalking}
-    # Nacos Configuration namespace
-    namespace: \${SW_CONFIGURATION_NACOS_NAMESPACE:""}
-    # Unit seconds, sync period. Default fetch every 60 seconds.
-    period : \${SW_CONFIGURATION_NACOS_PERIOD:5}
-    # the name of current cluster, set the name if you want to upstream system known.
-    clusterName: \${SW_CONFIGURATION_NACOS_CLUSTER_NAME:default}
-EOT
-}
-
-generateConfigurationZookeeper() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  zookeeper:
-    period: \${SW_CONFIGURATION_ZOOKEEPER_PERIOD:60} # Unit seconds, sync period. Default fetch every 60 seconds.
-    nameSpace: \${SW_CONFIGURATION_ZOOKEEPER_NAMESPACE:/default}
-    hostPort: \${SW_CONFIGURATION_ZOOKEEPER_HOST_PATH:localhost:2181}
-    #Retry Policy
-    baseSleepTimeMs: \${SW_CONFIGURATION_ZOOKEEPER_BASE_SLEEP_TIME_MS:1000} # initial amount of time to wait between retries
-    maxRetries: \${SW_CONFIGURATION_ZOOKEEPER_MAX_RETRIES:3}3 # max number of times to retry
-EOT
-}
-
-generateConfigurationGRPC() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  grpc:
-    host: \${SW_CONFIGURATION_GRPC_HOST:127.0.0.1}
-    port: \${SW_CONFIGURATION_GRPC_PORT:9555}
-    period: \${SW_CONFIGURATION_GRPC_PERIOD:60}
-    clusterName: \${SW_CONFIGURATION_GRPC_CLUSTER_NAME:"default"}
-EOT
-}
-
-generateConfigurationConsul() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  consul:
-    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-    hostAndPorts: \${SW_CONFIGURATION_CONSUL_ADDRESS:127.0.0.1:8500}
-    # Sync period in seconds. Defaults to 60 seconds.
-    period: \${SW_CONFIGURATION_CONSUL_PERIOD:60}
-EOT
-}
-
-generateTelemetryNone() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  none:
-EOT
-}
-
-generateTelemetryPrometheus() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  prometheus:
-    host: \${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-    port: \${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-EOT
-}
-
-generateTelemetrySo11y() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  so11y:
-    prometheusExporterEnabled: \${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
-    prometheusExporterHost: \${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-    prometheusExporterPort: \${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-EOT
-}
-
-validateVariables() {
-    name=$1; value=$2; list=$3
-    valid=false
-    for c in ${list} ; do
-        if [[ "$c" = "$value" ]]; then
-            valid=true
-        fi
-    done
 
-    if ! ${valid}; then
-        echo "Error: $name=$value please specify $name = $list"
-        exit 1
-    fi
-}
-
-generateApplicationYaml() {
-    # validate
-    [[ -z "$SW_CLUSTER" ]] && [[ -z "$SW_STORAGE" ]] && [[ -z "$SW_CONFIGURATION" ]] \
-        && [[ -z "$SW_TELEMETRY" ]] \
-        && { echo "Error: please specify \"SW_CLUSTER\" \"SW_STORAGE\" \"SW_CONFIGURATION\" \"SW_TELEMETRY\""; exit 1; }
-
-    validateVariables "SW_CLUSTER" "$SW_CLUSTER" "standalone zookeeper kubernetes consul etcd nacos"
-
-    validateVariables "SW_STORAGE" "$SW_STORAGE" "elasticsearch h2 mysql influxdb"
-
-    validateVariables "SW_CONFIGURATION" "$SW_CONFIGURATION" "none apollo nacos zookeeper"
-
-    validateVariables "SW_TELEMETRY" "$SW_TELEMETRY" "none prometheus so11y"
-
-    echo "# Generated by 'docker-entrypoint.sh'" > ${var_application_file}
-    #generate cluster
-    case ${SW_CLUSTER} in
-    standalone) generateClusterStandalone;;
-    zookeeper) generateClusterZookeeper;;
-    kubernetes) generateClusterK8s;;
-    consul) generateClusterConsul;;
-    etcd) generateClusterEtcd;;
-    nacos) generateClusterNacos;;
-    esac
-
-    #generate core
-    cat <<EOT >> ${var_application_file}
-core:
-  default:
-    # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
-    # Receiver: Receive agent data, Level 1 aggregate
-    # Aggregator: Level 2 aggregate
-    role: \${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
-    restHost: \${SW_CORE_REST_HOST:0.0.0.0}
-    restPort: \${SW_CORE_REST_PORT:12800}
-    restContextPath: \${SW_CORE_REST_CONTEXT_PATH:/}
-    gRPCHost: \${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: \${SW_CORE_GRPC_PORT:11800}
-    gRPCSslEnabled: \${SW_CORE_GRPC_SSL_ENABLED:false}
-    gRPCSslKeyPath: \${SW_CORE_GRPC_SSL_KEY_PATH:""}
-    gRPCSslCertChainPath: \${SW_CORE_GRPC_SSL_CERT_CHAIN_PATH:""}
-    gRPCSslTrustedCAPath: \${SW_CORE_GRPC_SSL_TRUSTED_CA_PATH:""}
-    downsampling:
-    - Hour
-    - Day
-    - Month
-    # Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
-    enableDataKeeperExecutor: \${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # Turn it off then automatically metrics data delete will be close.
-    dataKeeperExecutePeriod: \${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs periodically, unit is minute
-    recordDataTTL: \${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
-    minuteMetricsDataTTL: \${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
-    hourMetricsDataTTL: \${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
-    dayMetricsDataTTL: \${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
-    monthMetricsDataTTL: \${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
-    # Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
-    # the metrics may not be accurate within that minute.
-    enableDatabaseSession: \${SW_CORE_ENABLE_DATABASE_SESSION:true}
-    topNReportPeriod: \${SW_CORE_TOPN_REPORT_PERIOD:10}
-EOT
-
-    # generate storage
-    case ${SW_STORAGE} in
-    elasticsearch) generateStorageElastisearch;;
-    h2) generateStorageH2;;
-    mysql) generateStorageMySQL;;
-    influxdb) generateStorageInfluxDB;;
-    esac
-
-    cat <<EOT >> ${var_application_file}
-receiver-sharing-server:
-  default:
-   restHost: \${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}
-   restPort: \${SW_RECEIVER_SHARING_REST_PORT:0}
-   restContextPath: \${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}
-   gRPCHost: \${SW_RECEIVER_SHARING_GRPC_HOST:0.0.0.0}
-   gRPCPort: \${SW_RECEIVER_SHARING_GRPC_PORT:0}
-   maxConcurrentCallsPerConnection: \${SW_RECEIVER_SHARING_MAX_CONCURRENT_CALL:0}
-   maxMessageSize: \${SW_RECEIVER_SHARING_MAX_MESSAGE_SIZE:0}
-   gRPCThreadPoolSize: \${SW_RECEIVER_SHARING_GRPC_THREAD_POOL_SIZE:0}
-   gRPCThreadPoolQueueSize: \${SW_RECEIVER_SHARING_GRPC_THREAD_POOL_QUEUE_SIZE:0}
-   authentication: \${SW_AUTHENTICATION:""}
-   gRPCSslEnabled: \${SW_RECEIVER_SHARING_GRPC_SSL_ENABLED:false}
-   gRPCSslKeyPath: \${SW_RECEIVER_SHARING_GRPC_SSL_KEY_PATH:""}
-   gRPCSslCertChainPath: \${SW_RECEIVER_SHARING_GRPC_SSL_CERT_CHAIN_PATH:""}
-receiver-register:
-  default:
-receiver-trace:
-  default:
-    bufferPath: \${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: \${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: \${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: \${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-    sampleRate: \${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
-    slowDBAccessThreshold: \${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
-receiver-jvm:
-  default:
-receiver-clr:
-  default:
-service-mesh:
-  default:
-    bufferPath: \${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: \${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: \${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: \${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-istio-telemetry:
-  default:
-query:
-  graphql:
-    path: \${SW_QUERY_GRAPHQL_PATH:/graphql}
-alarm:
-  default:
-EOT
-    # generate telemetry
-    case ${SW_TELEMETRY} in
-    none) generateTelemetryNone;;
-    prometheus) generateTelemetryPrometheus;;
-    so11y) generateTelemetrySo11y;;
-    esac
-
-    # generate configuration
-    case ${SW_CONFIGURATION} in
-    none) generateConfigurationNone;;
-    apollo) generateConfigurationApollo;;
-    nacos) generateConfigurationNacos;;
-    zookeeper) generateConfigurationZookeeper;;
-    consul) generateConfigurationConsul;;
-    grpc) generateConfigurationGRPC;;
-    esac
-
-    cat <<EOT >> ${var_application_file}
-envoy-metric:
-  default:
-EOT
-    if [[ "$SW_ENVOY_ALS_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-    alsHTTPAnalysis: \${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
-EOT
-    fi
-
-    if [[ "$SW_RECEIVER_ZIPKIN_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver_zipkin:
-  default:
-    host: \${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-    port: \${SW_RECEIVER_ZIPKIN_PORT:9411}
-    contextPath: \${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-EOT
-    fi
-
-    if [[ "$SW_RECEIVER_JAEGER_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver_jaeger:
-  default:
-    gRPCHost: \${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
-    gRPCPort: \${SW_RECEIVER_JAEGER_PORT:14250}
-EOT
-    fi
-
-    if [[ "$SW_TELEMETRY" = "so11y" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver-so11y:
-  default:
-EOT
-    fi
-
-    if [[ "$SW_EXPORTER_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-exporter:
-  grpc:
-    targetHost: \${SW_EXPORTER_GRPC_HOST:127.0.0.1}
-    targetPort: \${SW_EXPORTER_GRPC_PORT:9870}
-EOT
-    fi
-}
-
-echo "[Entrypoint] Apache SkyWalking Docker Image"
-
-SW_CLUSTER=${SW_CLUSTER:-standalone}
-SW_STORAGE=${SW_STORAGE:-h2}
-SW_CONFIGURATION=${SW_CONFIGURATION:-none}
-SW_TELEMETRY=${SW_TELEMETRY:-none}
 EXT_LIB_DIR=/skywalking/ext-libs
 EXT_CONFIG_DIR=/skywalking/ext-config
 
-# If user wants to override application.yml, the one generated by docker-entrypoint.sh should be ignored.
-[[ -f ${EXT_CONFIG_DIR}/application.yml ]] && SW_L0AD_CONFIG_FILE_FROM_VOLUME=true
-
 # Override configuration files
 cp -vfR ${EXT_CONFIG_DIR}/ config/
-if [[ -z "$SW_L0AD_CONFIG_FILE_FROM_VOLUME" ]] || [[ "$SW_L0AD_CONFIG_FILE_FROM_VOLUME" != "true" ]]; then
-    generateApplicationYaml
-    echo "Generated application.yml"
-    echo "-------------------------"
-    cat ${var_application_file}
-    echo "-------------------------"
-fi
 
 CLASSPATH="config:$CLASSPATH"
 for i in oap-libs/*.jar
diff --git a/docker/oap/docker-entrypoint.sh b/docker/oap/docker-entrypoint.sh
index a7467c6..ae6a44b 100755
--- a/docker/oap/docker-entrypoint.sh
+++ b/docker/oap/docker-entrypoint.sh
@@ -18,481 +18,18 @@
 
 set -e
 
-var_application_file="config/application.yml"
-
-generateClusterStandalone() {
-    echo "cluster:" >> ${var_application_file}
-    echo "  standalone:" >> ${var_application_file}
-}
-
-generateClusterZookeeper() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  zookeeper:
-    nameSpace: \${SW_NAMESPACE:""}
-    hostPort: \${SW_CLUSTER_ZK_HOST_PORT:zookeeper:2181}
-    #Retry Policy
-    baseSleepTimeMs: \${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-    maxRetries: \${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-    # Enable ACL
-    enableACL: \${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-    schema: \${SW_ZK_SCHEMA:digest} # only support digest schema
-    expression: \${SW_ZK_EXPRESSION:skywalking:skywalking}
-EOT
-}
-
-generateClusterK8s() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  kubernetes:
-    watchTimeoutSeconds: \${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
-    namespace: \${SW_CLUSTER_K8S_NAMESPACE:default}
-    labelSelector: \${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-    uidEnvName: \${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
-EOT
-}
-
-generateClusterConsul() {
-     cat <<EOT >> ${var_application_file}
-cluster:
-  consul:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-    hostPort: \${SW_CLUSTER_CONSUL_HOST_PORT:consul:8500}
-EOT
-}
-
-generateClusterEtcd() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  etcd:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-    hostPort: \${SW_CLUSTER_ETCD_HOST_PORT:etcd:2379}
-EOT
-}
-
-generateClusterNacos() {
-    cat <<EOT >> ${var_application_file}
-cluster:
-  nacos:
-    serviceName: \${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    namespace: \${SW_CLUSTER_NACOS_NAMESPACE:""}
-    hostPort: \${SW_CLUSTER_NACOS_HOST_PORT:nacos:8848}
-EOT
-}
+echo "[Entrypoint] Apache SkyWalking Docker Image"
 
-generateStorageElastisearch() {
-if [[ "$SW_RECEIVER_ZIPKIN_ENABLED" = "true" ]]; then
-    cat <<EOT >> ${var_application_file}
-storage:
-  zipkin-elasticsearch:
-EOT
-elif [[ "$SW_RECEIVER_JAEGER_ENABLED" = "true" ]]; then
-    cat <<EOT >> ${var_application_file}
-storage:
-  jaeger-elasticsearch:
-EOT
-else
-    cat <<EOT >> ${var_application_file}
-storage:
-  elasticsearch:
-EOT
+if [[ "$SW_TELEMETRY" = "so11y" ]]; then
+    export SW_RECEIVER_SO11Y=default
+    echo "Set SW_RECEIVER_SO11Y to ${SW_RECEIVER_SO11Y}"
 fi
-cat <<EOT >> ${var_application_file}
-    nameSpace: \${SW_NAMESPACE:""}
-    clusterNodes: \${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-    protocol: \${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-    user: \${SW_ES_USER:""}
-    password: \${SW_ES_PASSWORD:""}
-    indexShardsNumber: \${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-    indexReplicasNumber: \${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-    # Those data TTL settings will override the same settings in core module.
-    recordDataTTL: \${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-    otherMetricsDataTTL: \${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-    monthMetricsDataTTL: \${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-    bulkActions: \${SW_STORAGE_ES_BULK_ACTIONS:2000} # Execute the bulk every 2000 requests
-    bulkSize: \${SW_STORAGE_ES_BULK_SIZE:20} # flush the bulk every 20mb
-    flushInterval: \${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-    concurrentRequests: \${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-    resultWindowMaxSize: \${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-    metadataQueryMaxSize: \${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-    segmentQueryMaxSize: \${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-    profileTaskQueryMaxSize: \${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
-EOT
-}
-
-generateStorageH2() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  h2:
-    driver: \${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-    url: \${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-    user: \${SW_STORAGE_H2_USER:sa}
-    metadataQueryMaxSize: \${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-EOT
-}
-
-generateStorageMySQL() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  mysql:
-    properties:
-        jdbcUrl: \${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-        dataSource.user: \${SW_DATA_SOURCE_USER:root}
-        dataSource.password: \${SW_DATA_SOURCE_PASSWORD:root@1234}
-        dataSource.cachePrepStmts: \${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-        dataSource.prepStmtCacheSize: \${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-        dataSource.prepStmtCacheSqlLimit: \${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-        dataSource.useServerPrepStmts: \${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: \${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-EOT
-}
-
-generateStorageInfluxDB() {
-    cat <<EOT >> ${var_application_file}
-storage:
-  influx:
-    # Metadata storage provider configuration
-    metabaseType: \${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
-    h2Props:
-      dataSourceClassName: \${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
-      dataSource.url: \${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
-      dataSource.user: \${SW_STORAGE_METABASE_USER:sa}
-      dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:}
-    mysqlProps:
-      jdbcUrl: \${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
-      dataSource.user: \${SW_STORAGE_METABASE_USER:root}
-      dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:root@1234}
-      dataSource.cachePrepStmts: \${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: \${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: \${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
-    # InfluxDB configuration
-    url: \${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-    user: \${SW_STORAGE_INFLUXDB_USER:root}
-    password: \${SW_STORAGE_INFLUXDB_PASSWORD:}
-    database: \${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-    actions: \${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-    duration: \${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-    fetchTaskLogMaxSize: \${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
-EOT
-}
-
-generateConfigurationNone() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  none:
-EOT
-}
-
-generateConfigurationApollo() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  apollo:
-    apolloMeta: \${SW_CONFIGURATION_APOLLO_META:http://apollo:8080}
-    apolloCluster: \${SW_CONFIGURATION_APOLLO_CLUSTER:default}
-    apolloEnv: \${SW_CONFIGURATION_APOLLO_ENV:""}
-    appId: \${SW_CONFIGURATION_APOLLO_APP_ID:skywalking}
-    period: \${SW_CONFIGURATION_APOLLO_PERIOD:5}
-EOT
-}
-
-generateConfigurationNacos() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  nacos:
-    # Nacos Server Host
-    serverAddr: \${SW_CONFIGURATION_NACOS_SERVER_ADDR:nacos}
-    # Nacos Server Port
-    port: \${SW_CONFIGURATION_NACOS_PORT:8848}
-    # Nacos Configuration Group
-    group: \${SW_CONFIGURATION_NACOS_GROUP:skywalking}
-    # Nacos Configuration namespace
-    namespace: \${SW_CONFIGURATION_NACOS_NAMESPACE:""}
-    # Unit seconds, sync period. Default fetch every 60 seconds.
-    period : \${SW_CONFIGURATION_NACOS_PERIOD:5}
-    # the name of current cluster, set the name if you want to upstream system known.
-    clusterName: \${SW_CONFIGURATION_NACOS_CLUSTER_NAME:default}
-EOT
-}
-
-generateConfigurationZookeeper() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  zookeeper:
-    period: \${SW_CONFIGURATION_ZOOKEEPER_PERIOD:60} # Unit seconds, sync period. Default fetch every 60 seconds.
-    nameSpace: \${SW_CONFIGURATION_ZOOKEEPER_NAMESPACE:/default}
-    hostPort: \${SW_CONFIGURATION_ZOOKEEPER_HOST_PATH:localhost:2181}
-    #Retry Policy
-    baseSleepTimeMs: \${SW_CONFIGURATION_ZOOKEEPER_BASE_SLEEP_TIME_MS:1000} # initial amount of time to wait between retries
-    maxRetries: \${SW_CONFIGURATION_ZOOKEEPER_MAX_RETRIES:3}3 # max number of times to retry
-EOT
-}
-
-generateConfigurationGRPC() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  grpc:
-    host: \${SW_CONFIGURATION_GRPC_HOST:127.0.0.1}
-    port: \${SW_CONFIGURATION_GRPC_PORT:9555}
-    period: \${SW_CONFIGURATION_GRPC_PERIOD:60}
-    clusterName: \${SW_CONFIGURATION_GRPC_CLUSTER_NAME:"default"}
-EOT
-}
-
-generateConfigurationConsul() {
-    cat <<EOT >> ${var_application_file}
-configuration:
-  consul:
-    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-    hostAndPorts: \${SW_CONFIGURATION_CONSUL_ADDRESS:127.0.0.1:8500}
-    # Sync period in seconds. Defaults to 60 seconds.
-    period: \${SW_CONFIGURATION_CONSUL_PERIOD:60}
-EOT
-}
-
-generateTelemetryNone() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  none:
-EOT
-}
-
-generateTelemetryPrometheus() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  prometheus:
-    host: \${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-    port: \${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-EOT
-}
-
-generateTelemetrySo11y() {
-    cat <<EOT >> ${var_application_file}
-telemetry:
-  so11y:
-    prometheusExporterEnabled: \${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
-    prometheusExporterHost: \${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-    prometheusExporterPort: \${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-EOT
-}
-
-validateVariables() {
-    name=$1; value=$2; list=$3
-    valid=false
-    for c in ${list} ; do
-        if [[ "$c" = "$value" ]]; then
-            valid=true
-        fi
-    done
 
-    if ! ${valid}; then
-        echo "Error: $name=$value please specify $name = $list"
-        exit 1
-    fi
-}
-
-generateApplicationYaml() {
-    # validate
-    [[ -z "$SW_CLUSTER" ]] && [[ -z "$SW_STORAGE" ]] && [[ -z "$SW_CONFIGURATION" ]] \
-        && [[ -z "$SW_TELEMETRY" ]] \
-        && { echo "Error: please specify \"SW_CLUSTER\" \"SW_STORAGE\" \"SW_CONFIGURATION\" \"SW_TELEMETRY\""; exit 1; }
-
-    validateVariables "SW_CLUSTER" "$SW_CLUSTER" "standalone zookeeper kubernetes consul etcd nacos"
-
-    validateVariables "SW_STORAGE" "$SW_STORAGE" "elasticsearch h2 mysql influxdb"
-
-    validateVariables "SW_CONFIGURATION" "$SW_CONFIGURATION" "none apollo nacos zookeeper"
-
-    validateVariables "SW_TELEMETRY" "$SW_TELEMETRY" "none prometheus so11y"
-
-    echo "# Generated by 'docker-entrypoint.sh'" > ${var_application_file}
-    #generate cluster
-    case ${SW_CLUSTER} in
-    standalone) generateClusterStandalone;;
-    zookeeper) generateClusterZookeeper;;
-    kubernetes) generateClusterK8s;;
-    consul) generateClusterConsul;;
-    etcd) generateClusterEtcd;;
-    nacos) generateClusterNacos;;
-    esac
-
-    #generate core
-    cat <<EOT >> ${var_application_file}
-core:
-  default:
-    # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
-    # Receiver: Receive agent data, Level 1 aggregate
-    # Aggregator: Level 2 aggregate
-    role: \${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
-    restHost: \${SW_CORE_REST_HOST:0.0.0.0}
-    restPort: \${SW_CORE_REST_PORT:12800}
-    restContextPath: \${SW_CORE_REST_CONTEXT_PATH:/}
-    gRPCHost: \${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: \${SW_CORE_GRPC_PORT:11800}
-    gRPCSslEnabled: \${SW_CORE_GRPC_SSL_ENABLED:false}
-    gRPCSslKeyPath: \${SW_CORE_GRPC_SSL_KEY_PATH:""}
-    gRPCSslCertChainPath: \${SW_CORE_GRPC_SSL_CERT_CHAIN_PATH:""}
-    gRPCSslTrustedCAPath: \${SW_CORE_GRPC_SSL_TRUSTED_CA_PATH:""}
-    downsampling:
-    - Hour
-    - Day
-    - Month
-    # Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
-    enableDataKeeperExecutor: \${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # Turn it off then automatically metrics data delete will be close.
-    dataKeeperExecutePeriod: \${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs periodically, unit is minute
-    recordDataTTL: \${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
-    minuteMetricsDataTTL: \${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
-    hourMetricsDataTTL: \${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
-    dayMetricsDataTTL: \${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
-    monthMetricsDataTTL: \${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
-    # Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
-    # the metrics may not be accurate within that minute.
-    enableDatabaseSession: \${SW_CORE_ENABLE_DATABASE_SESSION:true}
-    topNReportPeriod: \${SW_CORE_TOPN_REPORT_PERIOD:10}
-EOT
-
-    # generate storage
-    case ${SW_STORAGE} in
-    elasticsearch) generateStorageElastisearch;;
-    h2) generateStorageH2;;
-    mysql) generateStorageMySQL;;
-    influxdb) generateStorageInfluxDB;;
-    esac
-
-    cat <<EOT >> ${var_application_file}
-receiver-sharing-server:
-  default:
-   restHost: \${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}
-   restPort: \${SW_RECEIVER_SHARING_REST_PORT:0}
-   restContextPath: \${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}
-   gRPCHost: \${SW_RECEIVER_SHARING_GRPC_HOST:0.0.0.0}
-   gRPCPort: \${SW_RECEIVER_SHARING_GRPC_PORT:0}
-   maxConcurrentCallsPerConnection: \${SW_RECEIVER_SHARING_MAX_CONCURRENT_CALL:0}
-   maxMessageSize: \${SW_RECEIVER_SHARING_MAX_MESSAGE_SIZE:0}
-   gRPCThreadPoolSize: \${SW_RECEIVER_SHARING_GRPC_THREAD_POOL_SIZE:0}
-   gRPCThreadPoolQueueSize: \${SW_RECEIVER_SHARING_GRPC_THREAD_POOL_QUEUE_SIZE:0}
-   authentication: \${SW_AUTHENTICATION:""}
-   gRPCSslEnabled: \${SW_RECEIVER_SHARING_GRPC_SSL_ENABLED:false}
-   gRPCSslKeyPath: \${SW_RECEIVER_SHARING_GRPC_SSL_KEY_PATH:""}
-   gRPCSslCertChainPath: \${SW_RECEIVER_SHARING_GRPC_SSL_CERT_CHAIN_PATH:""}
-receiver-register:
-  default:
-receiver-trace:
-  default:
-    bufferPath: \${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: \${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: \${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: \${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-    sampleRate: \${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
-    slowDBAccessThreshold: \${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
-receiver-jvm:
-  default:
-receiver-clr:
-  default:
-receiver-profile:
-  default:
-service-mesh:
-  default:
-    bufferPath: \${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: \${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: \${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: \${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-istio-telemetry:
-  default:
-query:
-  graphql:
-    path: \${SW_QUERY_GRAPHQL_PATH:/graphql}
-alarm:
-  default:
-EOT
-    # generate telemetry
-    case ${SW_TELEMETRY} in
-    none) generateTelemetryNone;;
-    prometheus) generateTelemetryPrometheus;;
-    so11y) generateTelemetrySo11y;;
-    esac
-
-    # generate configuration
-    case ${SW_CONFIGURATION} in
-    none) generateConfigurationNone;;
-    apollo) generateConfigurationApollo;;
-    nacos) generateConfigurationNacos;;
-    zookeeper) generateConfigurationZookeeper;;
-    consul) generateConfigurationConsul;;
-    grpc) generateConfigurationGRPC;;
-    esac
-
-    cat <<EOT >> ${var_application_file}
-envoy-metric:
-  default:
-EOT
-    if [[ "$SW_ENVOY_ALS_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-    alsHTTPAnalysis: \${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
-EOT
-    fi
-
-    if [[ "$SW_RECEIVER_ZIPKIN_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver_zipkin:
-  default:
-    host: \${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-    port: \${SW_RECEIVER_ZIPKIN_PORT:9411}
-    contextPath: \${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-EOT
-    fi
-
-    if [[ "$SW_RECEIVER_JAEGER_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver_jaeger:
-  default:
-    gRPCHost: \${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
-    gRPCPort: \${SW_RECEIVER_JAEGER_PORT:14250}
-EOT
-    fi
-
-    if [[ "$SW_TELEMETRY" = "so11y" ]]; then
-        cat <<EOT >> ${var_application_file}
-receiver-so11y:
-  default:
-EOT
-    fi
-
-    if [[ "$SW_EXPORTER_ENABLED" = "true" ]]; then
-        cat <<EOT >> ${var_application_file}
-exporter:
-  grpc:
-    targetHost: \${SW_EXPORTER_GRPC_HOST:127.0.0.1}
-    targetPort: \${SW_EXPORTER_GRPC_PORT:9870}
-EOT
-    fi
-}
-
-echo "[Entrypoint] Apache SkyWalking Docker Image"
-
-SW_CLUSTER=${SW_CLUSTER:-standalone}
-SW_STORAGE=${SW_STORAGE:-h2}
-SW_CONFIGURATION=${SW_CONFIGURATION:-none}
-SW_TELEMETRY=${SW_TELEMETRY:-none}
 EXT_LIB_DIR=/skywalking/ext-libs
 EXT_CONFIG_DIR=/skywalking/ext-config
 
-# If user wants to override application.yml, the one generated by docker-entrypoint.sh should be ignored.
-[[ -f ${EXT_CONFIG_DIR}/application.yml ]] && SW_L0AD_CONFIG_FILE_FROM_VOLUME=true
-
 # Override configuration files
 cp -vfR ${EXT_CONFIG_DIR}/ config/
-if [[ -z "$SW_L0AD_CONFIG_FILE_FROM_VOLUME" ]] || [[ "$SW_L0AD_CONFIG_FILE_FROM_VOLUME" != "true" ]]; then
-    generateApplicationYaml
-    echo "Generated application.yml"
-    echo "-------------------------"
-    cat ${var_application_file}
-    echo "-------------------------"
-fi
 
 CLASSPATH="config:$CLASSPATH"
 for i in oap-libs/*.jar
diff --git a/docs/en/setup/backend/backend-cluster.md b/docs/en/setup/backend/backend-cluster.md
index b184edf..8c21250 100644
--- a/docs/en/setup/backend/backend-cluster.md
+++ b/docs/en/setup/backend/backend-cluster.md
@@ -1,5 +1,5 @@
 # Cluster Management
-In many product environments, backend need to support high throughputs and provide HA to keep robustness,
+In many product environments, the backend needs to support high throughput and provide HA to keep robustness,
 so you should need cluster management always in product env.
  
 Backend provides several ways to do cluster management. Choose the one you need/want.
@@ -12,24 +12,18 @@ by using k8s native APIs to manage cluster.
 - [Nacos](#nacos). Use Nacos to coordinate backend instances.
 - [Etcd](#etcd). Use Etcd to coordinate backend instances.
 
+In the `application.yml`, there're default configurations for the aforementioned coordinators under the section `cluster`,
+you can specify one of them in the `selector` property to enable it.
+
 ## Zookeeper coordinator
-Zookeeper is a very common and wide used cluster coordinator. Set the **cluster** module's implementor
-to **zookeeper** in the yml to active. 
+Zookeeper is a very common and widely used cluster coordinator. Set the **cluster/selector** to **zookeeper** in the yml to enable.
 
 Required Zookeeper version, 3.4+
 
 ```yaml
 cluster:
-  zookeeper:
-    nameSpace: ${SW_NAMESPACE:""}
-    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
-    # Retry Policy
-    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
-    maxRetries: 3 # max number of times to retry
-    # Enable ACL
-    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
-    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
+  selector: ${SW_CLUSTER:zookeeper}
+  # other configurations
 ```
 
 - `hostPort` is the list of zookeeper servers. Format is `IP1:PORT1,IP2:PORT2,...,IPn:PORTn`
@@ -65,30 +59,22 @@ zookeeper:
 
 ## Kubernetes
 Require backend cluster are deployed inside kubernetes, guides are in [Deploy in kubernetes](backend-k8s.md).
-Set implementor to `kubernetes`.
+Set the selector to `kubernetes`.
 
 ```yaml
 cluster:
-  kubernetes:
-    watchTimeoutSeconds: 60
-    namespace: default
-    labelSelector: app=collector,release=skywalking
-    uidEnvName: SKYWALKING_COLLECTOR_UID
+  selector: ${SW_CLUSTER:kubernetes}
+  # other configurations
 ```
 
 ## Consul
 Now, consul is becoming a famous system, many of companies and developers using consul to be 
-their service discovery solution. Set the **cluster** module's implementor to **consul** in 
-the yml to active. 
+their service discovery solution. Set the **cluster/selector** to **consul** in the yml to enable.
 
 ```yaml
 cluster:
-  consul:
-    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Consul cluster agents, example, 1. client agent, 127.0.0.1:8500 2. server agent, 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
-    # aclToken of connection consul (optional)
-    aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN}
+  selector: ${SW_CLUSTER:consul}
+  # other configurations
 ```
 
 Same as Zookeeper coordinator,
@@ -99,27 +85,19 @@ The following setting are provided to set the host and port manually, based on y
 
 
 ## Nacos
-Set the **cluster** module's implementor to **nacos** in 
-the yml to active. 
+Set the **cluster/selector** to **nacos** in the yml to enable.
 
 ```yaml
 cluster:
-  nacos:
-    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    # Nacos cluster nodes, example: 10.0.0.1:8848,10.0.0.2:8848,10.0.0.3:8848
-    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
-    # Nacos Configuration namespace
-    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
+  selector: ${SW_CLUSTER:nacos}
+  # other configurations
 ```
 
 ## Etcd
-Set the **cluster** module's implementor to **etcd** in
-the yml to active.
+Set the **cluster/selector** to **etcd** in the yml to enable.
 
 ```yaml
 cluster:
-  etcd:
-    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-    #etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
+  selector: ${SW_CLUSTER:etcd}
+  # other configurations
 ```
diff --git a/docs/en/setup/backend/backend-setup.md b/docs/en/setup/backend/backend-setup.md
index b755de9..a93e54b 100755
--- a/docs/en/setup/backend/backend-setup.md
+++ b/docs/en/setup/backend/backend-setup.md
@@ -14,26 +14,43 @@ End user can switch or assemble the collector features by their own requirements
 
 So, in `application.yml`, there are three levels.
 1. **Level 1**, module name. Meaning this module is active in running mode.
-1. **Level 2**, provider name. Set the provider of the module.
+1. **Level 2**, provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect,
+if there is only one provider listed, the `selector` is optional and can be omitted.
 1. **Level 3**. settings of the provider.
 
 Example:
+
 ```yaml
-core:
-  default:
-    restHost: 0.0.0.0
-    restPort: 12800
-    restContextPath: /
-    gRPCHost: 0.0.0.0
-    gRPCPort: 11800
+storage:
+  selector: mysql # the mysql storage will actually be activated, while the h2 storage takes no effect
+  h2:
+    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
+    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
+    user: ${SW_STORAGE_H2_USER:sa}
+    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
+  mysql:
+    properties:
+      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_DATA_SOURCE_USER:root}
+      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
+  # other configurations
 ```
-1. **core** is the module.
-1. **default** is the default implementor of core module.
-1. `restHost`, `restPort`, ... `gRPCHost` are all setting items of the implementor.
+
+1. **`core`** is the module.
+1. **`selector`** selects one out of all the providers listed below; the unselected ones take no effect as if they were deleted.
+1. **`default`** is the default implementor of core module.
+1. `driver`, `url`, ... `metadataQueryMaxSize` are all setting items of the implementor.
 
 At the same time, modules includes required and optional, the required modules provide the skeleton of backend,
-even modularization supported pluggable, remove those modules are meaningless. We highly recommend you don't try to
-change APIs of those modules, unless you understand SkyWalking project and its codes very well.
+even though modularization is supported and pluggable, removing those modules is meaningless. For optional modules, some of them have
+a provider implementation called `none`, meaning it only provides a shell with no actual logic, typically such as telemetry.
+Setting the `selector` to `-` means this whole module will be excluded at runtime.
+We highly recommend you don't try to change APIs of those modules, unless you understand SkyWalking project and its codes very well.
 
 List the required modules here
 1. **Core**. Do basic and major skeleton of all data analysis and stream dispatch.
diff --git a/docs/en/setup/backend/backend-storage.md b/docs/en/setup/backend/backend-storage.md
index 83a6229..169de97 100644
--- a/docs/en/setup/backend/backend-storage.md
+++ b/docs/en/setup/backend/backend-storage.md
@@ -1,6 +1,11 @@
 # Backend storage
-SkyWalking storage is pluggable, we have provided the following storage solutions, you could easily 
-use is by changing the `application.yml`
+SkyWalking storage is pluggable, we have provided the following storage solutions, you could easily
+use one of them by specifying it as the `selector` in the `application.yml`:
+
+```yaml
+storage:
+  selector: ${SW_STORAGE:elasticsearch7}
+```
 
 Native supported storage
 - H2
@@ -21,6 +26,7 @@ you could set the target to H2 in **Embedded**, **Server** and **Mixed** modes.
 Setting fragment example
 ```yaml
 storage:
+  selector: ${SW_STORAGE:h2}
   h2:
     driver: org.h2.jdbcx.JdbcDataSource
     url: jdbc:h2:mem:skywalking-oap-db
@@ -40,6 +46,7 @@ For now, ElasticSearch 6 and ElasticSearch 7 share the same configurations, as f
 
 ```yaml
 storage:
+  selector: ${SW_STORAGE:elasticsearch}
   elasticsearch:
     # nameSpace: ${SW_NAMESPACE:""}
     # user: ${SW_ES_USER:""} # User needs to be set when Http Basic authentication is enabled
@@ -86,6 +93,7 @@ example:
 
 ```yaml
 storage:
+  selector: ${SW_STORAGE:elasticsearch}
   elasticsearch:
     # nameSpace: ${SW_NAMESPACE:""}
     user: ${SW_ES_USER:""} # User needs to be set when Http Basic authentication is enabled
@@ -173,6 +181,7 @@ This implementation shares most of `elasticsearch`, just extend to support zipki
 It has all same configs.
 ```yaml
 storage:
+  selector: ${SW_STORAGE:zipkin-elasticsearch}
   zipkin-elasticsearch:
     nameSpace: ${SW_NAMESPACE:""}
     clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
@@ -197,6 +206,7 @@ This implementation shares most of `elasticsearch`, just extend to support zipki
 It has all same configs.
 ```yaml
 storage:
+  selector: ${SW_STORAGE:jaeger-elasticsearch}
   jaeger-elasticsearch:
     nameSpace: ${SW_NAMESPACE:""}
     clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
@@ -233,6 +243,7 @@ Please download MySQL driver by yourself. Copy the connection driver jar to `oap
 
 ```yaml
 storage:
+  selector: ${SW_STORAGE:mysql}
   mysql:
     properties:
       jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
@@ -253,6 +264,7 @@ Active TiDB as storage, set storage provider to **mysql**.
 
 ```yaml
 storage:
+  selector: ${SW_STORAGE:mysql}
   mysql:
     properties:
       jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
@@ -272,7 +284,8 @@ InfluxDB as storage since SkyWalking 7.0. It depends on `H2/MySQL` storage-plugi
 
 ```yaml
 storage:
-  influx:
+  selector: ${SW_STORAGE:influxdb}
+  influxdb:
     # Metadata storage provider configuration
     metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
     h2Props:
diff --git a/docs/en/setup/backend/backend-telemetry.md b/docs/en/setup/backend/backend-telemetry.md
index 2115478..fa31889 100644
--- a/docs/en/setup/backend/backend-telemetry.md
+++ b/docs/en/setup/backend/backend-telemetry.md
@@ -1,23 +1,36 @@
 # Telemetry for backend
-In default, the telemetry is off, like this
+By default, the telemetry is disabled by setting `selector` to `none`, like this
+
 ```yaml
 telemetry:
+  selector: ${SW_TELEMETRY:none}
   none:
+  prometheus:
+    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+  so11y:
+    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
+    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
 ```
 
+but you can set the selector to either `prometheus` or `so11y` to enable them; for more information, refer to the details below.
+
 ## Prometheus
 Prometheus is supported as telemetry implementor. 
-By using this, prometheus collects metrics from skywalking backend.
+By using this, prometheus collects metrics from SkyWalking backend.
 
 Set `prometheus` to provider. The endpoint open at `http://0.0.0.0:1234/` and `http://0.0.0.0:1234/metrics`.
 ```yaml
 telemetry:
+  selector: ${SW_TELEMETRY:prometheus}
   prometheus:
 ```
 
 Set host and port if needed.
 ```yaml
 telemetry:
+  selector: ${SW_TELEMETRY:prometheus}
   prometheus:
     host: 127.0.0.1
     port: 1543
@@ -38,15 +51,18 @@ Adding following configuration to enable `so11y`(self-observability) related mod
 
 ```yaml
 receiver-so11y:
+  selector: ${SW_RECEIVER_SO11Y:default}
   default:
 telemetry:
-  so11y:
+  selector: ${SW_TELEMETRY:so11y}
+  # ... other configurations
 ```
 
 Another example represents how to combine `prometheus` and `so11y`. Adding some items in `so11y` to make it happen.
 
 ```yaml
 telemetry:
+  selector: ${SW_TELEMETRY:so11y}
   so11y:
     prometheusExporterEnabled: true
     prometheusExporterHost: 0.0.0.0
diff --git a/docs/en/setup/backend/dynamic-config.md b/docs/en/setup/backend/dynamic-config.md
index dffb658..ce740ab 100755
--- a/docs/en/setup/backend/dynamic-config.md
+++ b/docs/en/setup/backend/dynamic-config.md
@@ -12,10 +12,18 @@ Right now, SkyWalking supports following dynamic configurations.
 |core.default.apdexThreshold| The apdex threshold settings, will override `service-apdex-threshold.yml`. | same as [`service-apdex-threshold.yml`](apdex-threshold.md) |
 
 
-This feature depends on upstream service, so it is **OFF** as default.
+This feature depends on upstream service, so it is **DISABLED** by default.
+
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:none}
   none:
+  apollo:
+    apolloMeta: http://106.12.25.204:8080
+    apolloCluster: default
+    appId: skywalking
+    period: 5
+  # ... other implementations
 ```
 
 ## Dynamic Configuration Service, DCS
@@ -25,13 +33,10 @@ The SkyWalking OAP fetches the configuration from the implementation(any system)
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:grpc}
   grpc:
-    # Upstream system hostname
     host: 127.0.0.1
-    # Upstream system port
     port: 9555
-    #period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-    #clusterName: "default" # the name of current cluster, set the name if you want to upstream system known.  
 ```
 
 ## Dynamic Configuration Apollo Implementation
@@ -40,10 +45,10 @@ configuration:
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:apollo}
   apollo:
     apolloMeta: <your apollo meta address>
     apolloCluster: default
-    # apolloEnv: # defaults to null
     appId: skywalking
     period: 5
 ```
@@ -54,19 +59,15 @@ configuration:
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:nacos}
   nacos:
-    # Nacos Server Host
     serverAddr: 127.0.0.1
-    # Nacos Server Port
     port: 8848
-    # Nacos Configuration Group
     group: 'skywalking'
-    # Nacos Configuration namespace
     namespace: ''
-    # Unit seconds, sync period. Default fetch every 60 seconds.
     period : 60
-    # the name of current cluster, set the name if you want to upstream system known.
     clusterName: "default"
+  # ... other configurations
 ```
 
 
@@ -76,13 +77,14 @@ configuration:
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:zookeeper}
   zookeeper:
     period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
     nameSpace: /default
     hostPort: localhost:2181
-    #Retry Policy
     baseSleepTimeMs: 1000 # initial amount of time to wait between retries
     maxRetries: 3 # max number of times to retry
+  # ... other configurations
 ```
 
 ## Dynamic Configuration Etcd Implementation
@@ -91,11 +93,13 @@ configuration:
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:etcd}
   etcd:
     period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
     group :  'skywalking'
     serverAddr: localhost:2379
     clusterName: "default"
+  # ... other configurations
 ```
 
 ## Dynamic Configuration Consul Implementation
@@ -104,6 +108,7 @@ configuration:
 
 ```yaml
 configuration:
+  selector: ${SW_CONFIGURATION:consul}
   consul:
     # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
     hostAndPorts: 127.0.0.1:8500
@@ -111,6 +116,7 @@ configuration:
     period: 60
     # aclToken of connection consul (optional)
     aclToken: ${consul.aclToken}
+  # ... other configurations
 ```
 
 
diff --git a/oap-server/server-bootstrap/src/main/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoader.java b/oap-server/server-bootstrap/src/main/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoader.java
index 9ea3773..cd612f3 100644
--- a/oap-server/server-bootstrap/src/main/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoader.java
+++ b/oap-server/server-bootstrap/src/main/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoader.java
@@ -20,10 +20,13 @@ package org.apache.skywalking.oap.server.starter.config;
 
 import java.io.FileNotFoundException;
 import java.io.Reader;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 import org.apache.skywalking.apm.util.PropertyPlaceholderHelper;
 import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration;
+import org.apache.skywalking.oap.server.library.module.ProviderNotFoundException;
 import org.apache.skywalking.oap.server.library.util.CollectionUtils;
 import org.apache.skywalking.oap.server.library.util.ResourceUtils;
 import org.slf4j.Logger;
@@ -40,6 +43,9 @@ public class ApplicationConfigLoader implements ConfigLoader<ApplicationConfigur
 
     private static final Logger logger = LoggerFactory.getLogger(ApplicationConfigLoader.class);
 
+    private static final String DISABLE_SELECTOR = "-";
+    private static final String SELECTOR = "selector";
+
     private final Yaml yaml = new Yaml();
 
     @Override
@@ -54,15 +60,17 @@ public class ApplicationConfigLoader implements ConfigLoader<ApplicationConfigur
     private void loadConfig(ApplicationConfiguration configuration) throws ConfigFileNotFoundException {
         try {
             Reader applicationReader = ResourceUtils.read("application.yml");
-            Map<String, Map<String, Map<String, ?>>> moduleConfig = yaml.loadAs(applicationReader, Map.class);
+            Map<String, Map<String, Object>> moduleConfig = yaml.loadAs(applicationReader, Map.class);
             if (CollectionUtils.isNotEmpty(moduleConfig)) {
+                selectConfig(moduleConfig);
                 moduleConfig.forEach((moduleName, providerConfig) -> {
                     if (providerConfig.size() > 0) {
                         logger.info("Get a module define from application.yml, module name: {}", moduleName);
                         ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.addModule(moduleName);
-                        providerConfig.forEach((providerName, propertiesConfig) -> {
+                        providerConfig.forEach((providerName, config) -> {
                             logger.info("Get a provider define belong to {} module, provider name: {}", moduleName, providerName);
-                            Properties properties = new Properties();
+                            final Map<String, ?> propertiesConfig = (Map<String, ?>) config;
+                            final Properties properties = new Properties();
                             if (propertiesConfig != null) {
                                 propertiesConfig.forEach((propertyName, propertyValue) -> {
                                     if (propertyValue instanceof Map) {
@@ -105,6 +113,48 @@ public class ApplicationConfigLoader implements ConfigLoader<ApplicationConfigur
         }
     }
 
+    private void selectConfig(final Map<String, Map<String, Object>> moduleConfiguration) {
+        final Set<String> modulesWithoutProvider = new HashSet<>();
+        for (final Map.Entry<String, Map<String, Object>> entry : moduleConfiguration.entrySet()) {
+            final String moduleName = entry.getKey();
+            final Map<String, Object> providerConfig = entry.getValue();
+            if (!providerConfig.containsKey(SELECTOR)) {
+                continue;
+            }
+            final String selector = (String) providerConfig.get(SELECTOR);
+            final String resolvedSelector = PropertyPlaceholderHelper.INSTANCE.replacePlaceholders(
+                selector, System.getProperties()
+            );
+            providerConfig.entrySet().removeIf(e -> !resolvedSelector.equals(e.getKey()));
+
+            if (!providerConfig.isEmpty()) {
+                continue;
+            }
+
+            if (!DISABLE_SELECTOR.equals(resolvedSelector)) {
+                throw new ProviderNotFoundException(
+                    "no provider found for module " + moduleName + ", " +
+                        "if you're sure it's not required module and want to remove it, " +
+                        "set the selector to -"
+                );
+            }
+
+            // now the module can be safely removed
+            modulesWithoutProvider.add(moduleName);
+        }
+
+        moduleConfiguration.entrySet().removeIf(e -> {
+            final String module = e.getKey();
+            final boolean shouldBeRemoved = modulesWithoutProvider.contains(module);
+
+            if (shouldBeRemoved) {
+                logger.info("Remove module {} without any provider", module);
+            }
+
+            return shouldBeRemoved;
+        });
+    }
+
     private void overrideModuleSettings(ApplicationConfiguration configuration, String key, String value) {
         int moduleAndConfigSeparator = key.indexOf('.');
         if (moduleAndConfigSeparator <= 0) {
diff --git a/oap-server/server-bootstrap/src/main/resources/application.yml b/oap-server/server-bootstrap/src/main/resources/application.yml
index ee10672..7c79625 100755
--- a/oap-server/server-bootstrap/src/main/resources/application.yml
+++ b/oap-server/server-bootstrap/src/main/resources/application.yml
@@ -14,40 +14,42 @@
 # limitations under the License.
 
 cluster:
+  selector: ${SW_CLUSTER:standalone}
   standalone:
-#   Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
-#   library the oap-libs folder with your ZooKeeper 3.4.x library.
-#  zookeeper:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
-#    #Retry Policy
-#    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-#    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-#    # Enable ACL
-#    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-#    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
-#    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
-#  kubernetes:
-#    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
-#    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
-#    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-#    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
-#  consul:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-#    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
-#    # Consul aclToken
-#    #aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN}
-#  nacos:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#    # Nacos Configuration namespace
-#    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
-#    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
-#  etcd:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-#    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
+  # Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
+  # library the oap-libs folder with your ZooKeeper 3.4.x library.
+  zookeeper:
+    nameSpace: ${SW_NAMESPACE:""}
+    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
+    # Retry Policy
+    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
+    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
+    # Enable ACL
+    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
+    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
+    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
+  kubernetes:
+    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
+    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
+    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
+    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
+  consul:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
+    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
+    aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN:""}
+  nacos:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
+    # Nacos Configuration namespace
+    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
+  etcd:
+    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
+    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
+    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
+
 core:
+  selector: ${SW_CORE:default}
   default:
     # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
     # Receiver: Receive agent data, Level 1 aggregate
@@ -82,39 +84,41 @@ core:
     # and it will cause more load for memory, network of OAP and storage.
    # But, once activated, users can see the names in the storage entities, which makes it easier to use 3rd-party tools, such as Kibana->ES, to query the data themselves.
     activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:false}
+
 storage:
-#  elasticsearch:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-#    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-#    #trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-#    #trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
-#    user: ${SW_ES_USER:""}
-#    password: ${SW_ES_PASSWORD:""}
-#    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
-#    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
-#    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
-#    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-#    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-#    # Those data TTL settings will override the same settings in core module.
-#    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-#    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-#    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-#    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-#    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
-#    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-#    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-#    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-#    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-#    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-#    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
-#    advanced: ${SW_STORAGE_ES_ADVANCED:""}
+  selector: ${SW_STORAGE:h2}
+  elasticsearch:
+    nameSpace: ${SW_NAMESPACE:""}
+    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
+    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
+    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
+    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
+    user: ${SW_ES_USER:""}
+    password: ${SW_ES_PASSWORD:""}
+    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
+    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
+    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
+    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
+    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
+    # Those data TTL settings will override the same settings in core module.
+    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
+    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
+    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
+    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
+    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
+    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
+    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
+    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
+    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
+    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
+    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
+    advanced: ${SW_STORAGE_ES_ADVANCED:""}
   elasticsearch7:
     nameSpace: ${SW_NAMESPACE:""}
     clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
     protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-    #trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-    #trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
+    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
+    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
     enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
     dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
     user: ${SW_ES_USER:""}
@@ -133,56 +137,62 @@ storage:
     resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
     metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
     segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
+    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
     advanced: ${SW_STORAGE_ES_ADVANCED:""}
-#  h2:
-#    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-#    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-#    user: ${SW_STORAGE_H2_USER:sa}
-#    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-#  mysql:
-#    properties:
-#      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-#      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-#  influx:
-#    # Metadata storage provider configuration
-#    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
-#    h2Props:
-#      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
-#      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
-#    mysqlProps:
-#      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
-#    # InfluxDB configuration
-#    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-#    user: ${SW_STORAGE_INFLUXDB_USER:root}
-#    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
-#    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-#    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-#    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-#    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
+  h2:
+    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
+    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
+    user: ${SW_STORAGE_H2_USER:sa}
+    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
+  mysql:
+    properties:
+      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_DATA_SOURCE_USER:root}
+      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
+  influxdb:
+    # Metadata storage provider configuration
+    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
+    h2Props:
+      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
+      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
+    mysqlProps:
+      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
+    # InfluxDB configuration
+    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
+    user: ${SW_STORAGE_INFLUXDB_USER:root}
+    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
+    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
+    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
+    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
+    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
+
 receiver-sharing-server:
+  selector: ${SW_RECEIVER_SHARING_SERVER:default}
   default:
     authentication: ${SW_AUTHENTICATION:""}
     gRPCSslEnabled: ${SW_RECEIVER_SHARING_GRPC_SSL_ENABLED:false}
     gRPCSslKeyPath: ${SW_RECEIVER_SHARING_GRPC_SSL_KEY_PATH:""}
     gRPCSslCertChainPath: ${SW_RECEIVER_SHARING_GRPC_SSL_CERT_CHAIN_PATH:""}
 receiver-register:
+  selector: ${SW_RECEIVER_REGISTER:default}
   default:
+
 receiver-trace:
+  selector: ${SW_RECEIVER_TRACE:default}
   default:
     bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
@@ -190,91 +200,117 @@ receiver-trace:
     bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
     sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
     slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
+
 receiver-jvm:
+  selector: ${SW_RECEIVER_JVM:default}
   default:
+
 receiver-clr:
+  selector: ${SW_RECEIVER_CLR:default}
   default:
-#receiver-so11y:
-#  default:
+
+receiver-profile:
+  selector: ${SW_RECEIVER_PROFILE:default}
+  default:
+
 service-mesh:
+  selector: ${SW_SERVICE_MESH:default}
   default:
     bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
     bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
     bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
+
 istio-telemetry:
+  selector: ${SW_ISTIO_TELEMETRY:default}
   default:
+
 envoy-metric:
+  selector: ${SW_ENVOY_METRIC:default}
   default:
-receiver-profile:
+    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}
+
+receiver_zipkin:
+  selector: ${SW_RECEIVER_ZIPKIN:-}
+  default:
+    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
+    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
+    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
+
+receiver_jaeger:
+  selector: ${SW_RECEIVER_JAEGER:-}
   default:
-#    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
-#receiver_zipkin:
-#  default:
-#    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-#    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
-#    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-#receiver_jaeger:
-#  default:
-#    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
-#    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
+    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
+    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
+
 query:
+  selector: ${SW_QUERY:graphql}
   graphql:
     path: ${SW_QUERY_GRAPHQL_PATH:/graphql}
+
 alarm:
+  selector: ${SW_ALARM:default}
   default:
+
 telemetry:
+  selector: ${SW_TELEMETRY:none}
   none:
-#  prometheus:
-#    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-#    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-#  so11y:
-#    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
-#    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-#    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+  prometheus:
+    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+  so11y:
+    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
+    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
+    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
+
+receiver-so11y:
+  selector: ${SW_RECEIVER_SO11Y:-}
+  default:
+
 configuration:
+  selector: ${SW_CONFIGURATION:none}
   none:
-#  apollo:
-#    apolloMeta: http://106.12.25.204:8080
-#    apolloCluster: default
-#    # apolloEnv: # defaults to null
-#    appId: skywalking
-#    period: 5
-#  nacos:
-#    # Nacos Server Host
-#    serverAddr: 127.0.0.1
-#    # Nacos Server Port
-#    port: 8848
-#    # Nacos Configuration Group
-#    group: 'skywalking'
-#    # Nacos Configuration namespace
-#    namespace: ''
-#    # Unit seconds, sync period. Default fetch every 60 seconds.
-#    period : 5
-#    # the name of current cluster, set the name if you want to upstream system known.
-#    clusterName: "default"
-#  zookeeper:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    nameSpace: /default
-#    hostPort: localhost:2181
-#    #Retry Policy
-#    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
-#    maxRetries: 3 # max number of times to retry
-#  etcd:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    group :  'skywalking'
-#    serverAddr: localhost:2379
-#    clusterName: "default"
-#  consul:
-#    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-#    hostAndPorts: ${consul.address}
-#    # Sync period in seconds. Defaults to 60 seconds.
-#    period: 1
-#    # Consul aclToken
-#    #aclToken: ${consul.aclToken}
-
-#exporter:
-#  grpc:
-#    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
-#    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
+  apollo:
+    apolloMeta: http://106.12.25.204:8080
+    apolloCluster: default
+    apolloEnv: ""
+    appId: skywalking
+    period: 5
+  nacos:
+    # Nacos Server Host
+    serverAddr: 127.0.0.1
+    # Nacos Server Port
+    port: 8848
+    # Nacos Configuration Group
+    group: 'skywalking'
+    # Nacos Configuration namespace
+    namespace: ''
+    # Unit seconds, sync period. Default fetch every 60 seconds.
+    period : 60
+    # the name of current cluster, set the name if you want to upstream system known.
+    clusterName: "default"
+  zookeeper:
+    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
+    nameSpace: /default
+    hostPort: localhost:2181
+    # Retry Policy
+    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
+    maxRetries: 3 # max number of times to retry
+  etcd:
+    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
+    group :  'skywalking'
+    serverAddr: localhost:2379
+    clusterName: "default"
+  consul:
+    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
+    hostAndPorts: ${consul.address}
+    # Sync period in seconds. Defaults to 60 seconds.
+    period: 1
+    # Consul aclToken
+    #aclToken: ${consul.aclToken}
 
+exporter:
+  selector: ${SW_EXPORTER:-}
+  grpc:
+    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
+    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
diff --git a/oap-server/server-bootstrap/src/test/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoaderTestCase.java b/oap-server/server-bootstrap/src/test/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoaderTestCase.java
index e0a092b..fb0929e 100644
--- a/oap-server/server-bootstrap/src/test/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoaderTestCase.java
+++ b/oap-server/server-bootstrap/src/test/java/org/apache/skywalking/oap/server/starter/config/ApplicationConfigLoaderTestCase.java
@@ -33,6 +33,7 @@ public class ApplicationConfigLoaderTestCase {
 
     @Before
     public void setUp() throws ConfigFileNotFoundException {
+        System.setProperty("SW_STORAGE", "mysql");
         ApplicationConfigLoader configLoader = new ApplicationConfigLoader();
         applicationConfiguration = configLoader.load();
     }
diff --git a/oap-server/server-bootstrap/src/test/resources/application.yml b/oap-server/server-bootstrap/src/test/resources/application.yml
deleted file mode 100755
index 4fabdd8..0000000
--- a/oap-server/server-bootstrap/src/test/resources/application.yml
+++ /dev/null
@@ -1,181 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cluster:
-  standalone:
-#   Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
-#   library the oap-libs folder with your ZooKeeper 3.4.x library.
-#  zookeeper:
-#    nameSpace: ${SW_NAMESPACE:""}
-#    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
-#    #Retry Policy
-#    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
-#    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
-#    # Enable ACL
-#    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
-#    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
-#    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
-#  kubernetes:
-#    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
-#    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
-#    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
-#    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
-#  consul:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
-#    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
-#  nacos:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#    # Nacos Configuration namespace
-#    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
-#    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
-#  etcd:
-#    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
-#     etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
-#    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
-core:
-  default:
-    # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
-    # Receiver: Receive agent data, Level 1 aggregate
-    # Aggregator: Level 2 aggregate
-    role: ${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
-    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
-    restPort: ${SW_CORE_REST_PORT:12800}
-    restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
-    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
-    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
-    downsampling:
-      - Hour
-      - Day
-      - Month
-    # Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
-    enableDataKeeperExecutor: ${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # Turn it off then automatically metrics data delete will be close.
-    dataKeeperExecutePeriod: ${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs periodically, unit is minute
-    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
-    minuteMetricsDataTTL: ${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
-    hourMetricsDataTTL: ${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
-    dayMetricsDataTTL: ${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
-    monthMetricsDataTTL: ${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
-    # Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
-    # the metrics may not be accurate within that minute.
-    enableDatabaseSession: ${SW_CORE_ENABLE_DATABASE_SESSION:true}
-storage:
-  mysql:
-    properties:
-      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-receiver-sharing-server:
-  default:
-receiver-register:
-  default:
-receiver-trace:
-  default:
-    bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: ${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-    sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
-    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
-receiver-jvm:
-  default:
-receiver-clr:
-  default:
-#receiver-so11y:
-#  default:
-receiver-profile:
-  default:
-service-mesh:
-  default:
-    bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
-    bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
-    bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
-    bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
-istio-telemetry:
-  default:
-envoy-metric:
-  default:
-#    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
-#receiver_zipkin:
-#  default:
-#    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
-#    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
-#    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
-#receiver_jaeger:
-#  default:
-#    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
-#    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
-query:
-  graphql:
-    path: ${SW_QUERY_GRAPHQL_PATH:/graphql}
-alarm:
-  default:
-telemetry:
-  none:
-#  prometheus:
-#    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-#    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-#  so11y:
-#    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
-#    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
-#    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
-configuration:
-  none:
-#  apollo:
-#    apolloMeta: http://106.12.25.204:8080
-#    apolloCluster: default
-#    # apolloEnv: # defaults to null
-#    appId: skywalking
-#    period: 5
-#  nacos:
-#    # Nacos Server Host
-#    serverAddr: 127.0.0.1
-#    # Nacos Server Port
-#    port: 8848
-#    # Nacos Configuration Group
-#    group: 'skywalking'
-#    # Nacos Configuration namespace
-#    namespace: ''
-#    # Unit seconds, sync period. Default fetch every 60 seconds.
-#    period : 5
-#    # the name of current cluster, set the name if you want to upstream system known.
-#    clusterName: "default"
-#  zookeeper:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    nameSpace: /default
-#    hostPort: localhost:2181
-#    #Retry Policy
-#    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
-#    maxRetries: 3 # max number of times to retry
-#  etcd:
-#    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
-#    group :  'skywalking'
-#    serverAddr: localhost:2379
-#    clusterName: "default"
-#  consul:
-#    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
-#    hostAndPorts: ${consul.address}
-#    # Sync period in seconds. Defaults to 60 seconds.
-#    period: 1
-
-#exporter:
-#  grpc:
-#    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
-#    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
diff --git a/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ApplicationConfiguration.java b/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ApplicationConfiguration.java
index abb5e1e..f4f0586 100644
--- a/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ApplicationConfiguration.java
+++ b/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ApplicationConfiguration.java
@@ -48,7 +48,7 @@ public class ApplicationConfiguration {
     /**
      * The configurations about a certain module.
      */
-    public class ModuleConfiguration {
+    public static class ModuleConfiguration {
         private HashMap<String, ProviderConfiguration> providers = new HashMap<>();
 
         private ModuleConfiguration() {
@@ -72,7 +72,7 @@ public class ApplicationConfiguration {
     /**
      * The configuration about a certain provider of a module.
      */
-    public class ProviderConfiguration {
+    public static class ProviderConfiguration {
         private Properties properties;
 
         ProviderConfiguration(Properties properties) {
diff --git a/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ModuleConfigException.java b/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ModuleConfigException.java
index b70829d..837412b 100644
--- a/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ModuleConfigException.java
+++ b/oap-server/server-library/library-module/src/main/java/org/apache/skywalking/oap/server/library/module/ModuleConfigException.java
@@ -20,6 +20,10 @@ package org.apache.skywalking.oap.server.library.module;
 
 public class ModuleConfigException extends Exception {
 
+    public ModuleConfigException(String message) {
+        super(message);
+    }
+
     public ModuleConfigException(String message, Throwable cause) {
         super(message, cause);
     }
diff --git a/oap-server/server-library/library-util/src/test/java/org/apache/skywalking/oap/server/library/util/PropertyPlaceholderHelperTest.java b/oap-server/server-library/library-util/src/test/java/org/apache/skywalking/oap/server/library/util/PropertyPlaceholderHelperTest.java
index 38aad4a..71bff49 100644
--- a/oap-server/server-library/library-util/src/test/java/org/apache/skywalking/oap/server/library/util/PropertyPlaceholderHelperTest.java
+++ b/oap-server/server-library/library-util/src/test/java/org/apache/skywalking/oap/server/library/util/PropertyPlaceholderHelperTest.java
@@ -47,11 +47,13 @@ public class PropertyPlaceholderHelperTest {
     @Before
     public void init() throws FileNotFoundException {
         Reader applicationReader = ResourceUtils.read("application.yml");
-        Map<String, Map<String, Map<String, ?>>> moduleConfig = yaml.loadAs(applicationReader, Map.class);
+        Map<String, Map<String, Object>> moduleConfig = yaml.loadAs(applicationReader, Map.class);
         if (CollectionUtils.isNotEmpty(moduleConfig)) {
             moduleConfig.forEach((moduleName, providerConfig) -> {
+                selectConfig(providerConfig);
                 if (providerConfig.size() > 0) {
-                    providerConfig.forEach((name, propertiesConfig) -> {
+                    providerConfig.forEach((name, config) -> {
+                        final Map<String, ?> propertiesConfig = (Map<String, ?>) config;
                         if (propertiesConfig != null) {
                             propertiesConfig.forEach((key, value) -> properties.put(key, value));
                         }
@@ -92,4 +94,18 @@ public class PropertyPlaceholderHelperTest {
         //revert environment variables changes after the test for safe.
         environmentVariables.clear("REST_HOST");
     }
+
+    private void selectConfig(final Map<String, Object> configuration) {
+        if (configuration.size() <= 1) {
+            return;
+        }
+        if (configuration.containsKey("selector")) {
+            final String selector = (String) configuration.get("selector");
+            final String resolvedSelector = PropertyPlaceholderHelper.INSTANCE.replacePlaceholders(
+                selector, System.getProperties()
+            );
+            configuration.entrySet().removeIf(e -> !resolvedSelector.equals(e.getKey()));
+        }
+    }
+
 }
diff --git a/oap-server/server-library/library-util/src/test/resources/application.yml b/oap-server/server-library/library-util/src/test/resources/application.yml
index a95e875..cc03461 100755
--- a/oap-server/server-library/library-util/src/test/resources/application.yml
+++ b/oap-server/server-library/library-util/src/test/resources/application.yml
@@ -14,17 +14,19 @@
 # limitations under the License.
 
 cluster:
+  selector: ${SW_CLUSTER:standalone}
   standalone:
-#  zookeeper:
-#    hostPort: localhost:2181
-#    # Retry Policy
-#    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
-#    maxRetries: 3 # max number of times to retry
-#  kubernetes:
-#    watchTimeoutSeconds: 60
-#    namespace: default
-#    labelSelector: app=collector,release=skywalking
-#    uidEnvName: SKYWALKING_COLLECTOR_UID
+  zookeeper:
+    hostPort: localhost:2181
+    # Retry Policy
+    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
+    maxRetries: 3 # max number of times to retry
+  kubernetes:
+    watchTimeoutSeconds: 60
+    namespace: default
+    labelSelector: app=collector,release=skywalking
+    uidEnvName: SKYWALKING_COLLECTOR_UID
+
 core:
   default:
     restHost: ${REST_HOST:0.0.0.0}
@@ -42,7 +44,9 @@ core:
     hourMetricsDataTTL: ${HOUR_METRIC_DATA_TTL:36} # Unit is hour
     dayMetricsDataTTL: ${DAY_METRIC_DATA_TTL:45} # Unit is day
     monthMetricsDataTTL: ${MONTH_METRIC_DATA_TTL:18} # Unit is month
+
 storage:
+  selector: ${SW_STORAGE:elasticsearch}
   elasticsearch:
     clusterNodes: ${ES_CLUSTER_ADDRESS:localhost:9200}
     indexShardsNumber: ${ES_INDEX_SHARDS_NUMBER:2}
@@ -52,32 +56,41 @@ storage:
     bulkSize: ${ES_BULK_SIZE:20} # flush the bulk every 20mb
     flushInterval: ${ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
     concurrentRequests: ${ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-#  h2:
-#    driver: org.h2.jdbcx.JdbcDataSource
-#    url: jdbc:h2:mem:skywalking-oap-db
-#    user: sa
+
+  h2:
+    driver: org.h2.jdbcx.JdbcDataSource
+    url: jdbc:h2:mem:skywalking-oap-db
+    user: sa
+
 receiver-register:
   default:
+
 receiver-trace:
   default:
     bufferPath: ${RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
     bufferDataMaxFileSize: ${RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
     bufferFileCleanWhenRestart: ${RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
+
 receiver-jvm:
   default:
+
 receiver-profile:
   default:
+
 service-mesh:
   default:
     bufferPath: ${SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
     bufferOffsetMaxFileSize: ${SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
     bufferDataMaxFileSize: ${SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
     bufferFileCleanWhenRestart: ${SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
+
 istio-telemetry:
   default:
+
 query:
   graphql:
     path: ${QUERY_GRAPHQL_PATH:/graphql}
+
 alarm:
   default:
diff --git a/oap-server/server-storage-plugin/storage-influxdb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/influxdb/InfluxStorageProvider.java b/oap-server/server-storage-plugin/storage-influxdb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/influxdb/InfluxStorageProvider.java
index 1a95ee3..e294ccb 100644
--- a/oap-server/server-storage-plugin/storage-influxdb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/influxdb/InfluxStorageProvider.java
+++ b/oap-server/server-storage-plugin/storage-influxdb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/influxdb/InfluxStorageProvider.java
@@ -86,7 +86,7 @@ public class InfluxStorageProvider extends ModuleProvider {
 
     @Override
     public String name() {
-        return "influx";
+        return "influxdb";
     }
 
     @Override
diff --git a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/pom.xml b/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/pom.xml
index 2dbfab7..d8c3b5e 100755
--- a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/pom.xml
+++ b/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/pom.xml
@@ -114,10 +114,12 @@
                             <run>
                                 <env>
                                     <MODE>cluster</MODE>
+                                    <SW_STORAGE>${SW_STORAGE}</SW_STORAGE>
                                     <ES_VERSION>${elasticsearch.version}</ES_VERSION>
                                     <SW_STORAGE_ES_CLUSTER_NODES>
                                         ${e2e.container.name.prefix}-elasticsearch:9200
                                     </SW_STORAGE_ES_CLUSTER_NODES>
+                                    <SW_CLUSTER>zookeeper</SW_CLUSTER>
                                     <SW_CLUSTER_ZK_HOST_PORT>
                                         ${e2e.container.name.prefix}-zookeeper:2181
                                     </SW_CLUSTER_ZK_HOST_PORT>
diff --git a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/clusterize.awk b/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/clusterize.awk
deleted file mode 100644
index 830ede7..0000000
--- a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/clusterize.awk
+++ /dev/null
@@ -1,96 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#!/usr/bin/awk -f
-
-BEGIN {
-    in_cluster_section=0;
-    in_cluster_zk_section=0;
-
-    in_storage_section=0;
-    in_storage_es_section=0;
-    in_storage_h2_section=0;
-}
-
-{
-    if (in_cluster_section == 0) {
-        in_cluster_section=$0 ~ /^cluster:$/
-    } else {
-        in_cluster_section=$0 ~ /^(#|\s{2})/
-    }
-    if (in_storage_section == 0) {
-        in_storage_section=$0 ~ /^storage:$/
-    } else {
-        in_storage_section=$0 ~ /^(#|\s{2})/
-    }
-
-    if (in_cluster_section == 1) {
-        # in the cluster: section now
-        # disable standalone module
-        if ($0 ~ /^  standalone:$/) {
-            print "#" $0
-        } else {
-            if (in_cluster_zk_section == 0) {
-                in_cluster_zk_section=$0 ~ /^#?\s+zookeeper:$/
-            } else {
-                in_cluster_zk_section=$0 ~ /^(#\s{4}|\s{2})/
-            }
-            if (in_cluster_zk_section == 1) {
-                # in the cluster.zookeeper section now
-                # uncomment zk config
-                gsub("^#", "", $0)
-                print
-            } else {
-                print
-            }
-        }
-    } else if (in_storage_section == 1) {
-        # in the storage: section now
-        # disable h2 module
-        if (in_storage_es_section == 0) {
-            if (ENVIRON["ES_VERSION"] ~ /^6.+/) {
-                in_storage_es_section=$0 ~ /^#?\s+elasticsearch:$/
-            } else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
-                in_storage_es_section=$0 ~ /^#?\s+elasticsearch7:$/
-            }
-        } else {
-            in_storage_es_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_h2_section == 0) {
-            in_storage_h2_section=$0 ~ /^#?\s+h2:$/
-        } else {
-            in_storage_h2_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_es_section == 1) {
-            # in the storage.elasticsearch section now
-            # uncomment es config
-            gsub("^#", "", $0)
-            print
-        } else if (in_storage_h2_section == 1) {
-            # comment out h2 config
-            if ($0 !~ /^#/) {
-                print "#" $0
-            } else {
-                print
-            }
-        } else {
-            print
-        }
-    } else {
-        print
-    }
-}
-
diff --git a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/rc.d/rc0-prepare.sh b/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/rc.d/rc0-prepare.sh
index 92759b5..5bdfd86 100755
--- a/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/rc.d/rc0-prepare.sh
+++ b/test/e2e/e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner/src/docker/rc.d/rc0-prepare.sh
@@ -20,8 +20,6 @@ if test "${MODE}" = "cluster"; then
 
     # substitute application.yml to be capable of cluster mode
     cd ${SW_HOME}/config \
-        && gawk -f /clusterize.awk application.yml > clusterized_app.yml \
-        && mv clusterized_app.yml application.yml \
         && echo '
 gateways:
   - name: proxy0
diff --git a/test/e2e/e2e-cluster/e2e-cluster-test-runner/pom.xml b/test/e2e/e2e-cluster/e2e-cluster-test-runner/pom.xml
index 45810a6..83383b9 100755
--- a/test/e2e/e2e-cluster/e2e-cluster-test-runner/pom.xml
+++ b/test/e2e/e2e-cluster/e2e-cluster-test-runner/pom.xml
@@ -111,6 +111,7 @@
                                         <env>
                                             <MODE>cluster</MODE>
                                             <STORAGE>elasticsearch</STORAGE>
+                                            <SW_STORAGE>${SW_STORAGE}</SW_STORAGE>
                                             <ES_VERSION>${elasticsearch.version}</ES_VERSION>
                                             <SW_STORAGE_ES_CLUSTER_NODES>
                                                 ${e2e.container.name.prefix}-elasticsearch:9200
@@ -250,6 +251,7 @@
                                     <run>
                                         <env>
                                             <MODE>cluster</MODE>
+                                            <SW_STORAGE>${SW_STORAGE}</SW_STORAGE>
                                             <STORAGE>influxdb</STORAGE>
                                             <SW_STORAGE_METABASE_TYPE>mysql</SW_STORAGE_METABASE_TYPE>
                                             <SW_STORAGE_METABASE_URL>
diff --git a/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/clusterize.awk b/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/clusterize.awk
deleted file mode 100644
index 135b33f..0000000
--- a/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/clusterize.awk
+++ /dev/null
@@ -1,101 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#!/usr/bin/awk -f
-
-BEGIN {
-    in_cluster_section=0;
-    in_cluster_zk_section=0;
-
-    in_storage_section=0;
-    in_storage_h2_section=0;
-    in_storage_selected=0;
-}
-
-{
-    if (in_cluster_section == 0) {
-        in_cluster_section=$0 ~ /^cluster:$/
-    } else {
-        in_cluster_section=$0 ~ /^(#|\s{2})/
-    }
-    if (in_storage_section == 0) {
-        in_storage_section=$0 ~ /^storage:$/
-    } else {
-        in_storage_section=$0 ~ /^(#|\s{2})/
-    }
-
-    if (in_cluster_section == 1) {
-        # in the cluster: section now
-        # disable standalone module
-        if ($0 ~ /^  standalone:$/) {
-            print "#" $0
-        } else {
-            if (in_cluster_zk_section == 0) {
-                in_cluster_zk_section=$0 ~ /^#?\s+zookeeper:$/
-            } else {
-                in_cluster_zk_section=$0 ~ /^(#\s{4}|\s{2})/
-            }
-            if (in_cluster_zk_section == 1) {
-                # in the cluster.zookeeper section now
-                # uncomment zk config
-                gsub("^#", "", $0)
-                print
-            } else {
-                print
-            }
-        }
-    } else if (in_storage_section == 1) {
-        # in the storage: section now
-        # disable h2 module
-        if (in_storage_selected == 0) {
-            if (ENVIRON["STORAGE"] == "elasticsearch") {
-                if (ENVIRON["ES_VERSION"] ~ /^6.+/) {
-                    in_storage_selected=$0 ~ /^#?\s+elasticsearch:$/
-                } else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
-                    in_storage_selected=$0 ~ /^#?\s+elasticsearch7:$/
-                }
-            } else if (ENVIRON["STORAGE"] == "influxdb") {
-                in_storage_selected=$0 ~ /^#?\s+influx:$/
-            }
-        } else {
-            in_storage_selected=$0 ~ /^#?\s{4}/
-        }
-
-        if (in_storage_h2_section == 0) {
-            in_storage_h2_section=$0 ~ /^#?\s+h2:$/
-        } else {
-            in_storage_h2_section=$0 ~ /^#?\s{4}/
-        }
-
-        if (in_storage_selected == 1) {
-            # enable selected storage
-            # uncomment es/influx config
-            gsub("^#", "", $0)
-            print
-        } else if (in_storage_h2_section == 1) {
-            # comment out h2 config
-            if ($0 !~ /^#/) {
-                print "#" $0
-            } else {
-                print
-            }
-        } else {
-            print
-        }
-    } else {
-        print
-    }
-}
\ No newline at end of file
diff --git a/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/rc.d/rc0-prepare.sh b/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/rc.d/rc0-prepare.sh
index 1173933..7ac08c1 100755
--- a/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/rc.d/rc0-prepare.sh
+++ b/test/e2e/e2e-cluster/e2e-cluster-test-runner/src/docker/rc.d/rc0-prepare.sh
@@ -29,8 +29,6 @@ if test "${MODE}" = "cluster"; then
 
     # substitute application.yml to be capable of cluster mode
     cd ${SW_HOME}/config \
-        && gawk -f /clusterize.awk application.yml > clusterized_app.yml \
-        && mv clusterized_app.yml application.yml \
         && sed '/<Loggers>/a<logger name="org.apache.skywalking.oap.server.receiver.trace.provider.UninstrumentedGatewaysConfig" level="DEBUG"/>\
         \n<logger name="org.apache.skywalking.oap.server.receiver.trace.provider.parser.listener.service.ServiceMappingSpanListener" level="DEBUG"/>' log4j2.xml > log4j2debuggable.xml \
         && mv log4j2debuggable.xml log4j2.xml
diff --git a/test/e2e/e2e-influxdb/src/docker/application.yml b/test/e2e/e2e-influxdb/src/docker/application.yml
index ff7b7cb..003c57d 100644
--- a/test/e2e/e2e-influxdb/src/docker/application.yml
+++ b/test/e2e/e2e-influxdb/src/docker/application.yml
@@ -108,7 +108,7 @@ storage:
 #      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
 #      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
 #    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-  influx:
+  influxdb:
     # Metadata storage provider configuration
     metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
     h2Props:
diff --git a/test/e2e/e2e-profile/e2e-profile-test-runner/src/docker/adapt_storage.awk b/test/e2e/e2e-profile/e2e-profile-test-runner/src/docker/adapt_storage.awk
index f9c2b0e..fc1d7f8 100644
--- a/test/e2e/e2e-profile/e2e-profile-test-runner/src/docker/adapt_storage.awk
+++ b/test/e2e/e2e-profile/e2e-profile-test-runner/src/docker/adapt_storage.awk
@@ -40,7 +40,7 @@ BEGIN {
             } else if (ENVIRON["STORAGE"] ~ /^h2.*$/) {
                 in_storage_type_section=$0 ~ /^#?\s+h2:$/
             } else if (ENVIRON["STORAGE"] ~ /^influx.*$/) {
-                in_storage_type_section=$0 ~ /^#?\s+influx:$/
+                in_storage_type_section=$0 ~ /^#?\s+influxdb:$/
             }
         } else {
             in_storage_type_section=$0 ~ /^#?\s{4}/
diff --git a/test/e2e/e2e-ttl/e2e-ttl-es/pom.xml b/test/e2e/e2e-ttl/e2e-ttl-es/pom.xml
index bee43a7..72845f1 100644
--- a/test/e2e/e2e-ttl/e2e-ttl-es/pom.xml
+++ b/test/e2e/e2e-ttl/e2e-ttl-es/pom.xml
@@ -85,6 +85,7 @@
                             <run>
                                 <env>
                                     <MODE>standalone</MODE>
+                                    <SW_STORAGE>${SW_STORAGE}</SW_STORAGE>
                                     <ES_VERSION>${elasticsearch.version}</ES_VERSION>
                                     <SW_STORAGE_ES_CLUSTER_NODES>
                                         ${e2e.container.name.prefix}-elasticsearch:9200
diff --git a/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/es_storage.awk b/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/es_storage.awk
deleted file mode 100644
index 2487b5a..0000000
--- a/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/es_storage.awk
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#!/usr/bin/awk -f
-
-BEGIN {
-    in_storage_section=0;
-    in_storage_es_section=0;
-    in_storage_h2_section=0;
-}
-
-{
-    if (in_storage_section == 0) {
-        in_storage_section=$0 ~ /^storage:$/
-    } else {
-        in_storage_section=$0 ~ /^(#|\s{2})/
-    }
-
-    if (in_storage_section == 1) {
-        # in the storage: section now
-        # disable h2 module
-        if (in_storage_es_section == 0) {
-            if (ENVIRON["ES_VERSION"] ~ /^6.+/) {
-                in_storage_es_section=$0 ~ /^#?\s+elasticsearch:$/
-            } else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
-                in_storage_es_section=$0 ~ /^#?\s+elasticsearch7:$/
-            }
-        } else {
-            in_storage_es_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_h2_section == 0) {
-            in_storage_h2_section=$0 ~ /^#?\s+h2:$/
-        } else {
-            in_storage_h2_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_es_section == 1) {
-            # in the storage.elasticsearch section now
-            # uncomment es config
-            gsub("^#", "", $0)
-            print
-        } else if (in_storage_h2_section == 1) {
-            # comment out h2 config
-            if ($0 !~ /^#/) {
-                print "#" $0
-            } else {
-                print
-            }
-        } else {
-            print
-        }
-    } else {
-        print
-    }
-}
-
diff --git a/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/rc.d/rc0-prepare.sh b/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/rc.d/rc0-prepare.sh
index 2bf0862..93a5d53 100755
--- a/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/rc.d/rc0-prepare.sh
+++ b/test/e2e/e2e-ttl/e2e-ttl-es/src/docker/rc.d/rc0-prepare.sh
@@ -19,8 +19,6 @@ original_wd=$(pwd)
 
 # substitute application.yml to be capable of es mode
 cd ${SW_HOME}/config \
-    && gawk -f /es_storage.awk application.yml > es_storage_app.yml \
-    && mv es_storage_app.yml application.yml \
     && cp /ttl_official_analysis.oal official_analysis.oal \
     && sed '/<Loggers>/a<logger name="org.apache.skywalking.oap.server.storage" level="DEBUG"/>' log4j2.xml > log4j2debuggable.xml \
     && mv log4j2debuggable.xml log4j2.xml
diff --git a/test/e2e/e2e-ttl/e2e-ttl-influxdb/pom.xml b/test/e2e/e2e-ttl/e2e-ttl-influxdb/pom.xml
index d407446..a0cbd69 100644
--- a/test/e2e/e2e-ttl/e2e-ttl-influxdb/pom.xml
+++ b/test/e2e/e2e-ttl/e2e-ttl-influxdb/pom.xml
@@ -94,6 +94,7 @@
                             <run>
                                 <env>
                                     <MODE>standalone</MODE>
+                                    <SW_STORAGE>${SW_STORAGE}</SW_STORAGE>
                                     <SW_STORAGE_INFLUXDB_URL>http://${e2e.container.name.prefix}-influxdb:8086</SW_STORAGE_INFLUXDB_URL>
                                 </env>
                                 <dependsOn>
diff --git a/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/influx_storage.awk b/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/influx_storage.awk
deleted file mode 100644
index 28ece11..0000000
--- a/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/influx_storage.awk
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#!/usr/bin/awk -f
-
-BEGIN {
-    in_storage_section=0;
-    in_storage_influx_section=0;
-    in_storage_h2_section=0;
-}
-
-{
-    if (in_storage_section == 0) {
-        in_storage_section=$0 ~ /^storage:$/
-    } else {
-        in_storage_section=$0 ~ /^(#|\s{2})/
-    }
-
-    if (in_storage_section == 1) {
-        # in the storage: section now
-        # disable h2 module
-        if (in_storage_influx_section == 0) {
-            in_storage_influx_section=$0 ~ /^#?\s+influx:$/
-        } else {
-            in_storage_influx_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_h2_section == 0) {
-            in_storage_h2_section=$0 ~ /^#?\s+h2:$/
-        } else {
-            in_storage_h2_section=$0 ~ /^#?\s{4}/
-        }
-        if (in_storage_influx_section == 1) {
-            # in the storage.influxdb section now
-            # uncomment influx config
-            gsub("^#", "", $0)
-            print
-        } else if (in_storage_h2_section == 1) {
-            # comment out h2 config
-            if ($0 !~ /^#/) {
-                print "#" $0
-            } else {
-                print
-            }
-        } else {
-            print
-        }
-    } else {
-        print
-    }
-}
-
diff --git a/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/rc.d/rc0-prepare.sh b/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/rc.d/rc0-prepare.sh
index e79e59b..93a5d53 100755
--- a/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/rc.d/rc0-prepare.sh
+++ b/test/e2e/e2e-ttl/e2e-ttl-influxdb/src/docker/rc.d/rc0-prepare.sh
@@ -19,8 +19,6 @@ original_wd=$(pwd)
 
 # substitute application.yml to be capable of es mode
 cd ${SW_HOME}/config \
-    && gawk -f /influx_storage.awk application.yml > influx_storage_app.yml \
-    && mv influx_storage_app.yml application.yml \
     && cp /ttl_official_analysis.oal official_analysis.oal \
     && sed '/<Loggers>/a<logger name="org.apache.skywalking.oap.server.storage" level="DEBUG"/>' log4j2.xml > log4j2debuggable.xml \
     && mv log4j2debuggable.xml log4j2.xml
diff --git a/test/e2e/run.sh b/test/e2e/run.sh
index 453c823..0d6f43d 100755
--- a/test/e2e/run.sh
+++ b/test/e2e/run.sh
@@ -69,10 +69,16 @@ do
   # so we give each test a separate distribution folder here
   mkdir -p "$test_case" && tar -zxf dist/${DIST_PACKAGE} -C "$test_case"
 
+  SW_STORAGE=${storage}
+  if [[ ${storage} = "elasticsearch" && ${ES_VERSION} =~ ^7 ]]; then
+    SW_STORAGE=elasticsearch7
+  fi
+
   ./mvnw --batch-mode -Dbuild.id="${BUILD_ID:-local}" \
          -De2e.container.version="${E2E_VERSION}" \
          -Delasticsearch.version="${ES_VERSION}" \
          -Dsw.home="${base_dir}/$test_case/${DIST_PACKAGE//.tar.gz/}" \
+         -DSW_STORAGE=${SW_STORAGE} \
          `if [ ! -z "${storage}" ] ; then echo -P"${storage}"; fi` \
          -f test/e2e/pom.xml -pl "$test_case" -am verify
 
diff --git a/tools/profile-exporter/application.yml b/tools/profile-exporter/application.yml
index a34a5a2..9dc7004 100644
--- a/tools/profile-exporter/application.yml
+++ b/tools/profile-exporter/application.yml
@@ -16,31 +16,32 @@
 core:
   tool-profile-mock-core:
 storage:
-  #  elasticsearch:
-  #    nameSpace: ${SW_NAMESPACE:""}
-  #    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
-  #    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
-  #    #trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
-  #    #trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
-  #    user: ${SW_ES_USER:""}
-  #    password: ${SW_ES_PASSWORD:""}
-  #    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
-  #    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
-  #    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
-  #    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
-  #    # Those data TTL settings will override the same settings in core module.
-  #    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
-  #    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
-  #    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
-  #    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
-  #    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
-  #    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
-  #    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
-  #    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
-  #    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
-  #    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
-  #    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
-  #    advanced: ${SW_STORAGE_ES_ADVANCED:""}
+  selector: ${SW_STORAGE:h2}
+  elasticsearch:
+    nameSpace: ${SW_NAMESPACE:""}
+    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
+    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
+    #trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
+    #trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
+    user: ${SW_ES_USER:""}
+    password: ${SW_ES_PASSWORD:""}
+    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
+    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
+    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
+    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
+    # Those data TTL settings will override the same settings in core module.
+    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
+    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
+    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
+    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
+    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
+    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
+    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
+    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
+    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
+    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
+    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
+    advanced: ${SW_STORAGE_ES_ADVANCED:""}
   elasticsearch7:
     nameSpace: ${SW_NAMESPACE:""}
     clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
@@ -65,45 +66,47 @@ storage:
     metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
     segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
     advanced: ${SW_STORAGE_ES_ADVANCED:""}
-#  h2:
-#    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
-#    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
-#    user: ${SW_STORAGE_H2_USER:sa}
-#    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
-#  mysql:
-#    properties:
-#      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_DATA_SOURCE_USER:root}
-#      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
-#  influx:
-#    # Metadata storage provider configuration
-#    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
-#    h2Props:
-#      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
-#      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
-#    mysqlProps:
-#      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
-#      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
-#      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
-#      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
-#      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
-#      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
-#      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
-#    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
-#    # InfluxDB configuration
-#    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
-#    user: ${SW_STORAGE_INFLUXDB_USER:root}
-#    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
-#    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
-#    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
-#    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
-#    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
+  h2:
+    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
+    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
+    user: ${SW_STORAGE_H2_USER:sa}
+    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
+  mysql:
+    properties:
+      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_DATA_SOURCE_USER:root}
+      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
+  influxdb:
+    # Metadata storage provider configuration
+    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
+    h2Props:
+      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
+      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
+    mysqlProps:
+      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
+      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
+      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
+      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
+      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
+      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
+      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
+    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
+    # InfluxDB configuration
+    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
+    user: ${SW_STORAGE_INFLUXDB_USER:root}
+    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
+    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
+    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
+    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
+    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task logs in a request
+
 telemetry:
+  selector: ${SW_TELEMETRY:none}
   none: