Posted to notifications@skywalking.apache.org by GitBox <gi...@apache.org> on 2020/12/29 02:30:25 UTC

[GitHub] [skywalking] akankshagupta18 edited a comment on issue #991: cluster module no provider exists

akankshagupta18 edited a comment on issue #991:
URL: https://github.com/apache/skywalking/issues/991#issuecomment-751924179


   @wu-sheng 
   Hi, I just faced a similar issue.
   ES version: 7
   SkyWalking version: apache-skywalking-apm-8.3.0.tar.gz
   my system: macOS
   
   **error in oap server log:**
   
   2020-12-29 07:28:08,436 - org.apache.skywalking.oap.server.starter.OAPServerBootstrap - 57 [main] ERROR [] - storage module no provider found.
   org.apache.skywalking.oap.server.library.module.ProviderNotFoundException: storage module no provider found.
   	at org.apache.skywalking.oap.server.library.module.ModuleDefine.prepare(ModuleDefine.java:86) ~[library-module-8.3.0.jar:8.3.0]
   	at org.apache.skywalking.oap.server.library.module.ModuleManager.init(ModuleManager.java:47) ~[library-module-8.3.0.jar:8.3.0]
   	at org.apache.skywalking.oap.server.starter.OAPServerBootstrap.start(OAPServerBootstrap.java:43) [server-bootstrap-8.3.0.jar:8.3.0]
   	at org.apache.skywalking.oap.server.starter.OAPServerStartUp.main(OAPServerStartUp.java:26) [server-starter-8.3.0.jar:8.3.0]
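   
   For anyone else hitting this: the message means the name given to storage.selector was not matched by any storage provider the OAP server could discover at startup. A minimal sketch of that failure mode (illustrative only, not the actual ModuleDefine/ModuleManager source; the registry and class names below are made up for the example):
   
       import java.util.HashMap;
       import java.util.Map;
       
       public class ProviderLookupSketch {
           // Stand-in for org.apache.skywalking.oap.server.library.module.ProviderNotFoundException
           static class ProviderNotFoundException extends RuntimeException {
               ProviderNotFoundException(String msg) { super(msg); }
           }
       
           public static void main(String[] args) {
               // Hypothetical registry of storage providers discovered at startup.
               // If the elasticsearch7 storage jar is not on the classpath, or the YAML
               // under "storage:" is mis-indented, "elasticsearch7" never gets registered.
               Map<String, Object> discoveredProviders = new HashMap<>();
               discoveredProviders.put("elasticsearch", new Object());
               discoveredProviders.put("h2", new Object());
       
               String selector = "elasticsearch7"; // value of storage.selector
               if (!discoveredProviders.containsKey(selector)) {
                   // Same message as in the log above.
                   throw new ProviderNotFoundException("storage module no provider found.");
               }
           }
       }
   
   So the two things worth checking are that the selector value exactly matches a provider name in the config, and that the corresponding storage provider jar is actually present in the distribution's oap-libs directory (treat that location as an assumption, it may differ per install).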
   
   
   **application.yml**
   
   storage:
     selector: ${SW_STORAGE:elasticsearch7}
     elasticsearch:
       nameSpace: ${SW_NAMESPACE:""}
       clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
       protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
       user: ${SW_ES_USER:""}
       password: ${SW_ES_PASSWORD:""}
       trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:""}
       trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
       secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
       dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
       indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1} # Shard number of new indexes
       indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1} # Replicas number of new indexes
        # Super datasets (such as trace segments) are defined in the code. The following 3 configs improve ES performance when storing super-size data in ES.
        superDatasetDayStep: ${SW_SUPERDATASET_STORAGE_DAY_STEP:-1} # Number of days covered by one super-size dataset record index; when the value is less than 0, it defaults to dayStep.
        superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5} # This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. It also affects Zipkin and Jaeger traces.
        superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0} # Number of replicas for the super-size dataset record index; the default is 0.
       bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requests
       syncBulkActions: ${SW_STORAGE_ES_SYNC_BULK_ACTIONS:50000} # Execute the sync bulk metrics data every ${SW_STORAGE_ES_SYNC_BULK_ACTIONS} requests
       flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
       concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
       resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
       metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
       segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
       profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
       advanced: ${SW_STORAGE_ES_ADVANCED:""}
     elasticsearch7:
       nameSpace: ${SW_NAMESPACE:""}
       clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
       protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
       trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:""}
       trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
       dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
       indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1} # Shard number of new indexes
       indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1} # Replicas number of new indexes
        # Super datasets (such as trace segments) are defined in the code. The following 3 configs improve ES performance when storing super-size data in ES.
        superDatasetDayStep: ${SW_SUPERDATASET_STORAGE_DAY_STEP:-1} # Number of days covered by one super-size dataset record index; when the value is less than 0, it defaults to dayStep.
        superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5} # This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. It also affects Zipkin and Jaeger traces.
        superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0} # Number of replicas for the super-size dataset record index; the default is 0.
       user: ${SW_ES_USER:""}
       password: ${SW_ES_PASSWORD:""}
       secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
       bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requests
       syncBulkActions: ${SW_STORAGE_ES_SYNC_BULK_ACTIONS:50000} # Execute the sync bulk metrics data every ${SW_STORAGE_ES_SYNC_BULK_ACTIONS} requests
       flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
       concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
       resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
       metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
       segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
       profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
       advanced: ${SW_STORAGE_ES_ADVANCED:""}
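   
   For reference, every value of the form ${SW_SOMETHING:default} is resolved from the environment first and falls back to the default after the colon, so storage.selector resolves to elasticsearch7 unless SW_STORAGE is exported to something else. A rough sketch of that resolution rule (my own illustration, not SkyWalking's actual YAML/placeholder code):
   
       public class PlaceholderSketch {
           // Resolve "${NAME:default}" -> env var NAME if set, otherwise the default.
           // Splitting on the first ':' keeps defaults like "localhost:9200" intact.
           static String resolve(String placeholder) {
               if (!placeholder.startsWith("${") || !placeholder.endsWith("}")) {
                   return placeholder; // plain literal, nothing to resolve
               }
               String body = placeholder.substring(2, placeholder.length() - 1);
               int sep = body.indexOf(':');
               String envName = sep >= 0 ? body.substring(0, sep) : body;
               String fallback = sep >= 0 ? body.substring(sep + 1) : "";
               String fromEnv = System.getenv(envName);
               return fromEnv != null ? fromEnv : fallback;
           }
       
           public static void main(String[] args) {
               System.out.println(resolve("${SW_STORAGE:elasticsearch7}"));                  // elasticsearch7 (unless SW_STORAGE is set)
               System.out.println(resolve("${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}")); // localhost:9200
           }
       }
   
   So with this file unchanged, the selector should point at the elasticsearch7 block above; if SW_STORAGE is set in the environment (for example by a startup script), it silently overrides that default.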
   

