Posted to commits@streampark.apache.org by be...@apache.org on 2022/10/09 15:24:32 UTC

[incubator-streampark] branch dev updated: [Improve] switch Chinese Notes in *.properties file to English Notes (#1787)

This is an automated email from the ASF dual-hosted git repository.

benjobs pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-streampark.git


The following commit(s) were added to refs/heads/dev by this push:
     new f1ee6ece5 [Improve] switch Chinese Notes in *.properties file to English Notes (#1787)
f1ee6ece5 is described below

commit f1ee6ece5a393eddf316dcd50e546ae3c37e959f
Author: Gerry <33...@users.noreply.github.com>
AuthorDate: Sun Oct 9 23:24:26 2022 +0800

    [Improve] switch Chinese Notes in *.properties file to English Notes (#1787)
---
 .../conf/streamx-console-config/spy.properties     |  14 +--
 .../src/main/resources/spy.properties              |  14 +--
 .../streampark-spark-cli/default.properties        | 135 +++++++++------------
 .../assembly/conf/prod/test.properties             |  89 ++++++--------
 4 files changed, 102 insertions(+), 150 deletions(-)

diff --git a/deploy/helm/streamx/conf/streamx-console-config/spy.properties b/deploy/helm/streamx/conf/streamx-console-config/spy.properties
index 36dc7587c..a8a406220 100755
--- a/deploy/helm/streamx/conf/streamx-console-config/spy.properties
+++ b/deploy/helm/streamx/conf/streamx-console-config/spy.properties
@@ -15,16 +15,16 @@
 # limitations under the License.
 #
 
-# p6spy\u914D\u7F6E\u548C\u4F7F\u7528 https://p6spy.readthedocs.io/en/latest/configandusage.html
-# \u4F7F\u7528\u65E5\u5FD7\u7CFB\u7EDF\u8BB0\u5F55sql
+# p6spy configuration and usage: https://p6spy.readthedocs.io/en/latest/configandusage.html
+# Use the logging system to record SQL
 appender=com.p6spy.engine.spy.appender.Slf4JLogger
-# \u81EA\u5B9A\u4E49\u65E5\u5FD7\u6253\u5370
+# Custom log message format
 logMessageFormat=org.apache.streampark.console.base.config.P6spySqlFormatConfig
-# \u662F\u5426\u5F00\u542F\u6162sql\u8BB0\u5F55
+# Whether to enable slow SQL logging
 outagedetection=true
-# \u6162SQL\u8BB0\u5F55\u6807\u51C6 \u79D2
+# Slow SQL threshold, in seconds
 outagedetectioninterval=2
-# \u662F\u5426\u5F00\u542F\u65E5\u5FD7\u8FC7\u6EE4 \u9ED8\u8BA4false\uFF0C \u8FD9\u9879\u914D\u7F6E\u662F\u5426\u751F\u6548\u524D\u63D0\u662F\u914D\u7F6E\u4E86 include/exclude/sqlexpression
+# Whether to enable log filtering; default false. Takes effect only if include/exclude/sqlexpression is configured
 filter=true
-# \u8FC7\u6EE4 Log \u65F6\u6240\u6392\u9664\u7684\u8868\u540D\u5217\u8868\uFF0C\u4EE5\u9017\u53F7\u5206\u9694 \u9ED8\u8BA4\u4E3A\u7A7A
+# Comma-separated list of table names to exclude when filtering logs; default is empty
 exclude=QRTZ
diff --git a/streampark-console/streampark-console-service/src/main/resources/spy.properties b/streampark-console/streampark-console-service/src/main/resources/spy.properties
index 36dc7587c..c6ea274b8 100644
--- a/streampark-console/streampark-console-service/src/main/resources/spy.properties
+++ b/streampark-console/streampark-console-service/src/main/resources/spy.properties
@@ -15,16 +15,16 @@
 # limitations under the License.
 #
 
-# p6spy\u914D\u7F6E\u548C\u4F7F\u7528 https://p6spy.readthedocs.io/en/latest/configandusage.html
-# \u4F7F\u7528\u65E5\u5FD7\u7CFB\u7EDF\u8BB0\u5F55sql
+# p6spy configuration and usage: https://p6spy.readthedocs.io/en/latest/configandusage.html
+# Use the logging system to record SQL
 appender=com.p6spy.engine.spy.appender.Slf4JLogger
-# \u81EA\u5B9A\u4E49\u65E5\u5FD7\u6253\u5370
+# Custom log message format
 logMessageFormat=org.apache.streampark.console.base.config.P6spySqlFormatConfig
-# \u662F\u5426\u5F00\u542F\u6162sql\u8BB0\u5F55
+# Whether to enable slow SQL logging
 outagedetection=true
-# \u6162SQL\u8BB0\u5F55\u6807\u51C6 \u79D2
+# Slow SQL threshold, in seconds
 outagedetectioninterval=2
-# \u662F\u5426\u5F00\u542F\u65E5\u5FD7\u8FC7\u6EE4 \u9ED8\u8BA4false\uFF0C \u8FD9\u9879\u914D\u7F6E\u662F\u5426\u751F\u6548\u524D\u63D0\u662F\u914D\u7F6E\u4E86 include/exclude/sqlexpression
+# Whether to enable log filtering; default false. Takes effect only if include/exclude/sqlexpression is configured
 filter=true
-# \u8FC7\u6EE4 Log \u65F6\u6240\u6392\u9664\u7684\u8868\u540D\u5217\u8868\uFF0C\u4EE5\u9017\u53F7\u5206\u9694 \u9ED8\u8BA4\u4E3A\u7A7A
+# Comma-separated list of table names to exclude when filtering logs; default is empty
 exclude=QRTZ
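
A note for readers: p6spy only sees SQL when the application's JDBC traffic is routed through its wrapper driver, so the spy.properties settings above take effect only together with datasource wiring along these lines. A minimal sketch, assuming a Spring-style datasource (the spring.datasource.* keys, host, and database name are illustrative placeholders, not part of this commit):

    # Route JDBC traffic through the p6spy wrapper driver so spy.properties applies
    spring.datasource.driver-class-name=com.p6spy.engine.spy.P6SpyDriver
    # Prefix the real JDBC URL with "p6spy:"; p6spy delegates to the underlying driver
    spring.datasource.url=jdbc:p6spy:mysql://localhost:3306/streampark
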
diff --git a/streampark-spark/streampark-spark-cli/default.properties b/streampark-spark/streampark-spark-cli/default.properties
index df3d70f7c..b1fcbedcb 100644
--- a/streampark-spark/streampark-spark-cli/default.properties
+++ b/streampark-spark/streampark-spark-cli/default.properties
@@ -21,139 +21,113 @@
 #                   user config                      #
 #                                                    #
 ######################################################
-#\u5FC5\u987B\u8BBE\u7F6E,\u6267\u884Cclass\u7684\u5168\u5305\u540D\u79F0
+#Required: the fully qualified name of the class to execute
 spark.app.main=
 spark.app.params=
 spark.app.id=123
+
 ######################################################
 #                                                    #
 #                spark config                        #
 #                                                    #
 ######################################################
-#\u6267\u884C\u96C6\u7FA4\u8BBE\u7F6E,\u4E0D\u7528\u8BBE\u7F6E,\u4E00\u822C\u4F7F\u7528YARN
+#Cluster to run on; usually left as-is, YARN is generally used
 spark.master=yarn
-#YARN\u90E8\u7F72\u6A21\u5F0F
-#default=cluster
+#YARN deployment mode; default=cluster
 spark.submit.deployMode=cluster
-#spark-streaming\u6BCF\u4E2A\u6279\u6B21\u95F4\u9694\u65F6\u95F4
-#default=300
+#Interval between spark-streaming batches; default=300
 spark.batch.duration=300
-#spark on yarn\u7684\u4EFB\u52A1\u63D0\u4EA4\u961F\u5217
-#default=defalut
+#Task submission queue for spark on yarn; default=default
 spark.yarn.queue=default
-#spark \u4EFB\u52A1\u540D\u79F0\u914D\u7F6E,\u5EFA\u8BAE\u4FDD\u6301\u4EFB\u52A1\u540D\u79F0\u5168\u5C40\u552F\u4E00
-#\u8FD9\u6837\u53EF\u4EE5\u5728\u8BBE\u8BA1\u4EFB\u52A1\u5931\u8D25\u7684\u65F6\u5019\u6839\u636E\u540D\u79F0\u505A\u4E00\u4E9B\u552F\u4E00\u5904\u7406
-#\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u7C7B\u5168\u540D.App
+#spark task name; keeping it globally unique is recommended, so a failed task can be handled individually by name. If unset, the fully qualified class name plus .App is used
 spark.app.name=
-#spark\u7F51\u7EDC\u5E8F\u5217\u5316\u65B9\u5F0F,\u9ED8\u8BA4\u662FJavaSerializer,\u53EF\u9488\u5BF9\u6240\u6709\u7C7B\u578B\u4F46\u901F\u5EA6\u8F83\u6162
-#\u8FD9\u91CC\u4F7F\u7528\u63A8\u8350\u7684Kryo\u65B9\u5F0F
-#kafka-0.10\u5FC5\u987B\u4F7F\u7528\u6B64\u65B9\u5F0F
+#Spark network serialization; the default JavaSerializer handles all types but is slow. The recommended Kryo serializer is used here; kafka-0.10 requires it
 spark.serializer=org.apache.spark.serializer.KryoSerializer
-#++++++++++++++++++++++Driver\u8282\u70B9\u76F8\u5173\u914D\u7F6E+++++++++++++++++++++++++++
-#Driver\u8282\u70B9\u4F7F\u7528\u5185\u5B58\u5927\u5C0F\u8BBE\u7F6E
-#default=512MB
+
+#++++++++++++++++++++++Driver node related configuration+++++++++++++++++++++++++++
+#Driver memory size; default=512MB
 spark.driver.memory=512MB
-#Driver\u8282\u70B9\u4F7F\u7528\u7684cpu\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+#Number of CPU cores used by the driver; default=1
 spark.driver.cores=1
-#Driver\u8282\u70B9\u6784\u5EFA\u65F6spark-jar\u548Cuser-jar\u51B2\u7A81\u65F6\u4F18\u5148\u4F7F\u7528\u7528\u6237\u63D0\u4F9B\u7684,\u8FD9\u662F\u4E00\u4E2A\u5B9E\u9A8C\u6027\u8D28\u7684\u53C2\u6570\u53EA\u5BF9cluster\u6A21\u5F0F\u6709\u6548
-#default=false
+#When spark jars and user jars conflict on the driver, prefer the user-supplied ones; an experimental parameter, effective only in cluster mode; default=false
 spark.driver.userClassPathFirst=false
-#++++++++++++++++++++++Executor\u8282\u70B9\u76F8\u5173\u914D\u7F6E+++++++++++++++++++++++++
-#Executor\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+
+#++++++++++++++++++++++Executor node related configuration+++++++++++++++++++++++++
+#Number of executors; default=1
 spark.executor.instances=1
-#Executor\u4F7F\u7528cpu\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+#Number of CPU cores per executor; default=1
 spark.executor.cores=1
-#Executor\u4F7F\u7528\u5185\u5B58\u5927\u5C0F\u8BBE\u7F6E
-#default=512MB
+#Executor memory size; default=512MB
 spark.executor.memory=512MB
-#\u540Cdriver\u8282\u70B9\u914D\u7F6E\u4F5C\u7528\u76F8\u540C,\u4F46\u662F\u662F\u9488\u5BF9executor\u7684
-#default=false
+#Same as the driver setting above, but for executors; default=false
 spark.executor.userClassPathFirst=true
-#++++++++++++++++++++++++Executor\u52A8\u6001\u5206\u914D\u76F8\u5173\u914D\u7F6E++++++++++++++++++++
-#Executor\u52A8\u6001\u5206\u914D\u7684\u524D\u7F6E\u670D\u52A1
-#default=false
+
+#++++++++++++++++++++++++Executor dynamic allocation-related configuration++++++++++++++++++++
+#Prerequisite service for executor dynamic allocation; default=false
 spark.shuffle.service.enabled=true
-#\u670D\u52A1\u5BF9\u5E94\u7684\u7AEF\u53E3,\u6B64\u7AEF\u53E3\u670D\u52A1\u662F\u914D\u7F6E\u5728yarn-site\u4E2D\u7684,\u7531NodeManager\u670D\u52A1\u52A0\u8F7D\u542F\u52A8
-#default=7337
+#Port of that service; configured in yarn-site and started by the NodeManager; default=7337
 spark.shuffle.service.port=7337
-#\u914D\u7F6E\u662F\u5426\u542F\u7528\u8D44\u6E90\u52A8\u6001\u5206\u914D,\u6B64\u52A8\u6001\u5206\u914D\u662F\u9488\u5BF9executor\u7684,\u9700\u8981yarn\u96C6\u7FA4\u914D\u7F6E\u652F\u6301\u52A8\u6001\u5206\u914D
-#default=false
+#Whether to enable dynamic resource allocation (for executors); the yarn cluster must be configured to support it; default=false
 spark.dynamicAllocation.enabled=true
-#\u91CA\u653E\u7A7A\u95F2\u7684executor\u7684\u65F6\u95F4
-#default=60s
+#Idle time before an executor is released; default=60s
 spark.dynamicAllocation.executorIdleTimeout=60s
-#\u6709\u7F13\u5B58\u7684executor\u7A7A\u95F2\u91CA\u653E\u65F6\u95F4
-#default=infinity(\u9ED8\u8BA4\u4E0D\u91CA\u653E)
+#Idle time before an executor holding cached data is released; default=infinity (never released)
 spark.dynamicAllocation.cachedExecutorIdleTimeout=-1
-#\u521D\u59CB\u5316executor\u7684\u4E2A\u6570,\u5982\u679C\u8BBE\u7F6Espark.executor.instances\u8C01\u5C0F\u7528\u8C01
-#default=minExecutors(\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u6B64\u9879\u914D\u7F6E\u503C)
+#Initial number of executors; if spark.executor.instances is also set, the smaller value wins; default=minExecutors (used when this is unset)
 spark.dynamicAllocation.initialExecutors=1
-#executor\u52A8\u6001\u5206\u914D\u53EF\u5206\u914D\u6700\u5927\u6570\u91CF
-#default=infinity
+#Maximum number of executors dynamic allocation may scale up to; default=infinity
 spark.dynamicAllocation.maxExecutors=60
-#executor\u52A8\u6001\u6536\u7F29\u7684\u6700\u5C0F\u6570\u91CF
-#default=0
+#Minimum number of executors dynamic allocation may scale down to; default=0
 spark.dynamicAllocation.minExecutors=1
-#\u6279\u6B21\u8C03\u5EA6\u5EF6\u8FDF\u591A\u957F\u65F6\u95F4\u5F00\u59CB\u589E\u52A0executor
-#default=1s
+#How long batches may stay backlogged before executors are added; default=1s
 spark.dynamicAllocation.schedulerBacklogTimeout=1s
-#\u540C\u4E0A,\u4F46\u662F\u662F\u9488\u5BF9\u4E4B\u540E\u7684\u8BF7\u6C42
-#default=SchedulerBacklogTimeout(\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u6B64\u9879\u914D\u7F6E\u503C)
+#Same as above, but for subsequent requests; default=schedulerBacklogTimeout (used when this is unset)
 spark.dynamicAllocation.sustainedSchedulerBacklogTimeout=1s
+
 ######################################################
 #                                                    #
 #             StreamPark-Spark Kafka Source             #
 #                   base config                      #
 #                                                    #
 ######################################################
-#spark.source.kafka.consume\u540E\u9762\u7684\u914D\u7F6E\u662F\u6807\u51C6kafka\u914D\u7F6E
-#kafka\u6D88\u8D39\u7684topics\u914D\u7F6E,\u53EF\u4EE5\u914D\u7F6E\u591A\u4E2A,\u6BCF\u4E2Atopic\u4E4B\u95F4\u7528\u9017\u53F7[,]\u9694\u5F00
-#default=
+#Keys after the spark.source.kafka.consume prefix are standard kafka configuration
+#Topics to consume; multiple topics may be given, separated by commas [,]
 spark.source.kafka.consume.topics=
-#kafka consumer\u7684group id.
-#default=kafka.consumer.001
+#kafka consumer group id; default=kafka.consumer.001
 spark.source.kafka.consume.group.id=kafka.consumer.001
-#kafka\u96C6\u7FA4\u7684\u4E3B\u673A\u548C\u7AEF\u53E3\u53F7,\u53EF\u4EE5\u914D\u7F6E\u591A\u4E2A,\u6BCF\u4E2A\u4E3B\u673A\u4E4B\u95F4\u7528\u9017\u53F7[,]\u9694\u5F00
-#default=
+#kafka broker hosts and ports; multiple hosts may be given, separated by commas [,]
 spark.source.kafka.consume.bootstrap.servers=
-#\u7B2C\u4E00\u6B21\u6D88\u8D39kafka topic\u7684\u65F6\u5019\u6307\u5B9A\u4ECE\u4EC0\u4E48\u4F4D\u7F6E\u6D88\u8D39 \u6709\u4E24\u4E2A\u53EF\u9009\u503Clatest[\u6700\u65B0\u4F4D\u7F6E],earliest[\u6700\u65E9\u4F4D\u7F6E]
-#default=earliest
+#Where to start consuming a kafka topic on first read; two values are allowed: latest [newest offset], earliest [oldest offset]; default=earliest
 spark.source.kafka.consume.auto.offset.reset=earliest
-#spark\u6D88\u8D39kafka\u7684\u65F6\u5019\u5982\u4F55\u7BA1\u7406offset \u8FD9\u91CC\u53EF\u9009\u7684\u503C\u6709\u4E09\u79CDhbase,redis,kafka\u6BCF\u79CD\u503C\u5BF9\u5E94\u4E00\u79CD\u5B58\u50A8\u65B9\u5F0F
-#default=kafka
+#How offsets are managed when spark consumes kafka; three values are allowed (hbase, redis, kafka), each naming a storage backend; default=kafka
 spark.source.kafka.offset.store.type=kafka
-#\u81EA\u5B9A\u4E49spark\u7BA1\u7406kafka offset\u7684\u65B9\u6CD5,\u9700\u8981\u6307\u5B9A\u4E00\u4E2A\u81EA\u5B9A\u4E49\u7C7B\u7684\u540D\u79F0
+#Custom kafka offset manager for spark; specify the name of a custom class
 #spark.source.kafka.offset.store.class=none
-#\u65B0\u7248\u672Ckafka\u4F7F\u7528\u7684key\u5E8F\u5217\u5316\u65B9\u5F0F
-#default=java.Serialization
+#Key deserializer used by the new kafka client; default=java.Serialization
 spark.source.kafka.consume.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-#\u6700\u65B0\u7248kafka\u4F7F\u7528\u7684value\u5E8F\u5217\u5316\u65B9\u5F0F
-#default=java.Serialization
+#Value deserializer used by the new kafka client; default=java.Serialization
 spark.source.kafka.consume.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-#\u83B7\u53D6\u4E00\u6B21\u6570\u636E\u7684\u6700\u5927\u957F\u5EA6,\u6B64\u503C\u7684\u5927\u5C0F\u9700\u8981kafka server\u7AEF\u652F\u6301
-#default=10485760
+#Maximum amount of data fetched per request; the kafka server must be configured to allow this size; default=10485760
 spark.source.kafka.consume.max.partition.fetch.bytes=10485760
-#\u83B7\u53D6\u4E00\u6B21\u6570\u636E\u8BF7\u6C42\u7684\u6700\u5927\u7B49\u5F85\u65F6\u95F4
-#default=3000
+#Maximum wait time for a fetch request; default=3000
 spark.source.kafka.consume.fetch.max.wait.ms=3000
+
 ######################################################
 #                                                    #
 #              StreamPark-Spark Redis Sink              #
 #                   base config                      #
 #                                                    #
 ######################################################
-#StreamPark redis sink\u9700\u8981\u7684\u51E0\u4E2A\u914D\u7F6E
-#redis\u4E3B\u673A
+#Settings required by the StreamPark redis sink
+#redis host
 spark.sink.redis.host=
-#redis\u7AEF\u53E3
+#redis port
 spark.sink.redis.port=6379
-#redis\u6570\u636E\u5E93
+#redis database
 spark.sink.redis.db=0
-#redis\u8FDE\u63A5\u8D85\u65F6\u65F6\u95F4
+#redis connection timeout
 spark.sink.redis.timeout=30
+
 ######################################################
 #                                                    #
 #                StreamPark-Spark Monitor               #
@@ -162,13 +136,12 @@ spark.sink.redis.timeout=30
 ######################################################
 #default=0
 spark.monitor.congestion.batch=0
-#\u5806\u79EF\u591A\u5C11\u4E2A\u6279\u6B21\u4E4B\u540Ekill\u6389\u4EFB\u52A1,\u9ED8\u8BA4\u662F0\u4E0Dkill,\u914D\u5408\u4EFB\u52A1\u81EA\u52A8\u91CD\u542F\u529F\u80FD\u53EF\u6709\u6548\u91CD\u542F\u5806\u79EF\u4EFB\u52A1\u4F7F\u6062\u590D
-#default=0
+#Kill the task after this many batches have backed up; 0 (the default) means never kill. Combined with automatic task restart, this lets backlogged tasks recover; default=0
 spark.monitor.suicide.batch=0
-#zk\u5730\u5740
+#zookeeper address
 spark.monitor.zookeeper=zookeeper://127.0.0.1:2181
-#\u9489\u9489\u673A\u5668\u4EBA\u53D1\u9001\u6D88\u606F\u7684api\u5730\u5740,\u9700\u8981\u4ECEhttp\u5F00\u5934\u7684\u5168\u8DEF\u5F84
+#API address of the DingTalk bot used to send messages; the full URL starting with http is required
 spark.monitor.dingding.url=https://oapi.dingtalk.com/robot/send?access_token=d4d19790b4d4b83bfbeeb9f67e75ed5b1c2e3a40968e9d908df7c691c0f78afe
-#\u8981@\u7684\u624B\u673A\u53F7
+#Cell phone numbers to @-mention
 spark.monitor.dingding.user=
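
For orientation: most plain spark.* keys in this file (master, deploy mode, driver/executor sizing, dynamic allocation) are standard Spark configuration, while spark.app.main, spark.batch.duration, and the spark.source.*/spark.sink.*/spark.monitor.* keys are read by StreamPark itself. A minimal sketch of the user-facing part of such a file (the class name, app name, topic, and broker addresses are hypothetical placeholders, not values from this commit):

    #Hypothetical minimal user config for a streampark-spark-cli job
    spark.app.main=com.example.MyStreamingJob
    spark.app.name=my-streaming-job-001
    #Consume topic "events" from two illustrative brokers
    spark.source.kafka.consume.topics=events
    spark.source.kafka.consume.bootstrap.servers=broker1:9092,broker2:9092
    spark.source.kafka.consume.group.id=my-streaming-job-001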
 
diff --git a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties b/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties
index 54e833af6..c384728ff 100644
--- a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties
+++ b/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties
@@ -21,11 +21,10 @@
 #                   user config                      #
 #                                                    #
 ######################################################
-#\u5FC5\u987B\u8BBE\u7F6E,\u6267\u884Cclass\u7684\u5168\u5305\u540D\u79F0
+
+#Required: the fully qualified name of the class to execute
 spark.main.class=org.apache.streampark.spark.test.HelloStreamParkApp
-#spark \u4EFB\u52A1\u540D\u79F0\u914D\u7F6E,\u5EFA\u8BAE\u4FDD\u6301\u4EFB\u52A1\u540D\u79F0\u5168\u5C40\u552F\u4E00
-#\u8FD9\u6837\u53EF\u4EE5\u5728\u8BBE\u8BA1\u4EFB\u52A1\u5931\u8D25\u7684\u65F6\u5019\u6839\u636E\u540D\u79F0\u505A\u4E00\u4E9B\u552F\u4E00\u5904\u7406
-#\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u7C7B\u5168\u540D.App
+#spark task name; keeping it globally unique is recommended, so a failed task can be handled individually by name. If unset, the fully qualified class name plus .App is used
 spark.app.name=HelloStreamParkApp
 spark.app.conf.version=10
 
@@ -34,74 +33,55 @@ spark.app.conf.version=10
 #                spark config                        #
 #                                                    #
 ######################################################
-#\u6267\u884C\u96C6\u7FA4\u8BBE\u7F6E,\u4E0D\u7528\u8BBE\u7F6E,\u4E00\u822C\u4F7F\u7528YARN
+#Cluster to run on; usually left as-is, YARN is generally used
 spark.master=yarn
-#YARN\u90E8\u7F72\u6A21\u5F0F
-#default=cluster
+#YARN deployment mode; default=cluster
 spark.submit.deployMode=cluster
-#spark-streaming\u6BCF\u4E2A\u6279\u6B21\u95F4\u9694\u65F6\u95F4
-#default=300
+#Interval between spark-streaming batches; default=300
 spark.batch.duration=5
-#spark on yarn\u7684\u4EFB\u52A1\u63D0\u4EA4\u961F\u5217
-#default=defalut
+#Task submission queue for spark on yarn; default=default
 spark.yarn.queue=default
-#spark\u7F51\u7EDC\u5E8F\u5217\u5316\u65B9\u5F0F,\u9ED8\u8BA4\u662FJavaSerializer,\u53EF\u9488\u5BF9\u6240\u6709\u7C7B\u578B\u4F46\u901F\u5EA6\u8F83\u6162
-#\u8FD9\u91CC\u4F7F\u7528\u63A8\u8350\u7684Kryo\u65B9\u5F0F
-#kafka-0.10\u5FC5\u987B\u4F7F\u7528\u6B64\u65B9\u5F0F
+#Spark network serialization; the default JavaSerializer handles all types but is slow. The recommended Kryo serializer is used here; kafka-0.10 requires it
 spark.serializer=org.apache.spark.serializer.KryoSerializer
-#++++++++++++++++++++++Driver\u8282\u70B9\u76F8\u5173\u914D\u7F6E+++++++++++++++++++++++++++
-#Driver\u8282\u70B9\u4F7F\u7528\u5185\u5B58\u5927\u5C0F\u8BBE\u7F6E
-#default=512MB
+
+#++++++++++++++++++++++Driver node related configuration+++++++++++++++++++++++++++
+#Driver memory size; default=512MB
 spark.driver.memory=512MB
-#Driver\u8282\u70B9\u4F7F\u7528\u7684cpu\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+#Number of CPU cores used by the driver; default=1
 spark.driver.cores=1
-#Driver\u8282\u70B9\u6784\u5EFA\u65F6spark-jar\u548Cuser-jar\u51B2\u7A81\u65F6\u4F18\u5148\u4F7F\u7528\u7528\u6237\u63D0\u4F9B\u7684,\u8FD9\u662F\u4E00\u4E2A\u5B9E\u9A8C\u6027\u8D28\u7684\u53C2\u6570\u53EA\u5BF9cluster\u6A21\u5F0F\u6709\u6548
-#default=false
+#When spark jars and user jars conflict on the driver, prefer the user-supplied ones; an experimental parameter, effective only in cluster mode; default=false
 spark.driver.userClassPathFirst=false
-#++++++++++++++++++++++Executor\u8282\u70B9\u76F8\u5173\u914D\u7F6E+++++++++++++++++++++++++
-#Executor\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+
+#++++++++++++++++++++++Executor node related configuration+++++++++++++++++++++++++
+#Number of executors; default=1
 spark.executor.instances=1
-#Executor\u4F7F\u7528cpu\u4E2A\u6570\u8BBE\u7F6E
-#default=1
+#Number of CPU cores per executor; default=1
 spark.executor.cores=1
-#Executor\u4F7F\u7528\u5185\u5B58\u5927\u5C0F\u8BBE\u7F6E
-#default=512MB
+#Executor memory size; default=512MB
 spark.executor.memory=512MB
-#\u540Cdriver\u8282\u70B9\u914D\u7F6E\u4F5C\u7528\u76F8\u540C,\u4F46\u662F\u662F\u9488\u5BF9executor\u7684
-#default=false
+#Same as the driver setting above, but for executors; default=false
 spark.executor.userClassPathFirst=true
-#++++++++++++++++++++++++Executor\u52A8\u6001\u5206\u914D\u76F8\u5173\u914D\u7F6E++++++++++++++++++++
-#Executor\u52A8\u6001\u5206\u914D\u7684\u524D\u7F6E\u670D\u52A1
-#default=false
+
+#++++++++++++++++++++++++Executor dynamic allocation-related configuration++++++++++++++++++++
+#Prerequisite service for executor dynamic allocation; default=false
 spark.shuffle.service.enabled=true
-#\u670D\u52A1\u5BF9\u5E94\u7684\u7AEF\u53E3,\u6B64\u7AEF\u53E3\u670D\u52A1\u662F\u914D\u7F6E\u5728yarn-site\u4E2D\u7684,\u7531NodeManager\u670D\u52A1\u52A0\u8F7D\u542F\u52A8
-#default=7337
+#Port of that service; configured in yarn-site and started by the NodeManager; default=7337
 spark.shuffle.service.port=7337
-#\u914D\u7F6E\u662F\u5426\u542F\u7528\u8D44\u6E90\u52A8\u6001\u5206\u914D,\u6B64\u52A8\u6001\u5206\u914D\u662F\u9488\u5BF9executor\u7684,\u9700\u8981yarn\u96C6\u7FA4\u914D\u7F6E\u652F\u6301\u52A8\u6001\u5206\u914D
-#default=false
+#Whether to enable dynamic resource allocation (for executors); the yarn cluster must be configured to support it; default=false
 spark.dynamicAllocation.enabled=true
-#\u91CA\u653E\u7A7A\u95F2\u7684executor\u7684\u65F6\u95F4
-#default=60s
+#Idle time before an executor is released; default=60s
 spark.dynamicAllocation.executorIdleTimeout=60s
-#\u6709\u7F13\u5B58\u7684executor\u7A7A\u95F2\u91CA\u653E\u65F6\u95F4
-#default=infinity(\u9ED8\u8BA4\u4E0D\u91CA\u653E)
+#Idle time before an executor holding cached data is released; default=infinity (never released)
 spark.dynamicAllocation.cachedExecutorIdleTimeout=30
-#\u521D\u59CB\u5316executor\u7684\u4E2A\u6570,\u5982\u679C\u8BBE\u7F6Espark.executor.instances\u8C01\u5C0F\u7528\u8C01
-#default=minExecutors(\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u6B64\u9879\u914D\u7F6E\u503C)
+#Initial number of executors; if spark.executor.instances is also set, the smaller value wins; default=minExecutors (used when this is unset)
 spark.dynamicAllocation.initialExecutors=1
-#executor\u52A8\u6001\u5206\u914D\u53EF\u5206\u914D\u6700\u5927\u6570\u91CF
-#default=infinity
+#Maximum number of executors dynamic allocation may scale up to; default=infinity
 spark.dynamicAllocation.maxExecutors=60
-#executor\u52A8\u6001\u6536\u7F29\u7684\u6700\u5C0F\u6570\u91CF
-#default=0
+#Minimum number of executors dynamic allocation may scale down to; default=0
 spark.dynamicAllocation.minExecutors=1
-#\u6279\u6B21\u8C03\u5EA6\u5EF6\u8FDF\u591A\u957F\u65F6\u95F4\u5F00\u59CB\u589E\u52A0executor
-#default=1s
+#How long batches may stay backlogged before executors are added; default=1s
 spark.dynamicAllocation.schedulerBacklogTimeout=1s
-#\u540C\u4E0A,\u4F46\u662F\u662F\u9488\u5BF9\u4E4B\u540E\u7684\u8BF7\u6C42
-#default=SchedulerBacklogTimeout(\u4E0D\u8BBE\u7F6E\u4F7F\u7528\u6B64\u9879\u914D\u7F6E\u503C)
+#Same as above, but for subsequent requests; default=schedulerBacklogTimeout (used when this is unset)
 spark.dynamicAllocation.sustainedSchedulerBacklogTimeout=1s
 
 ######################################################
@@ -117,7 +97,7 @@ spark.source.kafka.consume.key.deserializer=org.apache.kafka.common.serializatio
 spark.source.kafka.consume.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
 spark.source.kafka.consume.fetch.max.wait.ms=3000
 spark.source.kafka.consume.repartition=60
-#\u7B2C\u4E00\u6B21\u6D88\u8D39kafka topic\u7684\u65F6\u5019\u6307\u5B9A\u4ECE\u4EC0\u4E48\u4F4D\u7F6E\u6D88\u8D39 \u6709\u4E24\u4E2A\u53EF\u9009\u503Clatest[\u6700\u65B0\u4F4D\u7F6E],earliest[\u6700\u65E9\u4F4D\u7F6E]
+#Where to start consuming a kafka topic on first read; two values are allowed: latest [newest offset], earliest [oldest offset]
 spark.source.kafka.consume.auto.offset.reset=earliest
 spark.source.kafka.offset.store.type=mysql
 spark.source.kafka.offset.store.mysql.table=consumer_offsets
@@ -142,10 +122,9 @@ spark.sink.mysql.password=123456
 ######################################################
 #default=0
 spark.monitor.congestion.batch=0
-#\u5806\u79EF\u591A\u5C11\u4E2A\u6279\u6B21\u4E4B\u540Ekill\u6389\u4EFB\u52A1,\u9ED8\u8BA4\u662F0\u4E0Dkill,\u914D\u5408\u4EFB\u52A1\u81EA\u52A8\u91CD\u542F\u529F\u80FD\u53EF\u6709\u6548\u91CD\u542F\u5806\u79EF\u4EFB\u52A1\u4F7F\u6062\u590D
-#default=0
+#Kill the task after this many batches have backed up; 0 (the default) means never kill. Combined with automatic task restart, this lets backlogged tasks recover; default=0
 spark.monitor.suicide.batch=0
-#zk\u5730\u5740
+#zookeeper address
 spark.monitor.zookeeper=localhost:2181
 #spark.monitor.dingding.url=https://oapi.dingtalk.com/robot/send?access_token=d4d19790b4d4b83bfbeeb9f67e75ed5b1c2e3a40968e9d908df7c691c0f78afe
 spark.monitor.dingding.user=
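
One functional difference between this test profile and default.properties above: the default file keeps Kafka offsets in Kafka itself (spark.source.kafka.offset.store.type=kafka), while the test profile stores them in MySQL. The relevant keys, exactly as they already appear in this diff:

    #Offsets for the test profile live in a MySQL table rather than in Kafka
    spark.source.kafka.offset.store.type=mysql
    spark.source.kafka.offset.store.mysql.table=consumer_offsets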