You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by gi...@apache.org on 2022/03/04 15:59:35 UTC

[dolphinscheduler-website] branch asf-site updated: Automated deployment: 668390cd0ff1f6379d40bff0267a9f53106c83f1

This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler-website.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new f4f0b2b  Automated deployment: 668390cd0ff1f6379d40bff0267a9f53106c83f1
f4f0b2b is described below

commit f4f0b2b49a0cdc012035db403c0cd23bdf194f45
Author: github-actions[bot] <gi...@users.noreply.github.com>
AuthorDate: Fri Mar 4 15:59:28 2022 +0000

    Automated deployment: 668390cd0ff1f6379d40bff0267a9f53106c83f1
---
 .../About_DolphinScheduler.html                    |   8 +-
 .../About_DolphinScheduler.json                    |   2 +-
 en-us/docs/2.0.3/user_doc/architecture/cache.html  |  10 +-
 en-us/docs/2.0.3/user_doc/architecture/cache.json  |   2 +-
 .../2.0.3/user_doc/architecture/configuration.html |  54 ++++----
 .../2.0.3/user_doc/architecture/configuration.json |   2 +-
 en-us/docs/2.0.3/user_doc/architecture/design.html | 147 +++++++++++----------
 en-us/docs/2.0.3/user_doc/architecture/design.json |   2 +-
 .../2.0.3/user_doc/architecture/designplus.html    |  10 +-
 .../2.0.3/user_doc/architecture/designplus.json    |   2 +-
 .../2.0.3/user_doc/architecture/load-balance.html  |  20 +--
 .../2.0.3/user_doc/architecture/load-balance.json  |   2 +-
 .../docs/2.0.3/user_doc/architecture/metadata.html |  20 +--
 .../docs/2.0.3/user_doc/architecture/metadata.json |   2 +-
 .../user_doc/architecture/task-structure.html      |  31 ++---
 .../user_doc/architecture/task-structure.json      |   2 +-
 .../guide/alert/alert_plugin_user_guide.html       |   3 +-
 .../guide/alert/alert_plugin_user_guide.json       |   2 +-
 .../user_doc/guide/alert/enterprise-wechat.html    |   1 +
 .../user_doc/guide/alert/enterprise-wechat.json    |   2 +-
 .../docs/2.0.3/user_doc/guide/datasource/hive.html |   2 +-
 .../docs/2.0.3/user_doc/guide/datasource/hive.json |   2 +-
 .../user_doc/guide/datasource/postgresql.html      |   2 +-
 .../user_doc/guide/datasource/postgresql.json      |   2 +-
 .../2.0.3/user_doc/guide/expansion-reduction.html  |  18 +--
 .../2.0.3/user_doc/guide/expansion-reduction.json  |   2 +-
 en-us/docs/2.0.3/user_doc/guide/flink-call.html    |  22 +--
 en-us/docs/2.0.3/user_doc/guide/flink-call.json    |   2 +-
 .../2.0.3/user_doc/guide/installation/cluster.html |  10 +-
 .../2.0.3/user_doc/guide/installation/cluster.json |   2 +-
 .../2.0.3/user_doc/guide/installation/docker.html  |  72 +++++-----
 .../2.0.3/user_doc/guide/installation/docker.json  |   2 +-
 .../user_doc/guide/installation/hardware.html      |   8 +-
 .../user_doc/guide/installation/hardware.json      |   2 +-
 .../user_doc/guide/installation/kubernetes.html    |  38 +++---
 .../user_doc/guide/installation/kubernetes.json    |   2 +-
 .../guide/installation/pseudo-cluster.html         |  20 +--
 .../guide/installation/pseudo-cluster.json         |   2 +-
 .../user_doc/guide/installation/standalone.html    |   6 +-
 .../user_doc/guide/installation/standalone.json    |   2 +-
 en-us/docs/2.0.3/user_doc/guide/monitor.html       |  16 +--
 en-us/docs/2.0.3/user_doc/guide/monitor.json       |   2 +-
 .../guide/observability/skywalking-agent.html      |  12 +-
 .../guide/observability/skywalking-agent.json      |   2 +-
 en-us/docs/2.0.3/user_doc/guide/open-api.html      |   8 +-
 en-us/docs/2.0.3/user_doc/guide/open-api.json      |   2 +-
 .../2.0.3/user_doc/guide/parameter/context.html    |   4 +-
 .../2.0.3/user_doc/guide/parameter/context.json    |   2 +-
 .../2.0.3/user_doc/guide/project/project-list.html |   4 +-
 .../2.0.3/user_doc/guide/project/project-list.json |   2 +-
 .../user_doc/guide/project/task-instance.html      |   2 +-
 .../user_doc/guide/project/task-instance.json      |   2 +-
 .../guide/project/workflow-definition.html         |  12 +-
 .../guide/project/workflow-definition.json         |   2 +-
 .../user_doc/guide/project/workflow-instance.html  |  12 +-
 .../user_doc/guide/project/workflow-instance.json  |   2 +-
 en-us/docs/2.0.3/user_doc/guide/resource.html      |  10 +-
 en-us/docs/2.0.3/user_doc/guide/resource.json      |   2 +-
 en-us/docs/2.0.3/user_doc/guide/security.html      |  12 +-
 en-us/docs/2.0.3/user_doc/guide/security.json      |   2 +-
 .../docs/2.0.3/user_doc/guide/task/conditions.html |   2 +-
 .../docs/2.0.3/user_doc/guide/task/conditions.json |   2 +-
 en-us/docs/2.0.3/user_doc/guide/task/datax.html    |   2 +-
 en-us/docs/2.0.3/user_doc/guide/task/datax.json    |   2 +-
 .../docs/2.0.3/user_doc/guide/task/dependent.html  |   2 +-
 .../docs/2.0.3/user_doc/guide/task/dependent.json  |   2 +-
 en-us/docs/2.0.3/user_doc/guide/task/flink.html    |   8 +-
 en-us/docs/2.0.3/user_doc/guide/task/flink.json    |   2 +-
 .../docs/2.0.3/user_doc/guide/task/map-reduce.html |   6 +-
 .../docs/2.0.3/user_doc/guide/task/map-reduce.json |   2 +-
 en-us/docs/2.0.3/user_doc/guide/task/spark.html    |   8 +-
 en-us/docs/2.0.3/user_doc/guide/task/spark.json    |   2 +-
 en-us/docs/2.0.3/user_doc/guide/task/sql.html      |   6 +-
 en-us/docs/2.0.3/user_doc/guide/task/sql.json      |   2 +-
 en-us/docs/2.0.3/user_doc/guide/upgrade.html       |  16 +--
 en-us/docs/2.0.3/user_doc/guide/upgrade.json       |   2 +-
 .../About_DolphinScheduler.html                    |   8 +-
 .../About_DolphinScheduler.json                    |   2 +-
 en-us/docs/latest/user_doc/architecture/cache.html |  10 +-
 en-us/docs/latest/user_doc/architecture/cache.json |   2 +-
 .../user_doc/architecture/configuration.html       |  54 ++++----
 .../user_doc/architecture/configuration.json       |   2 +-
 .../docs/latest/user_doc/architecture/design.html  | 147 +++++++++++----------
 .../docs/latest/user_doc/architecture/design.json  |   2 +-
 .../latest/user_doc/architecture/designplus.html   |  10 +-
 .../latest/user_doc/architecture/designplus.json   |   2 +-
 .../latest/user_doc/architecture/load-balance.html |  20 +--
 .../latest/user_doc/architecture/load-balance.json |   2 +-
 .../latest/user_doc/architecture/metadata.html     |  20 +--
 .../latest/user_doc/architecture/metadata.json     |   2 +-
 .../user_doc/architecture/task-structure.html      |  31 ++---
 .../user_doc/architecture/task-structure.json      |   2 +-
 .../guide/alert/alert_plugin_user_guide.html       |   3 +-
 .../guide/alert/alert_plugin_user_guide.json       |   2 +-
 .../user_doc/guide/alert/enterprise-wechat.html    |   1 +
 .../user_doc/guide/alert/enterprise-wechat.json    |   2 +-
 .../latest/user_doc/guide/datasource/hive.html     |   2 +-
 .../latest/user_doc/guide/datasource/hive.json     |   2 +-
 .../user_doc/guide/datasource/postgresql.html      |   2 +-
 .../user_doc/guide/datasource/postgresql.json      |   2 +-
 .../latest/user_doc/guide/expansion-reduction.html |  18 +--
 .../latest/user_doc/guide/expansion-reduction.json |   2 +-
 en-us/docs/latest/user_doc/guide/flink-call.html   |  22 +--
 en-us/docs/latest/user_doc/guide/flink-call.json   |   2 +-
 .../user_doc/guide/installation/cluster.html       |  10 +-
 .../user_doc/guide/installation/cluster.json       |   2 +-
 .../latest/user_doc/guide/installation/docker.html |  72 +++++-----
 .../latest/user_doc/guide/installation/docker.json |   2 +-
 .../user_doc/guide/installation/hardware.html      |   8 +-
 .../user_doc/guide/installation/hardware.json      |   2 +-
 .../user_doc/guide/installation/kubernetes.html    |  38 +++---
 .../user_doc/guide/installation/kubernetes.json    |   2 +-
 .../guide/installation/pseudo-cluster.html         |  20 +--
 .../guide/installation/pseudo-cluster.json         |   2 +-
 .../user_doc/guide/installation/standalone.html    |   6 +-
 .../user_doc/guide/installation/standalone.json    |   2 +-
 en-us/docs/latest/user_doc/guide/monitor.html      |  16 +--
 en-us/docs/latest/user_doc/guide/monitor.json      |   2 +-
 .../guide/observability/skywalking-agent.html      |  12 +-
 .../guide/observability/skywalking-agent.json      |   2 +-
 en-us/docs/latest/user_doc/guide/open-api.html     |   8 +-
 en-us/docs/latest/user_doc/guide/open-api.json     |   2 +-
 .../latest/user_doc/guide/parameter/context.html   |   4 +-
 .../latest/user_doc/guide/parameter/context.json   |   2 +-
 .../user_doc/guide/project/project-list.html       |   4 +-
 .../user_doc/guide/project/project-list.json       |   2 +-
 .../user_doc/guide/project/task-instance.html      |   2 +-
 .../user_doc/guide/project/task-instance.json      |   2 +-
 .../guide/project/workflow-definition.html         |  12 +-
 .../guide/project/workflow-definition.json         |   2 +-
 .../user_doc/guide/project/workflow-instance.html  |  12 +-
 .../user_doc/guide/project/workflow-instance.json  |   2 +-
 en-us/docs/latest/user_doc/guide/resource.html     |  10 +-
 en-us/docs/latest/user_doc/guide/resource.json     |   2 +-
 en-us/docs/latest/user_doc/guide/security.html     |  12 +-
 en-us/docs/latest/user_doc/guide/security.json     |   2 +-
 .../latest/user_doc/guide/task/conditions.html     |   2 +-
 .../latest/user_doc/guide/task/conditions.json     |   2 +-
 en-us/docs/latest/user_doc/guide/task/datax.html   |   2 +-
 en-us/docs/latest/user_doc/guide/task/datax.json   |   2 +-
 .../docs/latest/user_doc/guide/task/dependent.html |   2 +-
 .../docs/latest/user_doc/guide/task/dependent.json |   2 +-
 en-us/docs/latest/user_doc/guide/task/flink.html   |   8 +-
 en-us/docs/latest/user_doc/guide/task/flink.json   |   2 +-
 .../latest/user_doc/guide/task/map-reduce.html     |   6 +-
 .../latest/user_doc/guide/task/map-reduce.json     |   2 +-
 en-us/docs/latest/user_doc/guide/task/spark.html   |   8 +-
 en-us/docs/latest/user_doc/guide/task/spark.json   |   2 +-
 en-us/docs/latest/user_doc/guide/task/sql.html     |   6 +-
 en-us/docs/latest/user_doc/guide/task/sql.json     |   2 +-
 en-us/docs/latest/user_doc/guide/upgrade.html      |  16 +--
 en-us/docs/latest/user_doc/guide/upgrade.json      |   2 +-
 152 files changed, 726 insertions(+), 714 deletions(-)

diff --git a/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html b/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
index 6cbf84c..25043bd 100644
--- a/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
+++ b/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
@@ -12,19 +12,19 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>
-<h1>High Reliability</h1>
+<h2>High Reliability</h2>
 <ul>
 <li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>
 </ul>
-<h1>User-Friendly</h1>
+<h2>User-Friendly</h2>
 <ul>
 <li>All process definition operations are visualized, Visualization process defines key information at a glance, One-click deployment</li>
 </ul>
-<h1>Rich Scenarios</h1>
+<h2>Rich Scenarios</h2>
 <ul>
 <li>Support multi-tenant. Support many task types e.g., spark,flink,hive, mr, shell, python, sub_process</li>
 </ul>
-<h1>High Expansibility</h1>
+<h2>High Expansibility</h2>
 <ul>
 <li>Support custom task types, Distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster</li>
 </ul>
diff --git a/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.json b/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
index 72217ed..72d761a 100644
--- a/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
+++ b/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
@@ -1,6 +1,6 @@
 {
   "filename": "About_DolphinScheduler.md",
-  "__html": "<h1>About DolphinScheduler</h1>\n<p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>\n<h1>High Reliability</h1>\n<ul>\n<li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>\n</ul>\n<h1>User-Friendly</h1>\n<ul>\n [...]
+  "__html": "<h1>About DolphinScheduler</h1>\n<p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>\n<h2>High Reliability</h2>\n<ul>\n<li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>\n</ul>\n<h2>User-Friendly</h2>\n<ul>\n [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/cache.html b/en-us/docs/2.0.3/user_doc/architecture/cache.html
index c707024..75f4cd2 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/cache.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/cache.html
@@ -10,11 +10,11 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h4>Purpose</h4>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Purpose</h2>
 <p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>
 <p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a cache module is introduced to reduce the DB read pressure and speed up the core scheduling process;</p>
-<h4>Cache settings</h4>
+<h2>Cache Settings</h2>
 <pre><code class="language-yaml"><span class="hljs-attr">spring:</span>
   <span class="hljs-attr">cache:</span>
     <span class="hljs-comment"># default disable cache, you can enable by `type: caffeine`</span>
@@ -30,9 +30,9 @@
 </code></pre>
 <p>The cache-module use <a href="https://spring.io/guides/gs/caching/">spring-cache</a>, so you can set cache config in the spring application.yaml directly. Default disable cache, and you can enable it by <code>type: caffeine</code>.</p>
 <p>With the config of <a href="https://github.com/ben-manes/caffeine">caffeine</a>, you can set the cache size, expire time, etc.</p>
-<h4>Cache Read</h4>
+<h2>Cache Read</h2>
 <p>The cache adopts the annotation <code>@Cacheable</code> of spring-cache and is configured in the mapper layer. For example: <code>TenantMapper</code>.</p>
-<h4>Cache Evict</h4>
+<h2>Cache Evict</h2>
 <p>The business data update comes from the api-server, and the cache end is in the master-server. So it is necessary to monitor the data update of the api-server (aspect intercept <code>@CacheEvict</code>), and the master-server will be notified when the cache eviction is required.</p>
 <p>It should be noted that the final strategy for cache update comes from the user's expiration strategy configuration in caffeine, so please configure it in conjunction with the business;</p>
 <p>The sequence diagram is shown in the following figure:</p>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/cache.json b/en-us/docs/2.0.3/user_doc/architecture/cache.json
index d71595c..7357e19 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/cache.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/cache.json
@@ -1,6 +1,6 @@
 {
   "filename": "cache.md",
-  "__html": "<h3>Cache</h3>\n<h4>Purpose</h4>\n<p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>\n<p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a [...]
+  "__html": "<h1>Cache</h1>\n<h2>Purpose</h2>\n<p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>\n<p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/cache.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/configuration.html b/en-us/docs/2.0.3/user_doc/architecture/configuration.html
index afe9708..c534520 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/configuration.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/configuration.html
@@ -11,13 +11,13 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h1>Preface</h1>
+<h1>Configuration</h1>
+<h2>Preface</h2>
 <p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>
-<h1>Directory Structure</h1>
+<h2>Directory Structure</h2>
 <p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside. This document only describes DolphinScheduler configurations and other modules are not going into.</p>
 <p>[Note: the DolphinScheduler (hereinafter called the ‘DS’) .]</p>
-<pre><code>
-├─bin                               DS application commands directory
+<pre><code>├─bin                               DS application commands directory
 │  ├─dolphinscheduler-daemon.sh         startup/shutdown DS application 
 │  ├─start-all.sh                  A     startup all DS services with configurations
 │  ├─stop-all.sh                        shutdown all DS services with configurations
@@ -51,14 +51,12 @@
 │  ├─upgrade-dolphinscheduler.sh        DS database upgrade script
 │  ├─monitor-server.sh                  DS monitor-server start script       
 │  ├─scp-hosts.sh                       transfer installation files script                                     
-│  ├─remove-zk-node.sh                  cleanup zookeeper caches script       
+│  ├─remove-zk-node.sh                  cleanup ZooKeeper caches script       
 ├─ui                                front-end web resources directory
 ├─lib                               DS .jar dependencies directory
 ├─install.sh                        auto-setup DS services script
-
-
 </code></pre>
-<h1>Configurations in Details</h1>
+<h2>Configurations in Details</h2>
 <table>
 <thead>
 <tr>
@@ -130,8 +128,8 @@
 </tr>
 </tbody>
 </table>
-<h2><a href="http://1.dolphinscheduler-daemon.sh">1.dolphinscheduler-daemon.sh</a> [startup/shutdown DS application]</h2>
-<p><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> is responsible for DS startup &amp; shutdown.
+<h3><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> [startup/shutdown DS application]</h3>
+<p><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> is responsible for DS startup and shutdown.
 Essentially, <a href="http://start-all.sh/stop-all.sh">start-all.sh/stop-all.sh</a> startup/shutdown the cluster via <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a>.
 Currently, DS just makes a basic config, please config further JVM options based on your practical situation of resources.</p>
 <p>Default simplified parameters are:</p>
@@ -150,7 +148,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 <blockquote>
 <p>&quot;-XX:DisableExplicitGC&quot; is not recommended due to may lead to memory link (DS dependent on Netty to communicate).</p>
 </blockquote>
-<h2>2.datasource.properties [datasource config properties]</h2>
+<h3>datasource.properties [datasource config properties]</h3>
 <p>DS uses Druid to manage database connections and default simplified configs are:</p>
 <table>
 <thead>
@@ -263,7 +261,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>3.registry.properties [registry config properties, default is zookeeper]</h2>
+<h3>registry.properties [registry config properties, default is zookeeper]</h3>
 <table>
 <thead>
 <tr>
@@ -281,12 +279,12 @@ Currently, DS just makes a basic config, please config further JVM options bas
 <tr>
 <td>registry.servers</td>
 <td>localhost:2181</td>
-<td>zookeeper cluster connection info</td>
+<td>ZooKeeper cluster connection info</td>
 </tr>
 <tr>
 <td>registry.namespace</td>
 <td>dolphinscheduler</td>
-<td>DS is stored under zookeeper root directory(Start without /)</td>
+<td>DS is stored under ZooKeeper root directory(Start without /)</td>
 </tr>
 <tr>
 <td><a href="http://registry.base.sleep.time.ms">registry.base.sleep.time.ms</a></td>
@@ -315,7 +313,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>4.common.properties [hadoop、s3、yarn config properties]</h2>
+<h3>common.properties [hadoop、s3、yarn config properties]</h3>
 <p>Currently, common.properties mainly configures hadoop/s3a related configurations.</p>
 <table>
 <thead>
@@ -418,7 +416,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>5.application-api.properties [API-service log config]</h2>
+<h3>application-api.properties [API-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -480,7 +478,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>6.master.properties [master-service log config]</h2>
+<h3>master.properties [master-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -542,7 +540,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>7.worker.properties [worker-service log config]</h2>
+<h3>worker.properties [worker-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -584,7 +582,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>8.alert.properties [alert-service log config]</h2>
+<h3>alert.properties [alert-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -706,7 +704,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>9.quartz.properties [quartz config properties]</h2>
+<h3>quartz.properties [quartz config properties]</h3>
 <p>This part describes quartz configs and please configure them based on your practical situation and resources.</p>
 <table>
 <thead>
@@ -809,22 +807,22 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>10.install_config.conf [DS environment variables configuration script[install/start DS]]</h2>
+<h3>install_config.conf [DS environment variables configuration script[install/start DS]]</h3>
 <p>install_config.conf is a bit complicated and is mainly used in the following two places.</p>
 <ul>
-<li>1.DS cluster auto installation</li>
+<li>DS Cluster Auto Installation</li>
 </ul>
 <blockquote>
 <p>System will load configs in the install_config.conf and auto-configure files below, based on the file content when executing '<a href="http://install.sh">install.sh</a>'.
 Files such as <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a>、datasource.properties、registry.properties、common.properties、application-api.properties、master.properties、worker.properties、alert.properties、quartz.properties and etc.</p>
 </blockquote>
 <ul>
-<li>2.Startup/shutdown DS cluster</li>
+<li>Startup/Shutdown DS Cluster</li>
 </ul>
 <blockquote>
 <p>The system will load masters, workers, alertServer, apiServers and other parameters inside the file to startup/shutdown DS cluster.</p>
 </blockquote>
-<p>File content as follows:</p>
+<h4>File Content as Follows:</h4>
 <pre><code class="language-bash">
 <span class="hljs-comment"># Note:  please escape the character if the file contains special characters such as `.*[]^${}\+?|()@#&amp;`.</span>
 <span class="hljs-comment">#   eg: `[` escape to `\[`</span>
@@ -832,7 +830,7 @@ Files such as <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemo
 <span class="hljs-comment"># Database type (DS currently only supports PostgreSQL and MySQL)</span>
 dbtype=<span class="hljs-string">&quot;mysql&quot;</span>
 
-<span class="hljs-comment"># Database url &amp; port</span>
+<span class="hljs-comment"># Database url and port</span>
 dbhost=<span class="hljs-string">&quot;192.168.xx.xx:3306&quot;</span>
 
 <span class="hljs-comment"># Database name</span>
@@ -845,7 +843,7 @@ username=<span class="hljs-string">&quot;xx&quot;</span>
 <span class="hljs-comment"># Database password</span>
 password=<span class="hljs-string">&quot;xx&quot;</span>
 
-<span class="hljs-comment"># Zookeeper url</span>
+<span class="hljs-comment"># ZooKeeper url</span>
 zkQuorum=<span class="hljs-string">&quot;192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181&quot;</span>
 
 <span class="hljs-comment"># DS installation path, such as &#x27;/data1_1T/dolphinscheduler&#x27;</span>
@@ -946,7 +944,7 @@ alertServer=<span class="hljs-string">&quot;ds3&quot;</span>
 <span class="hljs-comment"># Host deploy API-service</span>
 apiServers=<span class="hljs-string">&quot;ds1&quot;</span>
 </code></pre>
-<h2>11.dolphinscheduler_env.sh [load environment variables configs]</h2>
+<h3>11.dolphinscheduler_env.sh [load environment variables configs]</h3>
 <p>When using shell to commit tasks, DS will load environment variables inside dolphinscheduler_env.sh into the host.
 Types of tasks involved are: Shell task、Python task、Spark task、Flink task、Datax task and etc.</p>
 <pre><code class="language-bash"><span class="hljs-built_in">export</span> HADOOP_HOME=/opt/soft/hadoop
@@ -962,7 +960,7 @@ Types of tasks involved are: Shell task、Python task、Spark task、Flink task
 <span class="hljs-built_in">export</span> PATH=<span class="hljs-variable">$HADOOP_HOME</span>/bin:<span class="hljs-variable">$SPARK_HOME1</span>/bin:<span class="hljs-variable">$SPARK_HOME2</span>/bin:<span class="hljs-variable">$PYTHON_HOME</span>:<span class="hljs-variable">$JAVA_HOME</span>/bin:<span class="hljs-variable">$HIVE_HOME</span>/bin:<span class="hljs-variable">$PATH</span>:<span class="hljs-variable">$FLINK_HOME</span>/bin:<span class="hljs-variable">$DATAX_HOME</span>:<s [...]
 
 </code></pre>
-<h2>12. Services logback configs</h2>
+<h3>12. Services logback configs</h3>
 <table>
 <thead>
 <tr>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/configuration.json b/en-us/docs/2.0.3/user_doc/architecture/configuration.json
index 54ec905..bb47228 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/configuration.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/configuration.json
@@ -1,6 +1,6 @@
 {
   "filename": "configuration.md",
-  "__html": "<!-- markdown-link-check-disable -->\n<h1>Preface</h1>\n<p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>\n<h1>Directory Structure</h1>\n<p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside. This document only descr [...]
+  "__html": "<!-- markdown-link-check-disable -->\n<h1>Configuration</h1>\n<h2>Preface</h2>\n<p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>\n<h2>Directory Structure</h2>\n<p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside.  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/configuration.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/design.html b/en-us/docs/2.0.3/user_doc/architecture/design.html
index 5372c6c..0256f7e 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/design.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/design.html
@@ -10,34 +10,34 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the
 scheduling system</p>
-<h3>1.System Structure</h3>
-<h4>1.1 System architecture diagram</h4>
+<h2>System Structure</h2>
+<h3>System Architecture Diagram</h3>
 <p align="center">
   <img src="/img/architecture-1.3.0.jpg" alt="System architecture diagram"  width="70%" />
   <p align="center">
         <em>System architecture diagram</em>
   </p>
 </p>
-<h4>1.2 Start process activity diagram</h4>
+<h3>Start Process Activity Diagram</h3>
 <p align="center">
   <img src="/img/master-process-2.0-en.png" alt="Start process activity diagram"  width="70%" />
   <p align="center">
         <em>Start process activity diagram</em>
   </p>
 </p>
-<h4>1.3 Architecture description</h4>
+<h3>Architecture Description</h3>
 <ul>
 <li>
 <p><strong>MasterServer</strong></p>
 <p>MasterServer adopts a distributed and centerless design concept. MasterServer is mainly responsible for DAG task
 segmentation, task submission monitoring, and monitoring the health status of other MasterServer and WorkerServer at
-the same time. When the MasterServer service starts, register a temporary node with Zookeeper, and perform fault
-tolerance by monitoring changes in the temporary node of Zookeeper. MasterServer provides monitoring services based on
+the same time. When the MasterServer service starts, register a temporary node with ZooKeeper, and perform fault
+tolerance by monitoring changes in the temporary node of ZooKeeper. MasterServer provides monitoring services based on
 netty.</p>
-<h5>The service mainly includes:</h5>
+<h4>The Service Mainly Includes:</h4>
 <ul>
 <li>
 <p><strong>MasterSchedulerService</strong> is a scanning thread that scans the <strong>command</strong> table in the database regularly,
@@ -59,21 +59,24 @@ for, and uses the thread pool to process the state events of the workflow</p>
 <li>
 <p><strong>WorkerServer</strong></p>
 <pre><code>WorkerServer also adopts a distributed centerless design concept, supports custom task plug-ins, and is mainly responsible for task execution and log services.
-When the WorkerServer service starts, it registers a temporary node with Zookeeper and maintains a heartbeat.
+When the WorkerServer service starts, it registers a temporary node with ZooKeeper and maintains a heartbeat.
 </code></pre>
+<h4>The Service Mainly Includes</h4>
+<ul>
+<li>
+<p><strong>WorkerManagerThread</strong> mainly receives tasks sent by the master through netty, and calls <strong>TaskExecuteThread</strong> corresponding executors according to different task types.</p>
+</li>
+<li>
+<p><strong>RetryReportTaskStatusThread</strong> mainly reports the task status to the master through netty. If the report fails, the report will always be retried.</p>
+</li>
+<li>
+<p><strong>LoggerServer</strong> is a log service that provides log fragment viewing, refreshing and downloading functions</p>
 </li>
 </ul>
-<h5>The service mainly includes</h5>
-<pre><code>- **WorkerManagerThread** mainly receives tasks sent by the master through netty, and calls **TaskExecuteThread** corresponding executors according to different task types.
- 
-- **RetryReportTaskStatusThread** mainly reports the task status to the master through netty. If the report fails, the report will always be retried.
-
-- **LoggerServer** is a log service that provides log fragment viewing, refreshing and downloading functions
-</code></pre>
-<ul>
+</li>
 <li>
 <p><strong>Registry</strong></p>
-<p>The registry is implemented as a plug-in, and Zookeeper is supported by default. The MasterServer and WorkerServer
+<p>The registry is implemented as a plug-in, and ZooKeeper is supported by default. The MasterServer and WorkerServer
 nodes in the system use the registry for cluster management and fault tolerance. In addition, the system also performs
 event monitoring and distributed locks based on the registry.</p>
 </li>
@@ -94,9 +97,9 @@ the node and so on.</p>
 at <a href="../guide/homepage.md">Introduction to Functions</a> section。</p>
 </li>
 </ul>
-<h4>1.4 Architecture design ideas</h4>
-<h5>One、Decentralization VS centralization</h5>
-<h6>Centralized thinking</h6>
+<h3>Architecture Design Ideas</h3>
+<h4>Decentralization VS Centralization</h4>
+<h5>Centralized Thinking</h5>
 <p>The centralized design concept is relatively simple. The nodes in the distributed cluster are divided into roles
 according to roles, which are roughly divided into two roles:</p>
 <p align="center">
@@ -120,7 +123,7 @@ different machines, it will cause the Master to be overloaded. If the Scheduler
 can only submit jobs on a certain machine. When there are more parallel tasks, the pressure on the slave may be
 greater.</li>
 </ul>
-<h6>Decentralized</h6>
+<h5>Decentralized</h5>
  <p align="center">
    <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="Decentralization"  width="50%" />
  </p>
@@ -143,13 +146,13 @@ preset, and when the cluster fails, the nodes of the cluster will automatically
 managers&quot; To preside over the work. The most typical case is Etcd implemented by ZooKeeper and Go language.</p>
 </li>
 <li>
-<p>The decentralization of DolphinScheduler is that the Master/Worker is registered in Zookeeper to realize the
+<p>The decentralization of DolphinScheduler is that the Master/Worker is registered in ZooKeeper to realize the
 non-centralization of the Master cluster and the Worker cluster. The sharding mechanism is used to fairly distribute
 the workflow for execution on the master, and tasks are sent to the workers for execution through different sending
 strategies. Specific task</p>
 </li>
 </ul>
-<h5>Second, the master execution process</h5>
+<h4>The Master Execution Process</h4>
 <ol>
 <li>
 <p>DolphinScheduler uses the sharding algorithm to modulate the command and assigns it according to the sort id of the
@@ -158,8 +161,6 @@ workflow instance</p>
 </li>
 <li>
 <p>DolphinScheduler's process of workflow:</p>
-</li>
-</ol>
 <ul>
 <li>Start the workflow through UI or API calls, and persist a command to the database</li>
 <li>The Master scans the Command table through the sharding algorithm, generates a workflow instance ProcessInstance, and
@@ -172,7 +173,9 @@ EventExecuteService event queue</li>
 <li>EventExecuteService calls WorkflowExecuteThread according to the event queue to submit subsequent tasks and modify
 workflow status</li>
 </ul>
-<h5>Three、Insufficient thread loop waiting problem</h5>
+</li>
+</ol>
+<h4>Insufficient Thread Loop Waiting Problem</h4>
 <ul>
 <li>If there is no sub-process in a DAG, if the number of data in the Command is greater than the threshold set by the
 thread pool, the process directly waits or fails.</li>
@@ -195,10 +198,10 @@ to execute again.</li>
 </ol>
 <p>note: The Master Scheduler thread is executed by FIFO when acquiring the Command.</p>
 <p>So we chose the third way to solve the problem of insufficient threads.</p>
-<h5>Four、Fault-tolerant design</h5>
+<h4>Fault-Tolerant Design</h4>
 <p>Fault tolerance is divided into service downtime fault tolerance and task retry, and service downtime fault tolerance is
 divided into master fault tolerance and worker fault tolerance.</p>
-<h6>1. Downtime fault tolerance</h6>
+<h5>Downtime Fault Tolerance</h5>
 <p>The service fault-tolerance design relies on ZooKeeper's Watcher mechanism, and the implementation principle is shown in the figure:</p>
  <p align="center">
    <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="DolphinScheduler fault-tolerant design"  width="40%" />
@@ -223,7 +226,7 @@ Among them, the Master monitors the directories of other Masters and Workers. If
 <p>Fault-tolerant content: When sending the remove event of the Worker node, the Master only fault-tolerant task instances. Before fault-tolerant, it compares the start time of the instance with the server start-up time, and skips fault-tolerance if after the server start time;</p>
 <p>Fault-tolerant post-processing: Once the Master Scheduler thread finds that the task instance is in the &quot;fault-tolerant&quot; state, it takes over the task and resubmits it.</p>
 <p>Note: Due to &quot;network jitter&quot;, the node may lose its heartbeat with ZooKeeper in a short period of time, and the node's remove event may occur. For this situation, we use the simplest way, that is, once the node and ZooKeeper timeout connection occurs, then directly stop the Master or Worker service.</p>
-<h6>2.Task failed and try again</h6>
+<h5>Task Failed and Try Again</h5>
 <p>Here we must first distinguish the concepts of task failure retry, process failure recovery, and process failure rerun:</p>
 <ul>
 <li>Task failure retry is at the task level and is automatically performed by the scheduling system. For example, if a
@@ -248,7 +251,7 @@ automatically retry until it succeeds or exceeds the configured number of retrie
 supported. But the tasks in the logical node support retry.</p>
 <p>If there is a task failure in the workflow that reaches the maximum number of retries, the workflow will fail to stop,
 and the failed workflow can be manually rerun or process recovery operation</p>
-<h5>Five、Task priority design</h5>
+<h4>Task Priority Design</h4>
 <p>In the early scheduling design, if there is no priority design and the fair scheduling design is used, the task
 submitted first may be completed at the same time as the task submitted later, and the process or task priority cannot
 be set, so We have redesigned this, and our current design is as follows:</p>
@@ -282,7 +285,7 @@ shown below</p>
 </ul>
 </li>
 </ul>
-<h5>Six、Logback and netty implement log access</h5>
+<h4>Logback and Netty Implement Log Access</h4>
 <ul>
 <li>
 <p>Since Web (UI) and Worker are not necessarily on the same machine, viewing the log cannot be like querying a local
@@ -307,52 +310,52 @@ log information.</p>
 file.</li>
 <li>FileAppender is mainly implemented as follows:</li>
 </ul>
-<pre><code class="language-java"><span class="hljs-comment">/**
- * task log appender
- */</span>
-<span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogAppender</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">FileAppender</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
-
-    ...
+<pre><code class="language-java"> <span class="hljs-comment">/**
+  * task log appender
+  */</span>
+ <span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogAppender</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">FileAppender</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
+ 
+     ...
 
-   <span class="hljs-meta">@Override</span>
-   <span class="hljs-function"><span class="hljs-keyword">protected</span> <span class="hljs-keyword">void</span> <span class="hljs-title">append</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
+    <span class="hljs-meta">@Override</span>
+    <span class="hljs-function"><span class="hljs-keyword">protected</span> <span class="hljs-keyword">void</span> <span class="hljs-title">append</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
 
-       <span class="hljs-keyword">if</span> (currentlyActiveFile == <span class="hljs-keyword">null</span>){
-           currentlyActiveFile = getFile();
-       }
-       String activeFile = currentlyActiveFile;
-       <span class="hljs-comment">// thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId</span>
-       String threadName = event.getThreadName();
-       String[] threadNameArr = threadName.split(<span class="hljs-string">&quot;-&quot;</span>);
-       <span class="hljs-comment">// logId = processDefineId_processInstanceId_taskInstanceId</span>
-       String logId = threadNameArr[<span class="hljs-number">1</span>];
-       ...
-       <span class="hljs-keyword">super</span>.subAppend(event);
-   }
+        <span class="hljs-keyword">if</span> (currentlyActiveFile == <span class="hljs-keyword">null</span>){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        <span class="hljs-comment">// thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId</span>
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split(<span class="hljs-string">&quot;-&quot;</span>);
+        <span class="hljs-comment">// logId = processDefineId_processInstanceId_taskInstanceId</span>
+        String logId = threadNameArr[<span class="hljs-number">1</span>];
+        ...
+        <span class="hljs-keyword">super</span>.subAppend(event);
+    }
 }
-
-
-Generate logs in the form of /process definition id/process instance id/task instance id.log
-
-- Filter to match the thread name starting with TaskLogInfo:
-
-- TaskLogFilter is implemented as follows:
-
-```java
-<span class="hljs-comment">/**
-*  task log filter
-*/</span>
+</code></pre>
+<p>Generate logs in the form of /process definition id/process instance id/task instance id.log</p>
+<ul>
+<li>
+<p>Filter to match the thread name starting with TaskLogInfo:</p>
+</li>
+<li>
+<p>TaskLogFilter is implemented as follows:</p>
+</li>
+</ul>
+<pre><code class="language-java"> <span class="hljs-comment">/**
+ *  task log filter
+ */</span>
 <span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogFilter</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">Filter</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
 
-   <span class="hljs-meta">@Override</span>
-   <span class="hljs-function"><span class="hljs-keyword">public</span> FilterReply <span class="hljs-title">decide</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
-       <span class="hljs-keyword">if</span> (event.getThreadName().startsWith(<span class="hljs-string">&quot;TaskLogInfo-&quot;</span>)){
-           <span class="hljs-keyword">return</span> FilterReply.ACCEPT;
-       }
-       <span class="hljs-keyword">return</span> FilterReply.DENY;
-   }
+    <span class="hljs-meta">@Override</span>
+    <span class="hljs-function"><span class="hljs-keyword">public</span> FilterReply <span class="hljs-title">decide</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
+        <span class="hljs-keyword">if</span> (event.getThreadName().startsWith(<span class="hljs-string">&quot;TaskLogInfo-&quot;</span>)){
+            <span class="hljs-keyword">return</span> FilterReply.ACCEPT;
+        }
+        <span class="hljs-keyword">return</span> FilterReply.DENY;
+    }
 }
-
 </code></pre>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/design.json b/en-us/docs/2.0.3/user_doc/architecture/design.json
index fe9dad8..fbe403e 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/design.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/design.json
@@ -1,6 +1,6 @@
 {
   "filename": "design.md",
-  "__html": "<h2>System Architecture Design</h2>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h3>1.System Structure</h3>\n<h4>1.1 System architecture diagram</h4>\n<p align=\"center\">\n  <img src=\"/img/architecture-1.3.0.jpg\" alt=\"System architecture diagram\"  width=\"70%\" />\n  <p align=\"center\">\n        <em>System architecture diagram</em>\n  </p>\n</p>\n<h4>1.2 Start process act [...]
+  "__html": "<h1>System Architecture Design</h1>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h2>System Structure</h2>\n<h3>System Architecture Diagram</h3>\n<p align=\"center\">\n  <img src=\"/img/architecture-1.3.0.jpg\" alt=\"System architecture diagram\"  width=\"70%\" />\n  <p align=\"center\">\n        <em>System architecture diagram</em>\n  </p>\n</p>\n<h3>Start Process Activity Diag [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/design.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/designplus.html b/en-us/docs/2.0.3/user_doc/architecture/designplus.html
index b390936..d35c382 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/designplus.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/designplus.html
@@ -10,10 +10,10 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the
 scheduling system</p>
-<h3>1.Glossary</h3>
+<h2>Glossary</h2>
 <p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the
 form of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until
 there are no subsequent nodes. Examples are as follows:</p>
@@ -48,7 +48,7 @@ provided. <strong>Continue</strong> refers to regardless of the status of the ta
 failure. <strong>End</strong> means that once a failed task is found, Kill will also run the parallel task at the same time, and the
 process fails and ends</p>
 <p><strong>Complement</strong>: Supplement historical data,Supports <strong>interval parallel and serial</strong> two complement methods</p>
-<h3>2.Module introduction</h3>
+<h2>Module Introduction</h2>
 <ul>
 <li>
 <p>dolphinscheduler-alert alarm module, providing AlertServer service.</p>
@@ -69,14 +69,14 @@ process fails and ends</p>
 <p>dolphinscheduler-server MasterServer and WorkerServer services</p>
 </li>
 <li>
-<p>dolphinscheduler-service service module, including Quartz, Zookeeper, log client access service, easy to call server
+<p>dolphinscheduler-service service module, including Quartz, ZooKeeper, log client access service, easy to call server
 module and api module</p>
 </li>
 <li>
 <p>dolphinscheduler-ui front-end module</p>
 </li>
 </ul>
-<h3>Sum up</h3>
+<h2>Sum Up</h2>
 <p>From the perspective of scheduling, this article preliminarily introduces the architecture principles and implementation
 ideas of the big data distributed workflow scheduling system-DolphinScheduler. To be continued</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/2.0.3/user_doc/architecture/designplus.json b/en-us/docs/2.0.3/user_doc/architecture/designplus.json
index 470c03d..e49da02 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/designplus.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/designplus.json
@@ -1,6 +1,6 @@
 {
   "filename": "designplus.md",
-  "__html": "<h2>System Architecture Design</h2>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h3>1.Glossary</h3>\n<p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the\nform of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until\nthere are no subsequent no [...]
+  "__html": "<h1>System Architecture Design</h1>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h2>Glossary</h2>\n<p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the\nform of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until\nthere are no subsequent node [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/designplus.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/load-balance.html b/en-us/docs/2.0.3/user_doc/architecture/load-balance.html
index 27a3067..1a19d36 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/load-balance.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/load-balance.html
@@ -10,30 +10,30 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>
-<h3>DolphinScheduler-Worker load balancing algorithms</h3>
+<h2>DolphinScheduler-Worker Load Balancing Algorithms</h2>
 <p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>
 <p>Weighted random (random)</p>
 <p>Smoothing polling (roundrobin)</p>
 <p>Linear load (lowerweight)</p>
 <p>The default configuration is the linear load.</p>
 <p>As the routing is done on the client side, the master service, you can change master.host.selector in master.properties to configure the algorithm what you want.</p>
-<p>eg: master.host.selector = random (case-insensitive)</p>
-<h3>Worker load balancing configuration</h3>
+<p>e.g. master.host.selector = random (case-insensitive)</p>
+<h2>Worker Load Balancing Configuration</h2>
 <p>The configuration file is worker.properties</p>
-<h4>weight</h4>
+<h3>Weight</h3>
 <p>All of the above load algorithms are weighted based on weights, which affect the outcome of the triage. You can set different weights for different machines by modifying the worker.weight value.</p>
-<h4>Preheating</h4>
+<h3>Preheating</h3>
 <p>With JIT optimisation in mind, we will let the worker run at low power for a period of time after startup so that it can gradually reach its optimal state, a process we call preheating. If you are interested, you can read some articles about JIT.</p>
 <p>So the worker will gradually reach its maximum weight over time after it starts (by default ten minutes, we don't provide a configuration item, you can change it and submit a PR if needed).</p>
-<h3>Load balancing algorithm breakdown</h3>
-<h4>Random (weighted)</h4>
+<h2>Load Balancing Algorithm Breakdown</h2>
+<h3>Random (Weighted)</h3>
 <p>This algorithm is relatively simple, one of the matched workers is selected at random (the weighting affects his weighting).</p>
-<h4>Smoothed polling (weighted)</h4>
+<h3>Smoothed Polling (Weighted)</h3>
 <p>An obvious drawback of the weighted polling algorithm. Namely, under certain specific weights, weighted polling scheduling generates an uneven sequence of instances, and this unsmoothed load may cause some instances to experience transient high loads, leading to a risk of system downtime. To address this scheduling flaw, we provide a smooth weighted polling algorithm.</p>
 <p>Each worker is given two weights, weight (which remains constant after warm-up is complete) and current_weight (which changes dynamically), for each route. The current_weight + weight is iterated over all the workers, and the weight of all the workers is added up and counted as total_weight, then the worker with the largest current_weight is selected as the worker for this task. current_weight-total_weight.</p>
-<h4>Linear weighting (default algorithm)</h4>
+<h3>Linear Weighting (Default Algorithm)</h3>
 <p>The algorithm reports its own load information to the registry at regular intervals. We base our judgement on two main pieces of information</p>
 <ul>
 <li>load average (default is the number of CPU cores * 2)</li>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/load-balance.json b/en-us/docs/2.0.3/user_doc/architecture/load-balance.json
index b334287..807ade1 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/load-balance.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/load-balance.json
@@ -1,6 +1,6 @@
 {
   "filename": "load-balance.md",
-  "__html": "<h3>Load Balance</h3>\n<p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>\n<h3>DolphinScheduler-Worker load balancing algorithms</h3>\n<p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>\n<p>Weighted random (random)</p>\n<p>Smoothing polling (roundrobin)</p>\n<p>Linear load (lowerwei [...]
+  "__html": "<h1>Load Balance</h1>\n<p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>\n<h2>DolphinScheduler-Worker Load Balancing Algorithms</h2>\n<p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>\n<p>Weighted random (random)</p>\n<p>Smoothing polling (roundrobin)</p>\n<p>Linear load (lowerwei [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/load-balance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/metadata.html b/en-us/docs/2.0.3/user_doc/architecture/metadata.html
index a0cc15e..51920de 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/metadata.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/metadata.html
@@ -12,7 +12,7 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p><a name="V5KOl"></a></p>
-<h3>Dolphin Scheduler 2.0 DB Table Overview</h3>
+<h2>Dolphin Scheduler 2.0 DB Table Overview</h2>
 <table>
 <thead>
 <tr>
@@ -121,9 +121,9 @@
 </table>
 <hr>
 <p><a name="XCLy1"></a></p>
-<h3>E-R Diagram</h3>
+<h2>E-R Diagram</h2>
 <p><a name="5hWWZ"></a></p>
-<h4>User Queue DataSource</h4>
+<h3>User Queue DataSource</h3>
 <p><img src="/img/metadata-erd/user-queue-datasource.png" alt="image.png"></p>
 <ul>
 <li>Multiple users can belong to one tenant</li>
@@ -131,7 +131,7 @@
 <li>The user_id field in the t_ds_datasource table indicates the user who created the data source. The user_id in t_ds_relation_datasource_user indicates the user who has permission to the data source.
 <a name="7euSN"></a></li>
 </ul>
-<h4>Project Resource Alert</h4>
+<h3>Project Resource Alert</h3>
 <p><img src="/img/metadata-erd/project-resource-alert.png" alt="image.png"></p>
 <ul>
 <li>User can have multiple projects, User project authorization completes the relationship binding using project_id and user_id in t_ds_relation_project_user table</li>
@@ -140,7 +140,7 @@
 <li>The user_id in the t_ds_udfs table represents the user who created the UDF, and the user_id in the t_ds_relation_udfs_user table represents a user who has permission to the UDF
 <a name="JEw4v"></a></li>
 </ul>
-<h4>Command Process Task</h4>
+<h3>Command Process Task</h3>
 <p><img src="/img/metadata-erd/command.png" alt="image.png"><br /><img src="/img/metadata-erd/process-task.png" alt="image.png"></p>
 <ul>
 <li>A project has multiple process definitions, a process definition can generate multiple process instances, and a process instance can generate multiple task instances</li>
@@ -150,9 +150,9 @@
 </ul>
 <hr>
 <p><a name="yd79T"></a></p>
-<h3>Core Table Schema</h3>
+<h2>Core Table Schema</h2>
 <p><a name="6bVhH"></a></p>
-<h4>t_ds_process_definition</h4>
+<h3>t_ds_process_definition</h3>
 <table>
 <thead>
 <tr>
@@ -255,7 +255,7 @@
 </tbody>
 </table>
 <p><a name="t5uxM"></a></p>
-<h4>t_ds_process_instance</h4>
+<h3>t_ds_process_instance</h3>
 <table>
 <thead>
 <tr>
@@ -428,7 +428,7 @@
 </tbody>
 </table>
 <p><a name="tHZsY"></a></p>
-<h4>t_ds_task_instance</h4>
+<h3>t_ds_task_instance</h3>
 <table>
 <thead>
 <tr>
@@ -551,7 +551,7 @@
 </tbody>
 </table>
 <p><a name="gLGtm"></a></p>
-<h4>t_ds_command</h4>
+<h3>t_ds_command</h3>
 <table>
 <thead>
 <tr>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/metadata.json b/en-us/docs/2.0.3/user_doc/architecture/metadata.json
index 9e16188..67564f7 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/metadata.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/metadata.json
@@ -1,6 +1,6 @@
 {
   "filename": "metadata.md",
-  "__html": "<h1>Dolphin Scheduler 2.0.3 MetaData</h1>\n<p><a name=\"V5KOl\"></a></p>\n<h3>Dolphin Scheduler 2.0 DB Table Overview</h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">Table Name</th>\n<th style=\"text-align:center\">Comment</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">token for access ds backend</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-ali [...]
+  "__html": "<h1>Dolphin Scheduler 2.0.3 MetaData</h1>\n<p><a name=\"V5KOl\"></a></p>\n<h2>Dolphin Scheduler 2.0 DB Table Overview</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">Table Name</th>\n<th style=\"text-align:center\">Comment</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">token for access ds backend</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-ali [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/metadata.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/architecture/task-structure.html b/en-us/docs/2.0.3/user_doc/architecture/task-structure.html
index e19d296..d20cacd 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/task-structure.html
+++ b/en-us/docs/2.0.3/user_doc/architecture/task-structure.html
@@ -10,7 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Overall Tasks Storage Structure</h2>
 <p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>
 <p>The following shows the 't_ds_process_definition' table structure:</p>
 <table>
@@ -198,8 +199,8 @@
     <span class="hljs-string">&quot;timeout&quot;</span>:0
 }
 </code></pre>
-<h1>The Detailed Explanation of The Storage Structure of Each Task Type</h1>
-<h2>Shell Nodes</h2>
+<h2>The Detailed Explanation of The Storage Structure of Each Task Type</h2>
+<h3>Shell Nodes</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -413,7 +414,7 @@
 }
 
 </code></pre>
-<h2>SQL Node</h2>
+<h3>SQL Node</h3>
 <p>Perform data query and update operations on the specified datasource through SQL.</p>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
@@ -720,10 +721,10 @@
     ]
 }
 </code></pre>
-<h2>PROCEDURE [stored procedures] Node</h2>
+<h3>Procedure [stored procedures] Node</h3>
 <p><strong>The node data structure is as follows:</strong>
 <strong>Node data example:</strong></p>
-<h2>SPARK Node</h2>
+<h3>Spark Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1037,7 +1038,7 @@
     ]
 }
 </code></pre>
-<h2>MapReduce(MR) Node</h2>
+<h3>MapReduce(MR) Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1288,7 +1289,7 @@
     ]
 }
 </code></pre>
-<h2>Python Node</h2>
+<h3>Python Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1501,7 +1502,7 @@
     ]
 }
 </code></pre>
-<h2>Flink Node</h2>
+<h3>Flink Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1798,7 +1799,7 @@
     ]
 }
 </code></pre>
-<h2>HTTP Node</h2>
+<h3>HTTP Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2043,7 +2044,7 @@
     ]
 }
 </code></pre>
-<h2>DataX Node</h2>
+<h3>DataX Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2340,7 +2341,7 @@
     ]
 }
 </code></pre>
-<h2>Sqoop Node</h2>
+<h3>Sqoop Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2583,7 +2584,7 @@
             ]
         }
 </code></pre>
-<h2>Condition Branch Node</h2>
+<h3>Condition Branch Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2766,7 +2767,7 @@
     ]
 }
 </code></pre>
-<h2>Subprocess Node</h2>
+<h3>Subprocess Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2951,7 +2952,7 @@
             ]
         }
 </code></pre>
-<h2>DEPENDENT Node</h2>
+<h3>Dependent Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
diff --git a/en-us/docs/2.0.3/user_doc/architecture/task-structure.json b/en-us/docs/2.0.3/user_doc/architecture/task-structure.json
index 85ba5d3..bca7b58 100644
--- a/en-us/docs/2.0.3/user_doc/architecture/task-structure.json
+++ b/en-us/docs/2.0.3/user_doc/architecture/task-structure.json
@@ -1,6 +1,6 @@
 {
   "filename": "task-structure.md",
-  "__html": "<h1>Overall Tasks Storage Structure</h1>\n<p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>\n<p>The following shows the 't_ds_process_definition' table structure:</p>\n<table>\n<thead>\n<tr>\n<th>No.</th>\n<th>field</th>\n<th>type</th>\n<th>description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>1</td>\n<td>id</td>\n<td>int(11)</td>\n<td>primary key</td>\n</tr>\n<tr>\n<td>2</td>\n<td>name</td>\n<td>varchar(255)</td>\n<td>process defin [...]
+  "__html": "<h1>Task Structure</h1>\n<h2>Overall Tasks Storage Structure</h2>\n<p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>\n<p>The following shows the 't_ds_process_definition' table structure:</p>\n<table>\n<thead>\n<tr>\n<th>No.</th>\n<th>field</th>\n<th>type</th>\n<th>description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>1</td>\n<td>id</td>\n<td>int(11)</td>\n<td>primary key</td>\n</tr>\n<tr>\n<td>2</td>\n<td>name</td>\n<td>varchar(255 [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/task-structure.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html b/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html
index 1922b36..3a2f007 100644
--- a/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html
+++ b/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html
@@ -10,7 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>How to Create Alert Plugins and Alert Groups</h2>
 <p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>
 <p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresponding alarm plug-in and fill in the relevant alarm parameters.</p>
 <p>Then select Alarm Group Management, create an alarm group, and select the corresponding alarm instance.</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.json b/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.json
index 7f603fd..998370c 100644
--- a/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.json
+++ b/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.json
@@ -1,6 +1,6 @@
 {
   "filename": "alert_plugin_user_guide.md",
-  "__html": "<h2>How to create alert plugins and alert groups</h2>\n<p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>\n<p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresponding alarm plug-in and fill in the  [...]
+  "__html": "<h1>Alert Component User Guide</h1>\n<h2>How to Create Alert Plugins and Alert Groups</h2>\n<p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>\n<p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresp [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html b/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html
index b919c39..a5fce36 100644
--- a/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html
+++ b/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html
@@ -11,6 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>How to Create Enterprise WeChat Alert</h2>
 <p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>
 <p><img src="/img/alert/enterprise-wechat-plugin.png" alt="enterprise-wechat-plugin"></p>
 <p>Where send type corresponds to app and appchat respectively:</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.json b/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.json
index 2531470..985577e 100644
--- a/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.json
+++ b/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.json
@@ -1,6 +1,6 @@
 {
   "filename": "enterprise-wechat.md",
-  "__html": "<h1>Enterprise WeChat</h1>\n<p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>\n<p><img src=\"/img/alert/enterprise-wechat-plugin.png\" alt=\"enterprise-wechat-plugin\"></p>\n<p>Where send type corresponds to app and appchat respectively:</p>\n<p>APP: <a href=\"https://work.weixin.qq.com/api/doc/90000/90135/90236\">htt [...]
+  "__html": "<h1>Enterprise WeChat</h1>\n<h2>How to Create Enterprise WeChat Alert</h2>\n<p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>\n<p><img src=\"/img/alert/enterprise-wechat-plugin.png\" alt=\"enterprise-wechat-plugin\"></p>\n<p>Where send type corresponds to app and appchat respectively:</p>\n<p>APP: <a href=\"https://wo [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html b/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html
index ae86c46..1625ed0 100644
--- a/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html
+++ b/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html
@@ -31,7 +31,7 @@
 configure <code>common.properties</code>. It is helpful when you try to set env before running HIVE SQL. Parameter
 <code>support.hive.oneSession</code> default value is <code>false</code> and SQL would run in different session if their more than one.</p>
 </blockquote>
-<h2>Use HiveServer2 HA Zookeeper</h2>
+<h2>Use HiveServer2 HA ZooKeeper</h2>
  <p align="center">
     <img src="/img/hive1-en.png" width="80%" />
   </p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/datasource/hive.json b/en-us/docs/2.0.3/user_doc/guide/datasource/hive.json
index 775d12c..6c2403a 100644
--- a/en-us/docs/2.0.3/user_doc/guide/datasource/hive.json
+++ b/en-us/docs/2.0.3/user_doc/guide/datasource/hive.json
@@ -1,6 +1,6 @@
 {
   "filename": "hive.md",
-  "__html": "<h1>HIVE</h1>\n<h2>Use HiveServer2</h2>\n <p align=\"center\">\n    <img src=\"/img/hive-en.png\" width=\"80%\" />\n  </p>\n<ul>\n<li>Data source: select HIVE</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP connected to HIVE</li>\n<li>Port: Enter the port connected to HIVE</li>\n<li>Username: Set the username for connecting to HIVE</li>\n<li>Password: Set the pass [...]
+  "__html": "<h1>HIVE</h1>\n<h2>Use HiveServer2</h2>\n <p align=\"center\">\n    <img src=\"/img/hive-en.png\" width=\"80%\" />\n  </p>\n<ul>\n<li>Data source: select HIVE</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP connected to HIVE</li>\n<li>Port: Enter the port connected to HIVE</li>\n<li>Username: Set the username for connecting to HIVE</li>\n<li>Password: Set the pass [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html b/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html
index f55ca9e..7f4f0f8 100644
--- a/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html
+++ b/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>Data source: select POSTGRESQL</li>
 <li>Data source name: enter the name of the data source</li>
diff --git a/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.json b/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.json
index 8a4a79d..83afa6d 100644
--- a/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.json
+++ b/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.json
@@ -1,6 +1,6 @@
 {
   "filename": "postgresql.md",
-  "__html": "<h1>POSTGRESQL</h1>\n<ul>\n<li>Data source: select POSTGRESQL</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP to connect to POSTGRESQL</li>\n<li>Port: Enter the port to connect to POSTGRESQL</li>\n<li>Username: Set the username for connecting to POSTGRESQL</li>\n<li>Password: Set the password for connecting to POSTGRESQL</li>\n<li>Database name: Enter the name of  [...]
+  "__html": "<h1>PostgreSQL</h1>\n<ul>\n<li>Data source: select POSTGRESQL</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP to connect to POSTGRESQL</li>\n<li>Port: Enter the port to connect to POSTGRESQL</li>\n<li>Username: Set the username for connecting to POSTGRESQL</li>\n<li>Password: Set the password for connecting to POSTGRESQL</li>\n<li>Database name: Enter the name of  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html b/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html
index e255c42..396d48f 100644
--- a/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html
+++ b/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html
@@ -12,19 +12,19 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h1>DolphinScheduler Expansion and Reduction</h1>
-<h2>1. Expansion</h2>
+<h2>Expansion</h2>
 <p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>
 <pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.
        If the physical machine where the expansion master or worker node is located has already installed the scheduled service, skip to [1.4 Modify configuration] Edit the configuration file `conf/config/install_config.conf` on **all ** nodes, add masters or workers parameter, and restart the scheduling cluster.
 </code></pre>
-<h3>1.1 Basic software installation (please install the mandatory items yourself)</h3>
+<h3>Basic Software Installation</h3>
 <ul>
 <li>[required] <a href="https://www.oracle.com/technetwork/java/javase/downloads/index.html">JDK</a> (1.8+):Must be installed, please install and configure JAVA_HOME and PATH variables under /etc/profile</li>
 <li>[optional] If the expansion is a worker node, you need to consider whether to install an external client, such as Hadoop, Hive, Spark Client.</li>
 </ul>
 <pre><code class="language-markdown"> Attention: DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but will only call their Client for the corresponding task submission.
 </code></pre>
-<h3>1.2 Get installation package</h3>
+<h3>Get Installation Package</h3>
 <ul>
 <li>Check which version of DolphinScheduler is used in your existing environment, and get the installation package of the corresponding version, if the versions are different, there may be compatibility problems.</li>
 <li>Confirm the unified installation directory of other nodes, this article assumes that DolphinScheduler is installed in /opt/ directory, and the full path is /opt/dolphinscheduler.</li>
@@ -41,7 +41,7 @@ mv apache-dolphinscheduler-2.0.3-bin  dolphinscheduler
 </code></pre>
 <pre><code class="language-markdown"> Attention: The installation package can be copied directly from an existing environment to an expanded physical machine for use.
 </code></pre>
-<h3>1.3 Create Deployment Users</h3>
+<h3>Create Deployment Users</h3>
 <ul>
 <li>Create deployment users on <strong>all</strong> expansion machines, and be sure to configure sudo-free. If we plan to deploy scheduling on four expansion machines, ds1, ds2, ds3, and ds4, we first need to create deployment users on each machine</li>
 </ul>
@@ -61,7 +61,7 @@ sed -i &#x27;s/Defaults    requirett/#Defaults    requirett/g&#x27; /etc/sudoers
 <span class="hljs-bullet"> -</span> If you find the line &quot;Default requiretty&quot; in the /etc/sudoers file, please also comment it out.
 <span class="hljs-bullet"> -</span> If resource uploads are used, you also need to assign read and write permissions to the deployment user on <span class="hljs-code">`HDFS or MinIO`</span>.
 </code></pre>
-<h3>1.4 Modify configuration</h3>
+<h3>Modify Configuration</h3>
 <ul>
 <li>
 <p>From an existing node such as Master/Worker, copy the conf directory directly to replace the conf directory in the new node. After copying, check if the configuration items are correct.</p>
@@ -124,7 +124,7 @@ workers=&quot;existing worker01:default,existing worker02:default,ds3:default,ds
 </ul>
 <pre><code class="language-shell">sudo chown -R dolphinscheduler:dolphinscheduler dolphinscheduler
 </code></pre>
-<h3>1.4. Restart the cluster &amp; verify</h3>
+<h3>Restart the Cluster and Verify</h3>
 <ul>
 <li>restart the cluster</li>
 </ul>
@@ -170,10 +170,10 @@ sh bin/dolphinscheduler-daemon.sh start alert-server   # start alert  service
 </code></pre>
 <p>If the above services are started normally and the scheduling system page is normal, check whether there is an expanded Master or Worker service in the [Monitor] of the web system. If it exists, the expansion is complete.</p>
 <hr>
-<h2>2. Reduction</h2>
+<h2>Reduction</h2>
 <p>The reduction is to reduce the master or worker services for the existing DolphinScheduler cluster.
 There are two steps for shrinking. After performing the following two steps, the shrinking operation can be completed.</p>
-<h3>2.1 Stop the service on the scaled-down node</h3>
+<h3>Stop the Service on the Scaled-Down Node</h3>
 <ul>
 <li>If you are scaling down the master node, identify the physical machine where the master service is located, and stop the master service on the physical machine.</li>
 <li>If the worker node is scaled down, determine the physical machine where the worker service is to be scaled down and stop the worker and logger services on the physical machine.</li>
@@ -210,7 +210,7 @@ sh bin/dolphinscheduler-daemon.sh start alert-server  # start alert  service
     AlertServer          ----- alert  service
 </code></pre>
 <p>If the corresponding master service or worker service does not exist, then the master/worker service is successfully shut down.</p>
-<h3>2.2 Modify the configuration file</h3>
+<h3>Modify the Configuration File</h3>
 <ul>
 <li>
 <p>modify the configuration file <code>conf/config/install_config.conf</code> on the <strong>all</strong> nodes, synchronizing the following configuration.</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.json b/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.json
index 31fd48f..68ce02c 100644
--- a/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.json
+++ b/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.json
@@ -1,6 +1,6 @@
 {
   "filename": "expansion-reduction.md",
-  "__html": "<!-- markdown-link-check-disable -->\n<h1>DolphinScheduler Expansion and Reduction</h1>\n<h2>1. Expansion</h2>\n<p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>\n<pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.\n       If the physical machine where the expansion master or worker node is located has already installed the scheduled [...]
+  "__html": "<!-- markdown-link-check-disable -->\n<h1>DolphinScheduler Expansion and Reduction</h1>\n<h2>Expansion</h2>\n<p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>\n<pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.\n       If the physical machine where the expansion master or worker node is located has already installed the scheduled se [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/flink-call.html b/en-us/docs/2.0.3/user_doc/guide/flink-call.html
index 9d4c155..1587d25 100644
--- a/en-us/docs/2.0.3/user_doc/guide/flink-call.html
+++ b/en-us/docs/2.0.3/user_doc/guide/flink-call.html
@@ -11,7 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h3>Create a queue</h3>
+<h2>Create a Queue</h2>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>
 <li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>
@@ -19,7 +19,7 @@
 <p align="center">
    <img src="/img/api/create_queue.png" width="80%" />
  </p>
-<h3>Create a tenant</h3>
+<h2>Create a Tenant</h2>
 <pre><code>1. The tenant corresponds to a Linux user, which the user worker uses to submit jobs. If Linux OS environment does not have this user, the worker will create this user when executing the script.
 2. Both the tenant and the tenant code are unique and cannot be repeated, just like a person has a name and id number.  
 3. After creating a tenant, there will be a folder in the HDFS relevant directory.  
@@ -27,11 +27,11 @@
 <p align="center">
    <img src="/img/api/create_tenant.png" width="80%" />
  </p>
-<h3>Create a user</h3>
+<h2>Create a User</h2>
 <p align="center">
    <img src="/img/api/create_user.png" width="80%" />
  </p>
-<h3>Create a token</h3>
+<h2>Create a Token</h2>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>
 </ol>
@@ -44,7 +44,7 @@
 <p align="center">
    <img src="/img/create-token-en1.png" width="80%" />
  </p>
-<h3>Use token</h3>
+<h2>Use Token</h2>
 <ol>
 <li>
 <p>Open the API documentation page</p>
@@ -73,21 +73,21 @@
 <p align="center">
    <img src="/img/test-api.png" width="80%" />
  </p>  
-<h3>User authorization</h3>
+<h2>User Authorization</h2>
 <p align="center">
    <img src="/img/api/user_authorization.png" width="80%" />
  </p>
-<h3>User login</h3>
+<h2>User Login</h2>
 <pre><code>http://192.168.1.163:12345/dolphinscheduler/ui/#/monitor/servers/master
 </code></pre>
 <p align="center">
    <img src="/img/api/user_login.png" width="80%" />
  </p>
-<h3>Upload the resource</h3>
+<h2>Upload the Resource</h2>
 <p align="center">
    <img src="/img/api/upload_resource.png" width="80%" />
  </p>
-<h3>Create a workflow</h3>
+<h2>Create a Workflow</h2>
 <p align="center">
    <img src="/img/api/create_workflow1.png" width="80%" />
  </p>
@@ -100,11 +100,11 @@
 <p align="center">
    <img src="/img/api/create_workflow4.png" width="80%" />
  </p>
-<h3>View the execution result</h3>
+<h2>View the Execution Result</h2>
 <p align="center">
    <img src="/img/api/execution_result.png" width="80%" />
  </p>
-<h3>View log</h3>
+<h2>View Log</h2>
 <p align="center">
    <img src="/img/api/log.png" width="80%" />
  </p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/flink-call.json b/en-us/docs/2.0.3/user_doc/guide/flink-call.json
index 469dae6..148fe4b 100644
--- a/en-us/docs/2.0.3/user_doc/guide/flink-call.json
+++ b/en-us/docs/2.0.3/user_doc/guide/flink-call.json
@@ -1,6 +1,6 @@
 {
   "filename": "flink-call.md",
-  "__html": "<h1>Flink Calls Operating steps</h1>\n<h3>Create a queue</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>\n<li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>\n</ol>\n<p align=\"center\">\n   <img src=\"/img/api/create_queue.png\" width=\"80%\" />\n </p>\n<h3>Create a tenant</h3>\n<pre><code>1. The tenant correspon [...]
+  "__html": "<h1>Flink Calls Operating steps</h1>\n<h2>Create a Queue</h2>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>\n<li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>\n</ol>\n<p align=\"center\">\n   <img src=\"/img/api/create_queue.png\" width=\"80%\" />\n </p>\n<h2>Create a Tenant</h2>\n<pre><code>1. The tenant correspon [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/flink-call.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html b/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html
index 7bf6293..5fec774 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html
@@ -15,9 +15,9 @@
 <p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href="standalone.md">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href="pseudo-cluster.md">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you follow <a href="cluster.md">cluster deployment</a> or <a href="kubernetes.md">kubernetes</a></p>
 <h2>Deployment Step</h2>
 <p>Cluster deployment uses the same scripts and configuration files as we deploy in <a href="pseudo-cluster.md">pseudo-cluster deployment</a>, so the prepare and required are the same as pseudo-cluster deployment. The difference is that <a href="pseudo-cluster.md">pseudo-cluster deployment</a> is for one machine, while cluster deployment (Cluster) for multiple. and the steps of &quot;Modify configuration&quot; are quite different between pseudo-cluster deployment and cluster deployment.</p>
-<h3>Prepare &amp;&amp; DolphinScheduler startup environment</h3>
-<p>Because of cluster deployment for multiple machine, so you have to run you &quot;Prepare&quot; and &quot;startup&quot; in every machine in <a href="pseudo-cluster.md">pseudo-cluster.md</a>, except section &quot;Configure machine SSH password-free login&quot;, &quot;Start zookeeper&quot;, &quot;Initialize the database&quot;, which is only for deployment or just need an single server</p>
-<h3>Modify configuration</h3>
+<h3>Prepare and DolphinScheduler Startup Environment</h3>
+<p>Because of cluster deployment for multiple machine, so you have to run you &quot;Prepare&quot; and &quot;startup&quot; in every machine in <a href="pseudo-cluster.md">pseudo-cluster.md</a>, except section &quot;Configure machine SSH password-free login&quot;, &quot;Start ZooKeeper&quot;, &quot;Initialize the database&quot;, which is only for deployment or just need an single server</p>
+<h3>Modify Configuration</h3>
 <p>This is a step that is quite different from <a href="pseudo-cluster.md">pseudo-cluster.md</a>, because the deployment script will transfer the resources required for installation machine to each deployment machine using <code>scp</code>. And we have to declare all machine we want to install DolphinScheduler and then run script <code>install.sh</code>. The configuration file is under the path <code>conf/config/install_config.conf</code>, here we only need to modify section <strong>INST [...]
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> INSTALL MACHINE</span>
@@ -32,7 +32,9 @@ alertServer=&quot;ds4&quot;
 apiServers=&quot;ds5&quot;
 pythonGatewayServers=&quot;ds5&quot;
 </code></pre>
-<h2>Start DolphinScheduler &amp;&amp; Login DolphinScheduler &amp;&amp; Server Start And Stop</h2>
+<h2>Start and Login DolphinScheduler</h2>
+<p>Same as <a href="http://pseudo-cluster.md">pseudo-cluster.md</a>](<a href="http://pseudo-cluster.md">pseudo-cluster.md</a>)</p>
+<h2>Start and Stop Server</h2>
 <p>Same as <a href="http://pseudo-cluster.md">pseudo-cluster.md</a>](<a href="http://pseudo-cluster.md">pseudo-cluster.md</a>)</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/cluster.json b/en-us/docs/2.0.3/user_doc/guide/installation/cluster.json
index 2c493f8..83e43a2 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/cluster.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/cluster.json
@@ -1,6 +1,6 @@
 {
   "filename": "cluster.md",
-  "__html": "<h1>Cluster Deployment</h1>\n<p>Cluster deployment is to deploy the DolphinScheduler on multiple machines for running a large number of tasks in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</ [...]
+  "__html": "<h1>Cluster Deployment</h1>\n<p>Cluster deployment is to deploy the DolphinScheduler on multiple machines for running a large number of tasks in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/docker.html b/en-us/docs/2.0.3/user_doc/guide/installation/docker.html
index a8d5ff9..40c8025 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/docker.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/docker.html
@@ -16,12 +16,12 @@
 <li><a href="https://docs.docker.com/engine/install/">Docker</a> 1.13.1+</li>
 <li><a href="https://docs.docker.com/compose/">Docker Compose</a> 1.11.0+</li>
 </ul>
-<h2>How to use this Docker image</h2>
+<h2>How to Use this Docker Image</h2>
 <p>Here're 3 ways to quickly install DolphinScheduler</p>
-<h3>The First Way: Start a DolphinScheduler by docker-compose (recommended)</h3>
+<h3>The First Way: Start a DolphinScheduler by Docker Compose (Recommended)</h3>
 <p>In this way, you need to install <a href="https://docs.docker.com/compose/">docker-compose</a> as a prerequisite, please install it yourself according to the rich docker-compose installation guidance on the Internet</p>
 <p>For Windows 7-10, you can install <a href="https://github.com/docker/toolbox/releases">Docker Toolbox</a>. For Windows 10 64-bit, you can install <a href="https://docs.docker.com/docker-for-windows/install/">Docker Desktop</a>, and pay attention to the <a href="https://docs.docker.com/docker-for-windows/install/#system-requirements">system requirements</a></p>
-<h4>0. Configure memory not less than 4GB</h4>
+<h4>Configure Memory not Less Than 4GB</h4>
 <p>For Mac user, click <code>Docker Desktop -&gt; Preferences -&gt; Resources -&gt; Memory</code></p>
 <p>For Windows Docker Toolbox user, two items need to be configured:</p>
 <ul>
@@ -33,9 +33,9 @@
 <li><strong>Hyper-V mode</strong>: Click <code>Docker Desktop -&gt; Settings -&gt; Resources -&gt; Memory</code></li>
 <li><strong>WSL 2 mode</strong>: Refer to <a href="https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig">WSL 2 utility VM</a></li>
 </ul>
-<h4>1. Download the Source Code Package</h4>
+<h4>Download the Source Code Package</h4>
 <p>Please download the source code package apache-dolphinscheduler-2.0.3-src.tar.gz, download address: <a href="/en-us/download/download.html">download</a></p>
-<h4>2. Pull Image and Start the Service</h4>
+<h4>Pull Image and Start the Service</h4>
 <blockquote>
 <p>For Mac and Linux user, open <strong>Terminal</strong>
 For Windows Docker Toolbox user, open <strong>Docker Quickstart Terminal</strong>
@@ -51,24 +51,24 @@ $ docker-compose up -d
 <p>PowerShell should use <code>cd apache-dolphinscheduler-2.0.3-src\docker\docker-swarm</code></p>
 </blockquote>
 <p>The <strong>PostgreSQL</strong> (with username <code>root</code>, password <code>root</code> and database <code>dolphinscheduler</code>) and <strong>ZooKeeper</strong> services will start by default</p>
-<h4>3. Login</h4>
+<h4>Login</h4>
 <p>Visit the Web UI: <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> (The local address is <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a>)</p>
 <p>The default username is <code>admin</code> and the default password is <code>dolphinscheduler123</code></p>
 <p align="center">
   <img src="/img/login_en.png" width="60%" />
 </p>
 <p>Please refer to the <code>Quick Start</code> in the chapter <a href="../quick-start.md">Quick Start</a> to explore how to use DolphinScheduler</p>
-<h3>The Second Way: Start via specifying the existing PostgreSQL and ZooKeeper service</h3>
+<h3>The Second Way: Start via Specifying the Existing PostgreSQL and ZooKeeper Service</h3>
 <p>In this way, you need to install <a href="https://docs.docker.com/engine/install/">docker</a> as a prerequisite, please install it yourself according to the rich docker installation guidance on the Internet</p>
-<h4>1. Basic Required Software (please install by yourself)</h4>
+<h4>Basic Required Software</h4>
 <ul>
 <li><a href="https://www.postgresql.org/download/">PostgreSQL</a> (8.2.15+)</li>
 <li><a href="https://zookeeper.apache.org/releases.html">ZooKeeper</a> (3.4.6+)</li>
 <li><a href="https://docs.docker.com/engine/install/">Docker</a> (1.13.1+)</li>
 </ul>
-<h4>2. Please login to the PostgreSQL database and create a database named <code>dolphinscheduler</code></h4>
-<h4>3. Initialize the database, import <code>sql/dolphinscheduler_postgre.sql</code> to create tables and initial data</h4>
-<h4>4. Download the DolphinScheduler Image</h4>
+<h4>Please Login to the PostgreSQL Database and Create a Database Named <code>dolphinscheduler</code></h4>
+<h4>Initialize the Database, Import <code>sql/dolphinscheduler_postgre.sql</code> to Create Tables and Initial Data</h4>
+<h4>Download the DolphinScheduler Image</h4>
 <p>We have already uploaded user-oriented DolphinScheduler image to the Docker repository so that you can pull the image from the docker repository:</p>
 <pre><code>docker pull dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.3
 </code></pre>
@@ -81,9 +81,9 @@ $ docker-compose up -d
 apache/dolphinscheduler:2.0.3 all
 </code></pre>
 <p>Note: database username test and password test need to be replaced with your actual PostgreSQL username and password, 192.168.x.x need to be replaced with your relate PostgreSQL and ZooKeeper host IP</p>
-<h4>6. Login</h4>
+<h4>Login</h4>
 <p>Same as above</p>
-<h3>The Third Way: Start a standalone DolphinScheduler server</h3>
+<h3>The Third Way: Start a Standalone DolphinScheduler Server</h3>
 <p>The following services are automatically started when the container starts:</p>
 <pre><code>     MasterServer         ----- master service
      WorkerServer         ----- worker service
@@ -305,7 +305,7 @@ apache/dolphinscheduler:2.0.3 python-gateway
 </tbody>
 </table>
 <h2>FAQ</h2>
-<h3>How to manage DolphinScheduler by docker-compose?</h3>
+<h3>How to Manage DolphinScheduler by Docker Compose?</h3>
 <p>Start, restart, stop or list containers:</p>
 <pre><code>docker-compose start
 docker-compose restart
@@ -318,7 +318,7 @@ docker-compose ps
 <p>Stop containers and remove containers, networks and volumes:</p>
 <pre><code>docker-compose down -v
 </code></pre>
-<h3>How to view the logs of a container?</h3>
+<h3>How to View the Logs of a Container?</h3>
 <p>List all running containers:</p>
 <pre><code>docker ps
 docker ps --format &quot;{{.Names}}&quot; # only print names
@@ -328,14 +328,14 @@ docker ps --format &quot;{{.Names}}&quot; # only print names
 docker logs -f docker-swarm_dolphinscheduler-api_1 # follow log output
 docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines from the end of the logs
 </code></pre>
-<h3>How to scale master and worker by docker-compose?</h3>
+<h3>How to Scale Master and Worker by Docker Compose?</h3>
 <p>Scale master to 2 instances:</p>
 <pre><code>docker-compose up -d --scale dolphinscheduler-master=2 dolphinscheduler-master
 </code></pre>
 <p>Scale worker to 3 instances:</p>
 <pre><code>docker-compose up -d --scale dolphinscheduler-worker=3 dolphinscheduler-worker
 </code></pre>
-<h3>How to deploy DolphinScheduler on Docker Swarm?</h3>
+<h3>How to Deploy DolphinScheduler on Docker Swarm?</h3>
 <p>Assuming that the Docker Swarm cluster has been created (If there is no Docker Swarm cluster, please refer to <a href="https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/">create-swarm</a>)</p>
 <p>Start a stack named dolphinscheduler:</p>
 <pre><code>docker stack deploy -c docker-stack.yml dolphinscheduler
@@ -349,15 +349,15 @@ docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines f
 <p>Remove the volumes of the stack named dolphinscheduler:</p>
 <pre><code>docker volume rm -f $(docker volume ls --format &quot;{{.Name}}&quot; | grep -e &quot;^dolphinscheduler&quot;)
 </code></pre>
-<h3>How to scale master and worker on Docker Swarm?</h3>
+<h3>How to Scale Master and Worker on Docker Swarm?</h3>
 <p>Scale master of the stack named dolphinscheduler to 2 instances:</p>
 <pre><code>docker service scale dolphinscheduler_dolphinscheduler-master=2
 </code></pre>
 <p>Scale worker of the stack named dolphinscheduler to 3 instances:</p>
 <pre><code>docker service scale dolphinscheduler_dolphinscheduler-worker=3
 </code></pre>
-<h3>How to build a Docker image?</h3>
-<h4>Build from the source code (Require Maven 3.3+ &amp; JDK 1.8+)</h4>
+<h3>How to Build a Docker Image?</h3>
+<h4>Build from the Source Code (Require Maven 3.3+ and JDK 1.8+)</h4>
 <p>In Unix-Like, execute in Terminal:</p>
 <pre><code class="language-bash">$ bash ./docker/build/hooks/build
 </code></pre>
@@ -365,7 +365,7 @@ docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines f
 <pre><code class="language-bat"><span class="hljs-function">C:\<span class="hljs-title">dolphinscheduler</span>-<span class="hljs-title">src</span>&gt;.\<span class="hljs-title">docker</span>\<span class="hljs-title">build</span>\<span class="hljs-title">hooks</span>\<span class="hljs-title">build.bat</span>
 </span></code></pre>
 <p>Please read <code>./docker/build/hooks/build</code> <code>./docker/build/hooks/build.bat</code> script files if you don't understand</p>
-<h4>Build from the binary distribution (Not require Maven 3.3+ &amp; JDK 1.8+)</h4>
+<h4>Build from the Binary Distribution (Not require Maven 3.3+ and JDK 1.8+)</h4>
 <p>Please download the binary distribution package apache-dolphinscheduler-2.0.3-bin.tar.gz, download address: <a href="/en-us/download/download.html">download</a>. And put apache-dolphinscheduler-2.0.3-bin.tar.gz into the <code>apache-dolphinscheduler-2.0.3-src/docker/build</code> directory, execute in Terminal or PowerShell:</p>
 <pre><code>$ cd apache-dolphinscheduler-2.0.3-src/docker/build
 $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
@@ -373,7 +373,7 @@ $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
 <blockquote>
 <p>PowerShell should use <code>cd apache-dolphinscheduler-2.0.3-src/docker/build</code></p>
 </blockquote>
-<h4>Build multi-platform images</h4>
+<h4>Build Multi-Platform Images</h4>
 <p>Currently support to build images including <code>linux/amd64</code> and <code>linux/arm64</code> platform architecture, requirements:</p>
 <ol>
 <li>Support <a href="https://docs.docker.com/engine/reference/commandline/buildx/">docker buildx</a></li>
@@ -383,7 +383,7 @@ $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
 <pre><code class="language-bash">$ docker login <span class="hljs-comment"># login to push apache/dolphinscheduler</span>
 $ bash ./docker/build/hooks/build
 </code></pre>
-<h3>How to add an environment variable for Docker?</h3>
+<h3>How to Add an Environment Variable for Docker?</h3>
 <p>If you would like to do additional initialization in an image derived from this one, add one or more environment variables under <code>/root/start-init-conf.sh</code>, and modify template files in <code>/opt/dolphinscheduler/conf/*.tpl</code>.</p>
 <p>For example, to add an environment variable <code>SECURITY_AUTHENTICATION_TYPE</code> in <code>/root/start-init-conf.sh</code>:</p>
 <pre><code>export SECURITY_AUTHENTICATION_TYPE=PASSWORD
@@ -400,7 +400,7 @@ EOF
 &quot;</span> &gt; <span class="hljs-variable">${DOLPHINSCHEDULER_HOME}</span>/conf/<span class="hljs-variable">${line%.*}</span>
 <span class="hljs-keyword">done</span>
 </code></pre>
-<h3>How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?</h3>
+<h3>How to Use MySQL as the DolphinScheduler's Database Instead of PostgreSQL?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to use MySQL, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -453,7 +453,7 @@ DATABASE_PARAMS=useUnicode=true&amp;characterEncoding=UTF-8
 <ol start="8">
 <li>Run a dolphinscheduler (See <strong>How to use this docker image</strong>)</li>
 </ol>
-<h3>How to support MySQL datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support MySQL Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to add MySQL datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -488,7 +488,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <p>Add a MySQL datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Oracle datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support Oracle Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of Oracle.</p>
 <p>If you want to add Oracle datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -523,7 +523,7 @@ COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 <p>Add an Oracle datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Python 2 pip and custom requirements.txt?</h3>
+<h3>How to Support Python 2 pip and Custom requirements.txt?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install pip:</li>
 </ol>
@@ -556,7 +556,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify pip under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Python 3?</h3>
+<h3>How to Support Python 3?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install Python 3:</li>
 </ol>
@@ -590,7 +590,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify Python 3 under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Hadoop, Spark, Flink, Hive or DataX?</h3>
+<h3>How to Support Hadoop, Spark, Flink, Hive or DataX?</h3>
 <p>Take Spark 2.4.7 as an example:</p>
 <ol>
 <li>
@@ -639,7 +639,7 @@ ln -s spark-2.4.7-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </ol>
 <p>Spark on YARN (Deploy Mode is <code>cluster</code> or <code>client</code>) requires Hadoop support. Similar to Spark support, the operation of supporting Hadoop is almost the same as the previous steps</p>
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> exists</p>
-<h3>How to support Spark 3?</h3>
+<h3>How to Support Spark 3?</h3>
 <p>In fact, the way to submit applications with <code>spark-submit</code> is the same, regardless of Spark 1, 2 or 3. In other words, the semantics of <code>SPARK_HOME2</code> is the second <code>SPARK_HOME</code> instead of <code>SPARK2</code>'s <code>HOME</code>, so just set <code>SPARK_HOME2=/path/to/spark3</code></p>
 <p>Take Spark 3.1.1 as an example:</p>
 <ol>
@@ -672,7 +672,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <pre><code>$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
 </code></pre>
 <p>Check whether the task log contains the output like <code>Pi is roughly 3.146015</code></p>
-<h3>How to support shared storage between Master, Worker and Api server?</h3>
+<h3>How to Support Shared Storage between Master, Worker and Api server?</h3>
 <blockquote>
 <p><strong>Note</strong>: If it is deployed on a single machine by <code>docker-compose</code>, step 1 and 2 can be skipped directly, and execute the command like <code>docker cp hadoop-3.2.2.tar.gz docker-swarm_dolphinscheduler-worker_1:/opt/soft</code> to put Hadoop into the shared directory <code>/opt/soft</code> in the container</p>
 </blockquote>
@@ -698,7 +698,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> are correct</p>
 </li>
 </ol>
-<h3>How to support local file resource storage instead of HDFS and S3?</h3>
+<h3>How to Support Local File Resource Storage Instead of HDFS and S3?</h3>
 <blockquote>
 <p><strong>Note</strong>: If it is deployed on a single machine by <code>docker-compose</code>, step 2 can be skipped directly</p>
 </blockquote>
@@ -721,7 +721,7 @@ FS_DEFAULT_FS=file:///
       <span class="hljs-attr">o:</span> <span class="hljs-string">&quot;addr=10.40.0.199,nolock,soft,rw&quot;</span>
       <span class="hljs-attr">device:</span> <span class="hljs-string">&quot;:/path/to/resource/dir&quot;</span>
 </code></pre>
-<h3>How to support S3 resource storage like MinIO?</h3>
+<h3>How to Support S3 Resource Storage Like MinIO?</h3>
 <p>Take MinIO as an example: Modify the following environment variables in <code>config.env.sh</code></p>
 <pre><code>RESOURCE_STORAGE_TYPE=S3
 RESOURCE_UPLOAD_PATH=/dolphinscheduler
@@ -734,7 +734,7 @@ FS_S3A_SECRET_KEY=MINIO_SECRET_KEY
 <blockquote>
 <p><strong>Note</strong>: <code>MINIO_IP</code> can only use IP instead of the domain name, because DolphinScheduler currently doesn't support S3 path style access</p>
 </blockquote>
-<h3>How to configure SkyWalking?</h3>
+<h3>How to Configure SkyWalking?</h3>
 <p>Modify SkyWalking environment variables in <code>config.env.sh</code>:</p>
 <pre><code>SKYWALKING_ENABLE=true
 SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
@@ -769,10 +769,10 @@ SW_GRPC_LOG_SERVER_PORT=11800
 <p><strong>Note</strong>: You must specify it when starting a standalone dolphinscheduler server. Like <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>, <code>alert-server</code>.</p>
 <h3>ZooKeeper</h3>
 <p><strong><code>ZOOKEEPER_QUORUM</code></strong></p>
-<p>This environment variable sets zookeeper quorum. The default value is <code>127.0.0.1:2181</code>.</p>
+<p>This environment variable sets ZooKeeper quorum. The default value is <code>127.0.0.1:2181</code>.</p>
 <p><strong>Note</strong>: You must specify it when starting a standalone dolphinscheduler server. Like <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>.</p>
 <p><strong><code>ZOOKEEPER_ROOT</code></strong></p>
-<p>This environment variable sets zookeeper root directory for dolphinscheduler. The default value is <code>/dolphinscheduler</code>.</p>
+<p>This environment variable sets ZooKeeper root directory for dolphinscheduler. The default value is <code>/dolphinscheduler</code>.</p>
 <h3>Common</h3>
 <p><strong><code>DOLPHINSCHEDULER_OPTS</code></strong></p>
 <p>This environment variable sets JVM options for dolphinscheduler, suitable for <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>, <code>alert-server</code>, <code>logger-server</code>. The default value is empty.</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/docker.json b/en-us/docs/2.0.3/user_doc/guide/installation/docker.json
index 3d231ac..bf1f64b 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/docker.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/docker.json
@@ -1,6 +1,6 @@
 {
   "filename": "docker.md",
-  "__html": "<h1>QuickStart in Docker</h1>\n<h2>Prerequisites</h2>\n<ul>\n<li><a href=\"https://docs.docker.com/engine/install/\">Docker</a> 1.13.1+</li>\n<li><a href=\"https://docs.docker.com/compose/\">Docker Compose</a> 1.11.0+</li>\n</ul>\n<h2>How to use this Docker image</h2>\n<p>Here're 3 ways to quickly install DolphinScheduler</p>\n<h3>The First Way: Start a DolphinScheduler by docker-compose (recommended)</h3>\n<p>In this way, you need to install <a href=\"https://docs.docker.co [...]
+  "__html": "<h1>QuickStart in Docker</h1>\n<h2>Prerequisites</h2>\n<ul>\n<li><a href=\"https://docs.docker.com/engine/install/\">Docker</a> 1.13.1+</li>\n<li><a href=\"https://docs.docker.com/compose/\">Docker Compose</a> 1.11.0+</li>\n</ul>\n<h2>How to Use this Docker Image</h2>\n<p>Here're 3 ways to quickly install DolphinScheduler</p>\n<h3>The First Way: Start a DolphinScheduler by Docker Compose (Recommended)</h3>\n<p>In this way, you need to install <a href=\"https://docs.docker.co [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/docker.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html b/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html
index e75c2d6..a23e966 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html
@@ -12,7 +12,7 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>
-<h2>1. Linux Operating System Version Requirements</h2>
+<h2>Linux Operating System Version Requirements</h2>
 <table>
 <thead>
 <tr>
@@ -43,7 +43,7 @@
 <p><strong>Attention:</strong>
 The above Linux operating systems can run on physical servers and mainstream virtualization environments such as VMware, KVM, and XEN.</p>
 </blockquote>
-<h2>2. Recommended Server Configuration</h2>
+<h2>Recommended Server Configuration</h2>
 <p>DolphinScheduler supports 64-bit hardware platforms with Intel x86-64 architecture. The following recommendation is made for server hardware configuration in a production environment:</p>
 <h3>Production Environment</h3>
 <table>
@@ -73,7 +73,7 @@ The above Linux operating systems can run on physical servers and mainstream vir
 <li>The hard disk size configuration is recommended by more than 50GB. The system disk and data disk are separated.</li>
 </ul>
 </blockquote>
-<h2>3. Network Requirements</h2>
+<h2>Network Requirements</h2>
 <p>DolphinScheduler provides the following network port configurations for normal operation:</p>
 <table>
 <thead>
@@ -108,7 +108,7 @@ The above Linux operating systems can run on physical servers and mainstream vir
 <li>Administrators can adjust relevant ports on the network side and host-side according to the deployment plan of DolphinScheduler components in the actual environment.</li>
 </ul>
 </blockquote>
-<h2>4. Browser Requirements</h2>
+<h2>Browser Requirements</h2>
 <p>DolphinScheduler recommends Chrome and the latest browsers which using Chrome Kernel to access the front-end visual operator page.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/hardware.json b/en-us/docs/2.0.3/user_doc/guide/installation/hardware.json
index 43220a5..a161e83 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/hardware.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/hardware.json
@@ -1,6 +1,6 @@
 {
   "filename": "hardware.md",
-  "__html": "<h1>Hardware Environment</h1>\n<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>\n<h2>1. Linux Operating System Version Requirements</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left\">OS</th>\n<th style=\"text-align:center\">Version</th>\n</tr>\n</thead>\n [...]
+  "__html": "<h1>Hardware Environment</h1>\n<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>\n<h2>Linux Operating System Version Requirements</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left\">OS</th>\n<th style=\"text-align:center\">Version</th>\n</tr>\n</thead>\n<tb [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html b/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html
index eae8b5b..361df8d 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html
@@ -19,7 +19,7 @@
 <li><a href="https://kubernetes.io/">Kubernetes</a> 1.12+</li>
 <li>PV provisioner support in the underlying infrastructure</li>
 </ul>
-<h2>Installing the Chart</h2>
+<h2>Install the Chart</h2>
 <p>Please download the source code package apache-dolphinscheduler-2.0.3-src.tar.gz, download address: <a href="/en-us/download/download.html">download</a></p>
 <p>To install the chart with the release name <code>dolphinscheduler</code>, please execute the following commands:</p>
 <pre><code>$ tar -zxvf apache-dolphinscheduler-2.0.3-src.tar.gz
@@ -60,7 +60,7 @@ NODE_PORT=$(kubectl get svc {{ template <span class="hljs-string">&quot;dolphins
 <p>And then access the web: http://<span class="katex"><span class="katex-mathml"><math><semantics><mrow><mi>N</mi><mi>O</mi><mi>D</mi><msub><mi>E</mi><mi>I</mi></msub><mi>P</mi><mo>:</mo></mrow><annotation encoding="application/x-tex">NODE_IP:</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="strut" style="height:0.68333em;"></span><span class="strut bottom" style="height:0.83333em;vertical-align:-0.15em;"></span><span class="base textstyle u [...]
 <p>The default username is <code>admin</code> and the default password is <code>dolphinscheduler123</code></p>
 <p>Please refer to the <code>Quick Start</code> in the chapter <a href="../quick-start.md">Quick Start</a> to explore how to use DolphinScheduler</p>
-<h2>Uninstalling the Chart</h2>
+<h2>Uninstall the Chart</h2>
 <p>To uninstall/delete the <code>dolphinscheduler</code> deployment:</p>
 <pre><code class="language-bash">$ helm uninstall dolphinscheduler
 </code></pre>
@@ -236,7 +236,7 @@ NODE_PORT=$(kubectl get svc {{ template <span class="hljs-string">&quot;dolphins
 </tbody>
 </table>
 <h2>FAQ</h2>
-<h3>How to view the logs of a pod container?</h3>
+<h3>How to View the Logs of a Pod Container?</h3>
 <p>List all pods (aka <code>po</code>):</p>
 <pre><code>kubectl get po
 kubectl get po -n test # with test namespace
@@ -246,7 +246,7 @@ kubectl get po -n test # with test namespace
 kubectl logs -f dolphinscheduler-master-0 # follow log output
 kubectl logs --tail 10 dolphinscheduler-master-0 -n test # show last 10 lines from the end of the logs
 </code></pre>
-<h3>How to scale api, master and worker on Kubernetes?</h3>
+<h3>How to Scale api, master and worker on Kubernetes?</h3>
 <p>List all deployments (aka <code>deploy</code>):</p>
 <pre><code>kubectl get deploy
 kubectl get deploy -n test # with test namespace
@@ -267,7 +267,7 @@ kubectl scale --replicas=2 sts dolphinscheduler-master -n test # with test names
 <pre><code>kubectl scale --replicas=6 sts dolphinscheduler-worker
 kubectl scale --replicas=6 sts dolphinscheduler-worker -n test # with test namespace
 </code></pre>
-<h3>How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?</h3>
+<h3>How to Use MySQL as the DolphinScheduler's Database Instead of PostgreSQL?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to use MySQL, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -315,7 +315,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <ol start="8">
 <li>Run a DolphinScheduler release in Kubernetes (See <strong>Installing the Chart</strong>)</li>
 </ol>
-<h3>How to support MySQL datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support MySQL Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to add MySQL datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -350,7 +350,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <p>Add a MySQL datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Oracle datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support Oracle Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of Oracle.</p>
 <p>If you want to add Oracle datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -385,7 +385,7 @@ COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 <p>Add an Oracle datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Python 2 pip and custom requirements.txt?</h3>
+<h3>How to Support Python 2 pip and Custom requirements.txt?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install pip:</li>
 </ol>
@@ -418,7 +418,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify pip under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Python 3?</h3>
+<h3>How to Support Python 3?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install Python 3:</li>
 </ol>
@@ -452,7 +452,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify Python 3 under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Hadoop, Spark, Flink, Hive or DataX?</h3>
+<h3>How to Support Hadoop, Spark, Flink, Hive or DataX?</h3>
 <p>Take Spark 2.4.7 as an example:</p>
 <ol>
 <li>
@@ -506,7 +506,7 @@ ln -s spark-2.4.7-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </ol>
 <p>Spark on YARN (Deploy Mode is <code>cluster</code> or <code>client</code>) requires Hadoop support. Similar to Spark support, the operation of supporting Hadoop is almost the same as the previous steps</p>
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> exists</p>
-<h3>How to support Spark 3?</h3>
+<h3>How to Support Spark 3?</h3>
 <p>In fact, the way to submit applications with <code>spark-submit</code> is the same, regardless of Spark 1, 2 or 3. In other words, the semantics of <code>SPARK_HOME2</code> is the second <code>SPARK_HOME</code> instead of <code>SPARK2</code>'s <code>HOME</code>, so just set <code>SPARK_HOME2=/path/to/spark3</code></p>
 <p>Take Spark 3.1.1 as an example:</p>
 <ol>
@@ -544,7 +544,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <pre><code>$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
 </code></pre>
 <p>Check whether the task log contains the output like <code>Pi is roughly 3.146015</code></p>
-<h3>How to support shared storage between Master, Worker and Api server?</h3>
+<h3>How to Support Shared Storage Between Master, Worker and Api Server?</h3>
 <p>For example, Master, Worker and API server may use Hadoop at the same time</p>
 <ol>
 <li>Modify the following configurations in <code>values.yaml</code></li>
@@ -570,7 +570,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> are correct</p>
 </li>
 </ol>
-<h3>How to support local file resource storage instead of HDFS and S3?</h3>
+<h3>How to Support Local File Resource Storage Instead of HDFS and S3?</h3>
 <p>Modify the following configurations in <code>values.yaml</code></p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -588,7 +588,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <blockquote>
 <p><strong>Note</strong>: <code>storageClassName</code> must support the access mode: <code>ReadWriteMany</code></p>
 </blockquote>
-<h3>How to support S3 resource storage like MinIO?</h3>
+<h3>How to Support S3 Resource Storage Like MinIO?</h3>
 <p>Take MinIO as an example: Modify the following configurations in <code>values.yaml</code></p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -603,7 +603,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <blockquote>
 <p><strong>Note</strong>: <code>MINIO_IP</code> can only use IP instead of domain name, because DolphinScheduler currently doesn't support S3 path style access</p>
 </blockquote>
-<h3>How to configure SkyWalking?</h3>
+<h3>How to Configure SkyWalking?</h3>
 <p>Modify SKYWALKING configurations in <code>values.yaml</code>:</p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -739,7 +739,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>zookeeper.enabled</code></td>
-<td>If not exists external Zookeeper, by default, the DolphinScheduler will use a internal Zookeeper</td>
+<td>If no external ZooKeeper exists, by default, the DolphinScheduler will use an internal ZooKeeper</td>
 <td><code>true</code></td>
 </tr>
 <tr>
@@ -759,7 +759,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>zookeeper.persistence.storageClass</code></td>
-<td>Zookeeper data persistent volume storage class. If set to &quot;-&quot;, storageClassName: &quot;&quot;, which disables dynamic provisioning</td>
+<td>ZooKeeper data persistent volume storage class. If set to &quot;-&quot;, storageClassName: &quot;&quot;, which disables dynamic provisioning</td>
 <td><code>-</code></td>
 </tr>
 <tr>
@@ -769,12 +769,12 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>externalZookeeper.zookeeperQuorum</code></td>
-<td>If exists external Zookeeper, and set <code>zookeeper.enabled</code> value to false. Specify Zookeeper quorum</td>
+<td>If an external ZooKeeper exists, set <code>zookeeper.enabled</code> value to false. Specify ZooKeeper quorum</td>
 <td><code>127.0.0.1:2181</code></td>
 </tr>
 <tr>
 <td><code>externalZookeeper.zookeeperRoot</code></td>
-<td>If exists external Zookeeper, and set <code>zookeeper.enabled</code> value to false. Specify dolphinscheduler root directory in Zookeeper</td>
+<td>If an external ZooKeeper exists, set <code>zookeeper.enabled</code> value to false. Specify dolphinscheduler root directory in ZooKeeper</td>
 <td><code>/dolphinscheduler</code></td>
 </tr>
 <tr>
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.json b/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.json
index 4327429..e6e0adb 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.json
@@ -1,6 +1,6 @@
 {
   "filename": "kubernetes.md",
-  "__html": "<h1>QuickStart in Kubernetes</h1>\n<p>Kubernetes deployment is deploy DolphinScheduler in a Kubernetes cluster, which can schedule a large number of tasks and can be used in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\ [...]
+  "__html": "<h1>QuickStart in Kubernetes</h1>\n<p>Kubernetes deployment is deploy DolphinScheduler in a Kubernetes cluster, which can schedule a large number of tasks and can be used in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html b/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html
index c98510d..ca413a1 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html
@@ -30,8 +30,8 @@
 <blockquote>
 <p><strong><em>Note:</em></strong> DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but if you need to run tasks that depend on them, you need to have the corresponding environment support</p>
 </blockquote>
-<h2>DolphinScheduler startup environment</h2>
-<h3>Configure user exemption and permissions</h3>
+<h2>DolphinScheduler Startup Environment</h2>
+<h3>Configure User Exemption and Permissions</h3>
 <p>Create a deployment user, and be sure to configure <code>sudo</code> without password. We here make a example for user dolphinscheduler.</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> To create a user, login as root</span>
 useradd dolphinscheduler
@@ -53,7 +53,7 @@ chown -R dolphinscheduler:dolphinscheduler apache-dolphinscheduler-*-bin
 <li>If you find the line &quot;Defaults requirest&quot; in the <code>/etc/sudoers</code> file, please comment it</li>
 </ul>
 </blockquote>
-<h3>Configure machine SSH password-free login</h3>
+<h3>Configure Machine SSH Password-Free Login</h3>
 <p>Since resources need to be sent to different machines during installation, SSH password-free login is required between each machine. The steps to configure password-free login are as follows</p>
 <pre><code class="language-shell">su dolphinscheduler
 
@@ -64,12 +64,12 @@ chmod 600 ~/.ssh/authorized_keys
 <blockquote>
 <p><strong><em>Notice:</em></strong> After the configuration is complete, you can run the command <code>ssh localhost</code> to test if it work or not, if you can login with ssh without password.</p>
 </blockquote>
-<h3>Start zookeeper</h3>
-<p>Go to the zookeeper installation directory, copy configure file <code>zoo_sample.cfg</code> to <code>conf/zoo.cfg</code>, and change value of dataDir in <code>conf/zoo.cfg</code> to <code>dataDir=./tmp/zookeeper</code></p>
-<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start zookeeper</span>
+<h3>Start ZooKeeper</h3>
+<p>Go to the ZooKeeper installation directory, copy configure file <code>zoo_sample.cfg</code> to <code>conf/zoo.cfg</code>, and change value of dataDir in <code>conf/zoo.cfg</code> to <code>dataDir=./tmp/zookeeper</code></p>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start ZooKeeper</span>
 ./bin/zkServer.sh start
 </code></pre>
-<h2>Modify configuration</h2>
+<h2>Modify Configuration</h2>
 <p>After completing the preparation of the basic environment, you need to modify the configuration file according to your environment. The configuration file is in the path of <code>conf/config/install_config.conf</code>. Generally, you just needs to modify the <strong>INSTALL MACHINE, DolphinScheduler ENV, Database, Registry Server</strong> part to complete the deployment, the following describes the parameters that must be modified</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> INSTALL MACHINE</span>
@@ -108,10 +108,10 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 #</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> Registry Server</span>
 <span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
-<span class="hljs-meta">#</span><span class="bash"> Registration center address, the address of zookeeper service</span>
+<span class="hljs-meta">#</span><span class="bash"> Registration center address, the address of ZooKeeper service</span>
 registryServers=&quot;localhost:2181&quot;
 </code></pre>
-<h2>Initialize the database</h2>
+<h2>Initialize the Database</h2>
 <p>DolphinScheduler metadata is stored in relational database. Currently, PostgreSQL and MySQL are supported. If you use MySQL, you need to manually download <a href="https://downloads.MySQL.com/archives/c-j/">mysql-connector-java driver</a> (8.0.16) and move it to the lib directory of DolphinScheduler. Let's take MySQL as an example for how to initialize the database</p>
 <pre><code class="language-shell">mysql -uroot -p
 <span class="hljs-meta">
@@ -136,7 +136,7 @@ mysql&gt;</span><span class="bash"> flush privileges;</span>
 </blockquote>
 <h2>Login DolphinScheduler</h2>
 <p>The browser access address <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> can login DolphinScheduler UI. The default username and password are <strong>admin/dolphinscheduler123</strong></p>
-<h2>Start or stop server</h2>
+<h2>Start or Stop Server</h2>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Stop all DolphinScheduler server</span>
 sh ./bin/stop-all.sh
 <span class="hljs-meta">
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.json b/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.json
index a9ddaf0..74456af 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.json
@@ -1,6 +1,6 @@
 {
   "filename": "pseudo-cluster.md",
-  "__html": "<h1>Pseudo-Cluster Deployment</h1>\n<p>The purpose of pseudo-cluster deployment is to deploy the DolphinScheduler service on a single machine. In this mode, DolphinScheduler's master, worker, api server, and logger server are all on the same machine.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks numbe [...]
+  "__html": "<h1>Pseudo-Cluster Deployment</h1>\n<p>The purpose of pseudo-cluster deployment is to deploy the DolphinScheduler service on a single machine. In this mode, DolphinScheduler's master, worker, api server, and logger server are all on the same machine.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks numbe [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html b/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html
index e29308c..2411357 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html
@@ -14,7 +14,7 @@
 <p>Standalone only for quick look for DolphinScheduler.</p>
 <p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href="standalone.md">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href="pseudo-cluster.md">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you follow <a href="cluster.md">cluster deployment</a> or <a href="kubernetes.md">kubernetes</a></p>
 <blockquote>
-<p><strong><em>Note:</em></strong> Standalone only recommends the use of less than 20 workflows, because it uses H2 Database, Zookeeper Testing Server, too many tasks may cause instability</p>
+<p><strong><em>Note:</em></strong> Standalone only recommends the use of less than 20 workflows, because it uses H2 Database, ZooKeeper Testing Server, too many tasks may cause instability</p>
 </blockquote>
 <h2>Prepare</h2>
 <ul>
@@ -22,7 +22,7 @@
 <li>Binary package: Download the DolphinScheduler binary package at <a href="https://dolphinscheduler.apache.org/en-us/download/download.html">download page</a></li>
 </ul>
 <h2>Start DolphinScheduler Standalone Server</h2>
-<h3>Extract and start DolphinScheduler</h3>
+<h3>Extract and Start DolphinScheduler</h3>
 <p>There is a standalone startup script in the binary compressed package, which can be quickly started after extract. Switch to a user with sudo permission and run the script</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Extract and start Standalone Server</span>
 tar -xvzf apache-dolphinscheduler-*-bin.tar.gz
@@ -31,7 +31,7 @@ sh ./bin/dolphinscheduler-daemon.sh start standalone-server
 </code></pre>
 <h3>Login DolphinScheduler</h3>
 <p>The browser access address <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> can login DolphinScheduler UI. The default username and password are <strong>admin/dolphinscheduler123</strong></p>
-<h2>start/stop server</h2>
+<h3>Start or Stop Server</h3>
 <p>The script <code>./bin/dolphinscheduler-daemon.sh</code> can not only quickly start standalone, but also stop the service operation. All the commands are as follows</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start Standalone Server</span>
 sh ./bin/dolphinscheduler-daemon.sh start standalone-server
diff --git a/en-us/docs/2.0.3/user_doc/guide/installation/standalone.json b/en-us/docs/2.0.3/user_doc/guide/installation/standalone.json
index da513c1..0fe533d 100644
--- a/en-us/docs/2.0.3/user_doc/guide/installation/standalone.json
+++ b/en-us/docs/2.0.3/user_doc/guide/installation/standalone.json
@@ -1,6 +1,6 @@
 {
   "filename": "standalone.md",
-  "__html": "<h1>Standalone</h1>\n<p>Standalone only for quick look for DolphinScheduler.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you foll [...]
+  "__html": "<h1>Standalone</h1>\n<p>Standalone only for quick look for DolphinScheduler.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you foll [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/monitor.html b/en-us/docs/2.0.3/user_doc/guide/monitor.html
index 48c737e..ef84c66 100644
--- a/en-us/docs/2.0.3/user_doc/guide/monitor.html
+++ b/en-us/docs/2.0.3/user_doc/guide/monitor.html
@@ -11,47 +11,47 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>Service management</h2>
+<h2>Service Management</h2>
 <ul>
 <li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>
 </ul>
-<h2>master monitoring</h2>
+<h2>Monitor Master Server</h2>
 <ul>
 <li>Mainly related to master information.</li>
 </ul>
 <p align="center">
    <img src="/img/master-jk-en.png" width="80%" />
  </p>
-<h2>worker monitoring</h2>
+<h2>Monitor Worker Server</h2>
 <ul>
 <li>Mainly related to worker information.</li>
 </ul>
 <p align="center">
    <img src="/img/worker-jk-en.png" width="80%" />
  </p>
-<h2>Zookeeper monitoring</h2>
+<h2>Monitor ZooKeeper</h2>
 <ul>
 <li>Mainly related configuration information of each worker and master in ZooKeeper.</li>
 </ul>
 <p alignlinux ="center">
    <img src="/img/zookeeper-monitor-en.png" width="80%" />
  </p>
-<h2>DB monitoring</h2>
+<h2>Monitor DB</h2>
 <ul>
 <li>Mainly the health of the DB</li>
 </ul>
 <p align="center">
    <img src="/img/mysql-jk-en.png" width="80%" />
  </p>
-<h2>Statistics management</h2>
+<h2>Statistics Management</h2>
 <p align="center">
    <img src="/img/statistics-en.png" width="80%" />
  </p>
 <ul>
 <li>Number of commands to be executed: statistics on the t_ds_command table</li>
 <li>The number of failed commands: statistics on the t_ds_error_command table</li>
-<li>Number of tasks to run: Count the data of task_queue in Zookeeper</li>
-<li>Number of tasks to be killed: Count the data of task_kill in Zookeeper</li>
+<li>Number of tasks to run: Count the data of task_queue in ZooKeeper</li>
+<li>Number of tasks to be killed: Count the data of task_kill in ZooKeeper</li>
 </ul>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/guide/monitor.json b/en-us/docs/2.0.3/user_doc/guide/monitor.json
index d3f3323..0e67a38 100644
--- a/en-us/docs/2.0.3/user_doc/guide/monitor.json
+++ b/en-us/docs/2.0.3/user_doc/guide/monitor.json
@@ -1,6 +1,6 @@
 {
   "filename": "monitor.md",
-  "__html": "<h1>Monitor</h1>\n<h2>Service management</h2>\n<ul>\n<li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>\n</ul>\n<h2>master monitoring</h2>\n<ul>\n<li>Mainly related to master information.</li>\n</ul>\n<p align=\"center\">\n   <img src=\"/img/master-jk-en.png\" width=\"80%\" />\n </p>\n<h2>worker monitoring</h2>\n<ul>\n<li>Mainly related to worker information.</li>\n</ul>\n<p align=\"center\">\n   [...]
+  "__html": "<h1>Monitor</h1>\n<h2>Service Management</h2>\n<ul>\n<li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>\n</ul>\n<h2>Monitor Master Server</h2>\n<ul>\n<li>Mainly related to master information.</li>\n</ul>\n<p align=\"center\">\n   <img src=\"/img/master-jk-en.png\" width=\"80%\" />\n </p>\n<h2>Monitor Worker Server</h2>\n<ul>\n<li>Mainly related to worker information.</li>\n</ul>\n<p align=\"cente [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/monitor.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html b/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html
index a9084db..b012756 100644
--- a/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html
+++ b/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html
@@ -13,9 +13,9 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>The dolphinscheduler-skywalking module provides <a href="https://skywalking.apache.org/">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>
 <p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>
-<h1>Installation</h1>
+<h2>Installation</h2>
 <p>The following configuration is used to enable SkyWalking agent.</p>
-<h3>Through environment variable configuration (for Docker Compose)</h3>
+<h3>Through Environment Variable Configuration (for Docker Compose)</h3>
 <p>Modify SkyWalking environment variables in <code>docker/docker-swarm/config.env.sh</code>:</p>
 <pre><code>SKYWALKING_ENABLE=true
 SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
@@ -25,7 +25,7 @@ SW_GRPC_LOG_SERVER_PORT=11800
 <p>And run</p>
 <pre><code class="language-shell"><span class="hljs-meta">$</span><span class="bash"> docker-compose up -d</span>
 </code></pre>
-<h3>Through environment variable configuration (for Docker)</h3>
+<h3>Through Environment Variable Configuration (for Docker)</h3>
 <pre><code class="language-shell"><span class="hljs-meta">$</span><span class="bash"> docker run -d --name dolphinscheduler \
 -e DATABASE_HOST=<span class="hljs-string">&quot;192.168.x.x&quot;</span> -e DATABASE_PORT=<span class="hljs-string">&quot;5432&quot;</span> -e DATABASE_DATABASE=<span class="hljs-string">&quot;dolphinscheduler&quot;</span> \
 -e DATABASE_USERNAME=<span class="hljs-string">&quot;test&quot;</span> -e DATABASE_PASSWORD=<span class="hljs-string">&quot;test&quot;</span> \
@@ -37,7 +37,7 @@ SW_GRPC_LOG_SERVER_PORT=11800
 -p 12345:12345 \
 apache/dolphinscheduler:2.0.3 all</span>
 </code></pre>
-<h3>Through install_config.conf configuration (for DolphinScheduler <a href="http://install.sh">install.sh</a>)</h3>
+<h3>Through install_config.conf Configuration (for DolphinScheduler <a href="http://install.sh">install.sh</a>)</h3>
 <p>Add the following configurations to <code>${workDir}/conf/config/install_config.conf</code>.</p>
 <pre><code class="language-properties"><span class="hljs-comment">
 # SkyWalking config</span>
@@ -51,9 +51,9 @@ apache/dolphinscheduler:2.0.3 all</span>
 <span class="hljs-attr">skywalkingLogReporterPort</span>=<span class="hljs-string">&quot;11800&quot;</span>
 
 </code></pre>
-<h1>Usage</h1>
+<h2>Usage</h2>
 <h3>Import Dashboard</h3>
-<h4>Import DolphinScheduler Dashboard to SkyWalking Sever</h4>
+<h4>Import DolphinScheduler Dashboard to SkyWalking Server</h4>
 <p>Copy the <code>${dolphinscheduler.home}/ext/skywalking-agent/dashboard/dolphinscheduler.yml</code> file into <code>${skywalking-oap-server.home}/config/ui-initialized-templates/</code> directory, and restart SkyWalking oap-server.</p>
 <h4>View DolphinScheduler Dashboard</h4>
 <p>If you have opened SkyWalking dashboard with a browser before, you need to clear the browser cache.</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.json b/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.json
index 1907c39..77029d5 100644
--- a/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.json
+++ b/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.json
@@ -1,6 +1,6 @@
 {
   "filename": "skywalking-agent.md",
-  "__html": "<h1>SkyWalking Agent</h1>\n<p>The dolphinscheduler-skywalking module provides <a href=\"https://skywalking.apache.org/\">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>\n<p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>\n<h1>Installation</h1>\n<p>The following configuration is used to enable SkyWalking agent.</p>\n<h3>Through environment variable configuration (for Docker Compose)</ [...]
+  "__html": "<h1>SkyWalking Agent</h1>\n<p>The dolphinscheduler-skywalking module provides <a href=\"https://skywalking.apache.org/\">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>\n<p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>\n<h2>Installation</h2>\n<p>The following configuration is used to enable SkyWalking agent.</p>\n<h3>Through Environment Variable Configuration (for Docker Compose)</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/open-api.html b/en-us/docs/2.0.3/user_doc/guide/open-api.html
index 591b0bf..808754c 100644
--- a/en-us/docs/2.0.3/user_doc/guide/open-api.html
+++ b/en-us/docs/2.0.3/user_doc/guide/open-api.html
@@ -14,7 +14,7 @@
 <h2>Background</h2>
 <p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>
 <h2>The Operation Steps of DS API Calls</h2>
-<h3>Create a token</h3>
+<h3>Create a Token</h3>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>
 </ol>
@@ -27,7 +27,7 @@
 <p align="center">
    <img src="/img/create-token-en1.png" width="80%" />
  </p>
-<h3>Use token</h3>
+<h3>Use Token</h3>
 <ol>
 <li>Open the API documentation page
 <blockquote>
@@ -51,7 +51,7 @@
 <p align="center">
    <img src="/img/test-api.png" width="80%" />
  </p>  
-<h3>Create a project</h3>
+<h3>Create a Project</h3>
 <p>Here is an example of creating a project named &quot;wudl-flink-test&quot;:</p>
 <p align="center">
    <img src="/img/api/create_project1.png" width="80%" />
@@ -64,7 +64,7 @@
  </p>
 The returned msg information is "success", indicating that we have successfully created the project through API.
 <p>If you are interested in the source code of the project, please continue to read the following:</p>
-<h3>Appendix:The source code of creating a project</h3>
+<h3>Appendix:The Source Code of Creating a Project</h3>
 <p align="center">
    <img src="/img/api/create_source1.png" width="80%" />
  </p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/open-api.json b/en-us/docs/2.0.3/user_doc/guide/open-api.json
index 5ebf21c..f74b0c0 100644
--- a/en-us/docs/2.0.3/user_doc/guide/open-api.json
+++ b/en-us/docs/2.0.3/user_doc/guide/open-api.json
@@ -1,6 +1,6 @@
 {
   "filename": "open-api.md",
-  "__html": "<h1>Open API</h1>\n<h2>Background</h2>\n<p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>\n<h2>The Operation Steps of DS API Calls</h2>\n<h3>Create a token</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>\n</ol>\n<p align=\"center\ [...]
+  "__html": "<h1>Open API</h1>\n<h2>Background</h2>\n<p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>\n<h2>The Operation Steps of DS API Calls</h2>\n<h3>Create a Token</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>\n</ol>\n<p align=\"center\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/open-api.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/parameter/context.html b/en-us/docs/2.0.3/user_doc/guide/parameter/context.html
index 2f419e2..f15522a 100644
--- a/en-us/docs/2.0.3/user_doc/guide/parameter/context.html
+++ b/en-us/docs/2.0.3/user_doc/guide/parameter/context.html
@@ -12,11 +12,11 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href="priority.md">Parameter Priority</a></p>
-<h2>Local task use global parameter</h2>
+<h2>Local Task Use Global Parameter</h2>
 <p>The premise of local tasks referencing global parameters is that you have already defined <a href="global.md">Global Parameter</a>. The usage is similar to the usage in <a href="local.md">local parameters</a>, but the value of the parameter needs to be configured as the key in the global parameter</p>
 <p><img src="/img/global_parameter.png" alt="parameter-call-global-in-local"></p>
 <p>As shown in the figure above, <code>${biz_date}</code> and <code>${biz_curdate}</code> are examples of local parameters referencing global parameters. Observe the last line of the above figure, local_param_bizdate uses ${global_bizdate} to refer to the global parameter. In the shell script, you can use ${local_param_bizdate} to refer to the value of the global variable global_bizdate, or set the value of local_param_bizdate directly through JDBC. In the same way, local_param refers to [...]
-<h2>Pass parameter from upstream task to downstream</h2>
+<h2>Pass Parameter from Upstream Task to Downstream</h2>
 <p>DolphinScheduler Parameter transfer between tasks is allowed, and the current transfer direction only supports one-way transfer from upstream to downstream. The task types currently supporting this feature are:</p>
 <ul>
 <li><a href="../task/shell.md">Shell</a></li>
diff --git a/en-us/docs/2.0.3/user_doc/guide/parameter/context.json b/en-us/docs/2.0.3/user_doc/guide/parameter/context.json
index ee9ecf8..22086a7 100644
--- a/en-us/docs/2.0.3/user_doc/guide/parameter/context.json
+++ b/en-us/docs/2.0.3/user_doc/guide/parameter/context.json
@@ -1,6 +1,6 @@
 {
   "filename": "context.md",
-  "__html": "<h1>Parameter Context</h1>\n<p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href=\"priority.md\">Parameter Priority</a></p>\n<h2>Local task use global parameter</h2>\n<p>The premise of local tasks referencing  [...]
+  "__html": "<h1>Parameter Context</h1>\n<p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href=\"priority.md\">Parameter Priority</a></p>\n<h2>Local Task Use Global Parameter</h2>\n<p>The premise of local tasks referencing  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/parameter/context.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/project-list.html b/en-us/docs/2.0.3/user_doc/guide/project/project-list.html
index cd57c3d..4199a1a 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/project-list.html
+++ b/en-us/docs/2.0.3/user_doc/guide/project/project-list.html
@@ -11,7 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>Create project</h2>
+<h2>Create Project</h2>
 <ul>
 <li>
 <p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>
@@ -20,7 +20,7 @@
 </p>
 </li>
 </ul>
-<h2>Project home</h2>
+<h2>Project Home</h2>
 <ul>
 <li>
 <p>Click the project name link on the project management page to enter the project home page, as shown in the figure below, the project home page contains the task status statistics, process status statistics, and workflow definition statistics of the project. The introduction for those metric:</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/project-list.json b/en-us/docs/2.0.3/user_doc/guide/project/project-list.json
index 4110a15..1a9807f 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/project-list.json
+++ b/en-us/docs/2.0.3/user_doc/guide/project/project-list.json
@@ -1,6 +1,6 @@
 {
   "filename": "project-list.md",
-  "__html": "<h1>Project</h1>\n<h2>Create project</h2>\n<ul>\n<li>\n<p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>\n<p align=\"center\">\n    <img src=\"/img/create_project_en1.png\" width=\"80%\" />\n</p>\n</li>\n</ul>\n<h2>Project home</h2>\n<ul>\n<li>\n<p>Click the project name link on the project management  [...]
+  "__html": "<h1>Project</h1>\n<h2>Create Project</h2>\n<ul>\n<li>\n<p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>\n<p align=\"center\">\n    <img src=\"/img/create_project_en1.png\" width=\"80%\" />\n</p>\n</li>\n</ul>\n<h2>Project Home</h2>\n<ul>\n<li>\n<p>Click the project name link on the project management  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/project-list.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html b/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html
index 822fd2a..9bdff1f 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html
+++ b/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/task-instance.json b/en-us/docs/2.0.3/user_doc/guide/project/task-instance.json
index 95ae7f9..b298fcb 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/task-instance.json
+++ b/en-us/docs/2.0.3/user_doc/guide/project/task-instance.json
@@ -1,6 +1,6 @@
 {
   "filename": "task-instance.md",
-  "__html": "<h2>Task instance</h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>\n   <p align=\"center\">\n      <img src=\"/img/task-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>\n<p><span id=taskLog>View log:</span>Click the &quot;view log&quot; button in the operation c [...]
+  "__html": "<h1>Task Instance</h1>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>\n   <p align=\"center\">\n      <img src=\"/img/task-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>\n<p><span id=taskLog>View log:</span>Click the &quot;view log&quot; button in the operation c [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html b/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html
index 96969c4..9972ea8 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html
+++ b/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html
@@ -10,8 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2><span id=creatDag> Create workflow definition</span></h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2><span id=creatDag> Create Workflow Definition</span></h2>
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>
@@ -59,7 +59,7 @@
 <blockquote>
 <p>For other types of tasks, please refer to <a href="#TaskParamers">Task Node Type and Parameter Settings</a>.</p>
 </blockquote>
-<h2>Workflow definition operation function</h2>
+<h2>Workflow Definition Operation Function</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown below:</p>
 <p align="center">
 <img src="/img/work_list_en.png" width="80%" />
@@ -79,7 +79,7 @@ The operation functions of the workflow definition list are as follows:
 </p>
 </li>
 </ul>
-<h2><span id=runWorkflow>Run the workflow</span></h2>
+<h2><span id=runWorkflow>Run the Workflow</span></h2>
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown in the figure below, click the &quot;Go Online&quot; button <img src="/img/online.png" width="35"/>,Go online workflow.</p>
@@ -117,7 +117,7 @@ The operation functions of the workflow definition list are as follows:
 </blockquote>
 </li>
 </ul>
-<h2><span id=creatTiming>Workflow timing</span></h2>
+<h2><span id=creatTiming>Workflow Timing</span></h2>
 <ul>
 <li>Create timing: Click Project Management-&gt;Workflow-&gt;Workflow Definition, enter the workflow definition page, go online the workflow, click the &quot;timing&quot; button <img src="/img/timing.png" width="35"/> ,The timing parameter setting dialog box pops up, as shown in the figure below:<p align="center">
     <img src="/img/time_schedule_en.png" width="80%" />
@@ -135,7 +135,7 @@ The operation functions of the workflow definition list are as follows:
 </p>
 </li>
 </ul>
-<h2>Import workflow</h2>
+<h2>Import Workflow</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, click the &quot;Import Workflow&quot; button to import the local workflow file, the workflow definition list displays the imported workflow, and the status is offline.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.json b/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.json
index dac03dc..1662ac7 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.json
+++ b/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.json
@@ -1,6 +1,6 @@
 {
   "filename": "workflow-definition.md",
-  "__html": "<h1>Workflow definition</h1>\n<h2><span id=creatDag> Create workflow definition</span></h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>\n<p align=\"center\">\n    <img src=\"/img/dag5.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Drag in the toolbar <img src=\" [...]
+  "__html": "<h1>Workflow Definition</h1>\n<h2><span id=creatDag> Create Workflow Definition</span></h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>\n<p align=\"center\">\n    <img src=\"/img/dag5.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Drag in the toolbar <img src=\" [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html b/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html
index a6dbf68..fd76885 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html
+++ b/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html
@@ -10,8 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>View workflow instance</h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>View Workflow Instance</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align="center">
       <img src="/img/instance-list-en.png" width="80%" />
@@ -22,7 +22,7 @@
 </p>
 </li>
 </ul>
-<h2>View task log</h2>
+<h2>View Task Log</h2>
 <ul>
 <li>Enter the workflow instance page, click the workflow name, enter the DAG view page, double-click the task node, as shown in the following figure: <p align="center">
    <img src="/img/instanceViewLog-en.png" width="80%" />
@@ -33,7 +33,7 @@
  </p>
 </li>
 </ul>
-<h2>View task history</h2>
+<h2>View Task History</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
 <li>Double-click the task node, as shown in the figure below, click &quot;View History&quot; to jump to the task instance page, and display a list of task instances running by the workflow instance <p align="center">
@@ -41,7 +41,7 @@
  </p>
 </li>
 </ul>
-<h2>View operating parameters</h2>
+<h2>View Operating Parameters</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
 <li>Click the icon in the upper left corner <img src="/img/run_params_button.png" width="35"/>,View the startup parameters of the workflow instance; click the icon <img src="/img/global_param.png" width="35"/>,View the global and local parameters of the workflow instance, as shown in the following figure: <p align="center">
@@ -49,7 +49,7 @@
  </p>
 </li>
 </ul>
-<h2>Workflow instance operation function</h2>
+<h2>Workflow Instance Operation Function</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:</p>
   <p align="center">
     <img src="/img/instance-list-en.png" width="80%" />
diff --git a/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.json b/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.json
index 7eb80e1..9d9b1cc 100644
--- a/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.json
+++ b/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.json
@@ -1,6 +1,6 @@
 {
   "filename": "workflow-instance.md",
-  "__html": "<h1>Workflow instance</h1>\n<h2>View workflow instance</h2>\n<ul>\n<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align=\"center\">\n      <img src=\"/img/instance-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.<p align=\"center\">\n  <img src=\"/img/instance-runs-e [...]
+  "__html": "<h1>Workflow Instance</h1>\n<h2>View Workflow Instance</h2>\n<ul>\n<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align=\"center\">\n      <img src=\"/img/instance-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.<p align=\"center\">\n  <img src=\"/img/instance-runs-e [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/resource.html b/en-us/docs/2.0.3/user_doc/guide/resource.html
index 662163d..3a37abd 100644
--- a/en-us/docs/2.0.3/user_doc/guide/resource.html
+++ b/en-us/docs/2.0.3/user_doc/guide/resource.html
@@ -19,7 +19,7 @@
 <li>If you using Hadoop cluster with HA, you need to enable HDFS resource upload, and you need to copy the <code>core-site.xml</code> and <code>hdfs-site.xml</code> under the Hadoop cluster to <code>/opt/dolphinscheduler/conf</code>, otherwise Skip step</li>
 </ul>
 </blockquote>
-<h2>hdfs resource configuration</h2>
+<h2>HDFS Resource Configuration</h2>
 <ul>
 <li>Upload resource files and udf functions, all uploaded files and resources will be stored on hdfs, so the following configuration items are required:</li>
 </ul>
@@ -53,7 +53,7 @@ conf/common/hadoop.properties
 <li>Only one address needs to be configured for yarn.resourcemanager.ha.rm.ids and yarn.application.status.address, and the other address is empty.</li>
 <li>You need to copy core-site.xml and hdfs-site.xml from the conf directory of the Hadoop cluster to the conf directory of the dolphinscheduler project, and restart the api-server service.</li>
 </ul>
-<h2>File management</h2>
+<h2>File Management</h2>
 <blockquote>
 <p>It is the management of various resource files, including creating basic txt/log/sh/conf/py/java and other files, uploading jar packages and other types of files, and can do edit, rename, download, delete and other operations.</p>
 </blockquote>
@@ -107,8 +107,8 @@ conf/common/hadoop.properties
 </blockquote>
 </li>
 </ul>
-<h2>UDF management</h2>
-<h3>Resource management</h3>
+<h2>UDF Management</h2>
+<h3>Resource Management</h3>
 <blockquote>
 <p>The resource management and file management functions are similar. The difference is that the resource management is the uploaded UDF function, and the file management uploads the user program, script and configuration file.
 Operation function: rename, download, delete.</p>
@@ -120,7 +120,7 @@ Operation function: rename, download, delete.</p>
 </blockquote>
 </li>
 </ul>
-<h3>Function management</h3>
+<h3>Function Management</h3>
 <ul>
 <li>Create UDF function
 <blockquote>
diff --git a/en-us/docs/2.0.3/user_doc/guide/resource.json b/en-us/docs/2.0.3/user_doc/guide/resource.json
index df72329..990d641 100644
--- a/en-us/docs/2.0.3/user_doc/guide/resource.json
+++ b/en-us/docs/2.0.3/user_doc/guide/resource.json
@@ -1,6 +1,6 @@
 {
   "filename": "resource.md",
-  "__html": "<h1>Resource Center</h1>\n<p>If you want to use the resource upload function, you can select the local file directory for a single machine(this operation does not need to deploy Hadoop). Or you can also upload to a Hadoop or MinIO cluster, at this time, you need to have Hadoop (2.6+) or MinIO and other related environments</p>\n<blockquote>\n<p><strong><em>Note:</em></strong></p>\n<ul>\n<li>If the resource upload function is used, the deployment user in <a href=\"installatio [...]
+  "__html": "<h1>Resource Center</h1>\n<p>If you want to use the resource upload function, you can select the local file directory for a single machine(this operation does not need to deploy Hadoop). Or you can also upload to a Hadoop or MinIO cluster, at this time, you need to have Hadoop (2.6+) or MinIO and other related environments</p>\n<blockquote>\n<p><strong><em>Note:</em></strong></p>\n<ul>\n<li>If the resource upload function is used, the deployment user in <a href=\"installatio [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/resource.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/security.html b/en-us/docs/2.0.3/user_doc/guide/security.html
index 5a6eba0..367a74d 100644
--- a/en-us/docs/2.0.3/user_doc/guide/security.html
+++ b/en-us/docs/2.0.3/user_doc/guide/security.html
@@ -15,7 +15,7 @@
 <li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>
 <li>Administrator login, default user name and password: admin/dolphinscheduler123</li>
 </ul>
-<h2>Create queue</h2>
+<h2>Create Queue</h2>
 <ul>
 <li>Queue is used when the &quot;queue&quot; parameter is needed to execute programs such as spark and mapreduce.</li>
 <li>The administrator enters the Security Center-&gt;Queue Management page and clicks the &quot;Create Queue&quot; button to create a queue.</li>
@@ -23,7 +23,7 @@
 <p align="center">
    <img src="/img/create-queue-en.png" width="80%" />
  </p>
-<h2>Add tenant</h2>
+<h2>Add Tenant</h2>
 <ul>
 <li>The tenant corresponds to the Linux user, which is used by the worker to submit the job. Task will fail if Linux does not exists this user. You can set the parameter <code>worker.tenant.auto.create</code> as <code>true</code> in configuration file <code>worker.properties</code>. After that DolphinScheduler would create user if not exists, The property <code>worker.tenant.auto.create=true</code> requests worker run <code>sudo</code> command without password.</li>
 <li>Tenant Code: <strong>Tenant Code is the only user on Linux and cannot be repeated</strong></li>
@@ -32,7 +32,7 @@
  <p align="center">
     <img src="/img/addtenant-en.png" width="80%" />
   </p>
-<h2>Create normal user</h2>
+<h2>Create Normal User</h2>
 <ul>
 <li>
 <p>Users are divided into <strong>administrator users</strong> and <strong>normal users</strong></p>
@@ -63,7 +63,7 @@
 <li>The administrator enters the Security Center-&gt;User Management page and clicks the &quot;Edit&quot; button. When editing user information, enter the new password to modify the user password.</li>
 <li>After a normal user logs in, click the user information in the user name drop-down box to enter the password modification page, enter the password and confirm the password and click the &quot;Edit&quot; button, then the password modification is successful.</li>
 </ul>
-<h2>Create alarm group</h2>
+<h2>Create Alarm Group</h2>
 <ul>
 <li>The alarm group is a parameter set at startup. After the process ends, the status of the process and other information will be sent to the alarm group in the form of email.</li>
 </ul>
@@ -74,7 +74,7 @@
   <img src="/img/mail-en.png" width="80%" />
 </li>
 </ul>
-<h2>Token management</h2>
+<h2>Token Management</h2>
 <blockquote>
 <p>Since the back-end interface has login check, token management provides a way to perform various operations on the system by calling the interface.</p>
 </blockquote>
@@ -145,7 +145,7 @@
 <ul>
 <li>Resources, data sources, and UDF function authorization are the same as project authorization.</li>
 </ul>
-<h2>Worker grouping</h2>
+<h2>Worker Grouping</h2>
 <p>Each worker node will belong to its own worker group, and the default group is &quot;default&quot;.</p>
 <p>When the task is executed, the task can be assigned to the specified worker group, and the task will be executed by the worker node in the group.</p>
 <blockquote>
diff --git a/en-us/docs/2.0.3/user_doc/guide/security.json b/en-us/docs/2.0.3/user_doc/guide/security.json
index 352c4c9..fac62d6 100644
--- a/en-us/docs/2.0.3/user_doc/guide/security.json
+++ b/en-us/docs/2.0.3/user_doc/guide/security.json
@@ -1,6 +1,6 @@
 {
   "filename": "security.md",
-  "__html": "<h1>Security</h1>\n<ul>\n<li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>\n<li>Administrator login, default user name and password: admin/dolphinscheduler123</li>\n</ul>\n<h2>Create queue</h2>\n<ul>\ [...]
+  "__html": "<h1>Security</h1>\n<ul>\n<li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>\n<li>Administrator login, default user name and password: admin/dolphinscheduler123</li>\n</ul>\n<h2>Create Queue</h2>\n<ul>\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/security.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/conditions.html b/en-us/docs/2.0.3/user_doc/guide/task/conditions.html
index 0af5488..c3ca5be 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/conditions.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/conditions.html
@@ -43,7 +43,7 @@
 </ul>
 </li>
 </ul>
-<h2>Related task</h2>
+<h2>Related Task</h2>
 <p><a href="switch.md">switch</a>: <a href="conditions.md">Condition</a>task mainly executes the corresponding branch based on the execution status (success, failure) of the upstream node. The <a href="switch.md">Switch</a> task mainly executes the corresponding branch based on the value of the <a href="../parameter/global.md">global parameter</a> and the judgment expression result written by the user.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/conditions.json b/en-us/docs/2.0.3/user_doc/guide/task/conditions.json
index 159bfe1..f5b1466 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/conditions.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/conditions.json
@@ -1,6 +1,6 @@
 {
   "filename": "conditions.md",
-  "__html": "<h1>Conditions</h1>\n<p>Conditions is a condition node, determining which downstream task should be run based on the condition set to it. For now, the Conditions support multiple upstream tasks, but only two downstream tasks. When the number of upstream tasks exceeds one, complex upstream dependencies can be achieved through <code>and</code> and <code>or</code> operators.</p>\n<h2>Create</h2>\n<p>Drag in the toolbar<img src=\"/img/conditions.png\" width=\"20\"/>The task node [...]
+  "__html": "<h1>Conditions</h1>\n<p>Conditions is a condition node, determining which downstream task should be run based on the condition set to it. For now, the Conditions support multiple upstream tasks, but only two downstream tasks. When the number of upstream tasks exceeds one, complex upstream dependencies can be achieved through <code>and</code> and <code>or</code> operators.</p>\n<h2>Create</h2>\n<p>Drag in the toolbar<img src=\"/img/conditions.png\" width=\"20\"/>The task node [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/conditions.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/datax.html b/en-us/docs/2.0.3/user_doc/guide/task/datax.html
index c241e86..f27e4d2 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/datax.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/datax.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>
 <p>Drag in the toolbar<img src="/img/datax.png" width="35"/>Task node into the drawing board</p>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/datax.json b/en-us/docs/2.0.3/user_doc/guide/task/datax.json
index 6ab67d3..7d82360 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/datax.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/datax.json
@@ -1,6 +1,6 @@
 {
   "filename": "datax.md",
-  "__html": "<h1>DATAX</h1>\n<ul>\n<li>\n<p>Drag in the toolbar<img src=\"/img/datax.png\" width=\"35\"/>Task node into the drawing board</p>\n<p align=\"center\">\n <img src=\"/img/datax-en.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)</p>\n</li>\n<li>\n<p>Data source: selec [...]
+  "__html": "<h1>DataX</h1>\n<ul>\n<li>\n<p>Drag in the toolbar<img src=\"/img/datax.png\" width=\"35\"/>Task node into the drawing board</p>\n<p align=\"center\">\n <img src=\"/img/datax-en.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)</p>\n</li>\n<li>\n<p>Data source: selec [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/datax.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/dependent.html b/en-us/docs/2.0.3/user_doc/guide/task/dependent.html
index 6a66e10..6ebcfac 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/dependent.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/dependent.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>
 </ul>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/dependent.json b/en-us/docs/2.0.3/user_doc/guide/task/dependent.json
index 82629c8..178b1f8 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/dependent.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/dependent.json
@@ -1,6 +1,6 @@
 {
   "filename": "dependent.md",
-  "__html": "<h1>DEPENDENT</h1>\n<ul>\n<li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>\n</ul>\n<blockquote>\n<p>Drag the <img src=\"https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png\" alt=\"PNG\"> task node in the toolbar to the drawing board, as shown in the following [...]
+  "__html": "<h1>Dependent</h1>\n<ul>\n<li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>\n</ul>\n<blockquote>\n<p>Drag the <img src=\"https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png\" alt=\"PNG\"> task node in the toolbar to the drawing board, as shown in the following [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/dependent.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/flink.html b/en-us/docs/2.0.3/user_doc/guide/task/flink.html
index 671a15d..4a4bb6a 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/flink.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/flink.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href="https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/">flink cli</a> for more details.</p>
-<h2>Create task</h2>
+<h2>Create Task</h2>
 <ul>
 <li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>
 <li>Drag the <img src="/img/tasks/icons/flink.png" width="15"/> from the toolbar to the drawing board.</li>
@@ -50,13 +50,13 @@
 <li><strong>Predecessor task</strong>: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This is a common introductory case in the Big Data ecosystem, which often applied to computational frameworks such as MapReduce, Flink and Spark. The main purpose is to count the number of identical words in the input text. (Flink's releases come with this example job)</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the Flink task node, you will need to use the Resource Centre to upload the jar package for the executable. Refer to the <a href="../resource.md">resource center</a>.</p>
 <p>After configuring the Resource Centre, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/upload_flink.png" alt="resource_upload"></p>
-<h4>Configuring Flink nodes</h4>
+<h4>Configure Flink Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/flink_task.png" alt="demo-flink-simple"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/flink.json b/en-us/docs/2.0.3/user_doc/guide/task/flink.json
index 365ea1f..4303352 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/flink.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/flink.json
@@ -1,6 +1,6 @@
 {
   "filename": "flink.md",
-  "__html": "<h1>Flink</h1>\n<h2>Overview</h2>\n<p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href=\"https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/\">flink cli</a> for more details.</p>\n<h2>Create task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the D [...]
+  "__html": "<h1>Flink</h1>\n<h2>Overview</h2>\n<p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href=\"https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/\">flink cli</a> for more details.</p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the D [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/flink.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html b/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html
index cc2e583..e5d895c 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html
@@ -58,13 +58,13 @@
 <li><strong>User-defined parameter</strong>: It is a user-defined parameter of the MapReduce part, which will replace the content with ${variable} in the script</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This example is a common introductory type of MapReduce application, which is designed to count the number of identical words in the input text.</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the MapReduce task node, you will need to use the Resource Centre to upload the jar package for the executable. Refer to the <a href="../resource.md">resource centre</a>.</p>
 <p>After configuring the Resource Centre, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/resource_upload.png" alt="resource_upload"></p>
-<h4>Configuring MapReduce nodes</h4>
+<h4>Configure MapReduce Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/mr.png" alt="demo-mr-simple"></p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.json b/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.json
index 8203db0..7dc92ef 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.json
@@ -1,6 +1,6 @@
 {
   "filename": "map-reduce.md",
-  "__html": "<h1>MapReduce</h1>\n<h2>Overview</h2>\n<ul>\n<li>MapReduce(MR) task type for executing MapReduce programs. For MapReduce nodes, the worker submits the task by using the Hadoop command <code>hadoop jar</code>. See <a href=\"https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CommandsManual.html#jar\">Hadoop Command Manual</a> for more details.</li>\n</ul>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click [...]
+  "__html": "<h1>MapReduce</h1>\n<h2>Overview</h2>\n<ul>\n<li>MapReduce(MR) task type for executing MapReduce programs. For MapReduce nodes, the worker submits the task by using the Hadoop command <code>hadoop jar</code>. See <a href=\"https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CommandsManual.html#jar\">Hadoop Command Manual</a> for more details.</li>\n</ul>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/spark.html b/en-us/docs/2.0.3/user_doc/guide/task/spark.html
index 1a034f9..f380446 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/spark.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/spark.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href="https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit">spark-submit</a> for more details.</p>
-<h2>Create task</h2>
+<h2>Create Task</h2>
 <ul>
 <li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>
 <li>Drag the <img src="/img/tasks/icons/spark.png" width="15"/> from the toolbar to the drawing board.</li>
@@ -47,13 +47,13 @@
 <li><strong>Predecessor task</strong>: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This is a common introductory case in the Big Data ecosystem, which often applied to computational frameworks such as MapReduce, Flink and Spark. The main purpose is to count the number of identical words in the input text.</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the Spark task node, you will need to use the Resource Center to upload the jar package for the executable. Refer to the <a href="../resource.md">resource center</a>.</p>
 <p>After configuring the Resource Center, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/upload_spark.png" alt="resource_upload"></p>
-<h4>Configuring Spark nodes</h4>
+<h4>Configure Spark Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/spark_task.png" alt="demo-spark-simple"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/spark.json b/en-us/docs/2.0.3/user_doc/guide/task/spark.json
index 3989ac6..f525035 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/spark.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/spark.json
@@ -1,6 +1,6 @@
 {
   "filename": "spark.md",
-  "__html": "<h1>Spark</h1>\n<h2>Overview</h2>\n<p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href=\"https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit\">spark-submit</a> for more details.</p>\n<h2>Create task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Work [...]
+  "__html": "<h1>Spark</h1>\n<h2>Overview</h2>\n<p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href=\"https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit\">spark-submit</a> for more details.</p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Work [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/spark.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/sql.html b/en-us/docs/2.0.3/user_doc/guide/task/sql.html
index c5270e7..0dc2d60 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/sql.html
+++ b/en-us/docs/2.0.3/user_doc/guide/task/sql.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>SQL task, used to connect to database and execute SQL.</p>
-<h2>create data source</h2>
+<h2>Create Data Source</h2>
 <p>Refer to <a href="../datasource/introduction.md">Data Source</a></p>
 <h2>Create Task</h2>
 <ul>
@@ -32,10 +32,10 @@
 <li>Post-sql: Post-sql is executed after the sql statement.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Create a temporary table in hive and write data</h3>
+<h3>Create a Temporary Table in Hive and Write Data</h3>
 <p>This example creates a temporary table <code>tmp_hello_world</code> in hive and write a row of data. Before creating a temporary table, we need to ensure that the table does not exist, so we will use custom parameters to obtain the time of the day as the suffix of the table name every time we run, so that this task can run every day. The format of the created table name is: <code>tmp_hello_world_{yyyyMMdd}</code>.</p>
 <p><img src="/img/tasks/demo/hive-sql.png" alt="hive-sql"></p>
-<h3>After running the task successfully, query the results in hive.</h3>
+<h3>After Running the Task Successfully, Query the Results in Hive.</h3>
 <p>Log in to the bigdata cluster and use 'hive' command or 'beeline' or 'JDBC' and other methods to connect to the 'Apache Hive' for the query. The query SQL is <code>select * from tmp_hello_world_{yyyyMMdd}</code>, please replace '{yyyyMMdd}' with the date of the running day. The query screenshot is as follows:</p>
 <p><img src="/img/tasks/demo/hive-result.png" alt="hive-sql"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/2.0.3/user_doc/guide/task/sql.json b/en-us/docs/2.0.3/user_doc/guide/task/sql.json
index 927740b..03b3532 100644
--- a/en-us/docs/2.0.3/user_doc/guide/task/sql.json
+++ b/en-us/docs/2.0.3/user_doc/guide/task/sql.json
@@ -1,6 +1,6 @@
 {
   "filename": "sql.md",
-  "__html": "<h1>SQL</h1>\n<h2>Overview</h2>\n<p>SQL task, used to connect to database and execute SQL.</p>\n<h2>create data source</h2>\n<p>Refer to <a href=\"../datasource/introduction.md\">Data Source</a></p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>\n<li>Drag <img src=\"/img/tasks/icons/sql.png\" width=\"25\"/> from the toolbar to the drawing board.</ [...]
+  "__html": "<h1>SQL</h1>\n<h2>Overview</h2>\n<p>SQL task, used to connect to database and execute SQL.</p>\n<h2>Create Data Source</h2>\n<p>Refer to <a href=\"../datasource/introduction.md\">Data Source</a></p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>\n<li>Drag <img src=\"/img/tasks/icons/sql.png\" width=\"25\"/> from the toolbar to the drawing board.</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/sql.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/2.0.3/user_doc/guide/upgrade.html b/en-us/docs/2.0.3/user_doc/guide/upgrade.html
index ff81c2a..ececb07 100644
--- a/en-us/docs/2.0.3/user_doc/guide/upgrade.html
+++ b/en-us/docs/2.0.3/user_doc/guide/upgrade.html
@@ -10,16 +10,16 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>1. Back Up Previous Version's Files and Database.</h2>
-<h2>2. Stop All Services of DolphinScheduler.</h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Back Up Previous Version's Files and Database</h2>
+<h2>Stop All Services of DolphinScheduler</h2>
 <p><code>sh ./script/stop-all.sh</code></p>
-<h2>3. Download the New Version's Installation Package.</h2>
+<h2>Download the New Version's Installation Package</h2>
 <ul>
 <li><a href="/en-us/download/download.html">Download</a> the latest version of the installation packages.</li>
 <li>The following upgrade operations need to be performed in the new version's directory.</li>
 </ul>
-<h2>4. Database Upgrade</h2>
+<h2>Database Upgrade</h2>
 <ul>
 <li>
 <p>Modify the following properties in <code>conf/config/install_config.conf</code>.</p>
@@ -42,8 +42,8 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 <p><code>sh ./script/create-dolphinscheduler.sh</code></p>
 </li>
 </ul>
-<h2>5. Backend Service Upgrade.</h2>
-<h3>5.1 Modify the Content in <code>conf/config/install_config.conf</code> File.</h3>
+<h2>Backend Service Upgrade</h2>
+<h3>Modify the Content in <code>conf/config/install_config.conf</code> File</h3>
 <ul>
 <li>Standalone Deployment please refer the [6, Modify running arguments] in <a href="./installation/standalone.md">Standalone-Deployment</a>.</li>
 <li>Cluster Deployment please refer the [6, Modify running arguments] in <a href="./installation/cluster.md">Cluster-Deployment</a>.</li>
@@ -77,7 +77,7 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash">worker service is deployed on <span class="hljs-built_in">which</span> machine, and also specify <span class="hljs-built_in">which</span> worker group this worker belongs to.</span> 
 workers=&quot;ds1:service1,ds2:service2,ds3:service2&quot;
 </code></pre>
-<h3>5.2 Execute Deploy Script.</h3>
+<h3>Execute Deploy Script</h3>
 <pre><code class="language-shell">`sh install.sh`
 </code></pre>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/2.0.3/user_doc/guide/upgrade.json b/en-us/docs/2.0.3/user_doc/guide/upgrade.json
index 58df0fb..c7bb7dc 100644
--- a/en-us/docs/2.0.3/user_doc/guide/upgrade.json
+++ b/en-us/docs/2.0.3/user_doc/guide/upgrade.json
@@ -1,6 +1,6 @@
 {
   "filename": "upgrade.md",
-  "__html": "<h1>DolphinScheduler upgrade documentation</h1>\n<h2>1. Back Up Previous Version's Files and Database.</h2>\n<h2>2. Stop All Services of DolphinScheduler.</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. Download the New Version's Installation Package.</h2>\n<ul>\n<li><a href=\"/en-us/download/download.html\">Download</a> the latest version of the installation packages.</li>\n<li>The following upgrade operations need to be performed in the new version's directory.</ [...]
+  "__html": "<h1>DolphinScheduler Upgrade Documentation</h1>\n<h2>Back Up Previous Version's Files and Database</h2>\n<h2>Stop All Services of DolphinScheduler</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>Download the New Version's Installation Package</h2>\n<ul>\n<li><a href=\"/en-us/download/download.html\">Download</a> the latest version of the installation packages.</li>\n<li>The following upgrade operations need to be performed in the new version's directory.</li>\n</ul>\n [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/upgrade.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.html b/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
index 6cbf84c..25043bd 100644
--- a/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
+++ b/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.html
@@ -12,19 +12,19 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>
-<h1>High Reliability</h1>
+<h2>High Reliability</h2>
 <ul>
 <li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>
 </ul>
-<h1>User-Friendly</h1>
+<h2>User-Friendly</h2>
 <ul>
 <li>All process definition operations are visualized, Visualization process defines key information at a glance, One-click deployment</li>
 </ul>
-<h1>Rich Scenarios</h1>
+<h2>Rich Scenarios</h2>
 <ul>
 <li>Support multi-tenant. Support many task types e.g., spark,flink,hive, mr, shell, python, sub_process</li>
 </ul>
-<h1>High Expansibility</h1>
+<h2>High Expansibility</h2>
 <ul>
 <li>Support custom task types, Distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster</li>
 </ul>
diff --git a/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.json b/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
index 72217ed..72d761a 100644
--- a/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
+++ b/en-us/docs/latest/user_doc/About_DolphinScheduler/About_DolphinScheduler.json
@@ -1,6 +1,6 @@
 {
   "filename": "About_DolphinScheduler.md",
-  "__html": "<h1>About DolphinScheduler</h1>\n<p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>\n<h1>High Reliability</h1>\n<ul>\n<li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>\n</ul>\n<h1>User-Friendly</h1>\n<ul>\n [...]
+  "__html": "<h1>About DolphinScheduler</h1>\n<p>Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.</p>\n<h2>High Reliability</h2>\n<ul>\n<li>Decentralized multi-master and multi-worker, HA is supported by itself, overload processing</li>\n</ul>\n<h2>User-Friendly</h2>\n<ul>\n [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/About_DolphinScheduler/About_DolphinScheduler.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/cache.html b/en-us/docs/latest/user_doc/architecture/cache.html
index c707024..75f4cd2 100644
--- a/en-us/docs/latest/user_doc/architecture/cache.html
+++ b/en-us/docs/latest/user_doc/architecture/cache.html
@@ -10,11 +10,11 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h4>Purpose</h4>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Purpose</h2>
 <p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>
 <p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a cache module is introduced to reduce the DB read pressure and speed up the core scheduling process;</p>
-<h4>Cache settings</h4>
+<h2>Cache Settings</h2>
 <pre><code class="language-yaml"><span class="hljs-attr">spring:</span>
   <span class="hljs-attr">cache:</span>
     <span class="hljs-comment"># default disable cache, you can enable by `type: caffeine`</span>
@@ -30,9 +30,9 @@
 </code></pre>
 <p>The cache-module use <a href="https://spring.io/guides/gs/caching/">spring-cache</a>, so you can set cache config in the spring application.yaml directly. Default disable cache, and you can enable it by <code>type: caffeine</code>.</p>
 <p>With the config of <a href="https://github.com/ben-manes/caffeine">caffeine</a>, you can set the cache size, expire time, etc.</p>
-<h4>Cache Read</h4>
+<h2>Cache Read</h2>
 <p>The cache adopts the annotation <code>@Cacheable</code> of spring-cache and is configured in the mapper layer. For example: <code>TenantMapper</code>.</p>
-<h4>Cache Evict</h4>
+<h2>Cache Evict</h2>
 <p>The business data update comes from the api-server, and the cache end is in the master-server. So it is necessary to monitor the data update of the api-server (aspect intercept <code>@CacheEvict</code>), and the master-server will be notified when the cache eviction is required.</p>
 <p>It should be noted that the final strategy for cache update comes from the user's expiration strategy configuration in caffeine, so please configure it in conjunction with the business;</p>
 <p>The sequence diagram is shown in the following figure:</p>
diff --git a/en-us/docs/latest/user_doc/architecture/cache.json b/en-us/docs/latest/user_doc/architecture/cache.json
index d71595c..7357e19 100644
--- a/en-us/docs/latest/user_doc/architecture/cache.json
+++ b/en-us/docs/latest/user_doc/architecture/cache.json
@@ -1,6 +1,6 @@
 {
   "filename": "cache.md",
-  "__html": "<h3>Cache</h3>\n<h4>Purpose</h4>\n<p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>\n<p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a [...]
+  "__html": "<h1>Cache</h1>\n<h2>Purpose</h2>\n<p>Due to the master-server scheduling process, there will be a large number of database read operations, such as <code>tenant</code>, <code>user</code>, <code>processDefinition</code>, etc. On the one hand, it will put a lot of pressure on the DB, and on the other hand, it will slow down the entire core scheduling process.</p>\n<p>Considering that this part of the business data is a scenario where more reads and less writes are performed, a [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/cache.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/configuration.html b/en-us/docs/latest/user_doc/architecture/configuration.html
index afe9708..c534520 100644
--- a/en-us/docs/latest/user_doc/architecture/configuration.html
+++ b/en-us/docs/latest/user_doc/architecture/configuration.html
@@ -11,13 +11,13 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h1>Preface</h1>
+<h1>Configuration</h1>
+<h2>Preface</h2>
 <p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>
-<h1>Directory Structure</h1>
+<h2>Directory Structure</h2>
 <p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside. This document only describes DolphinScheduler configurations and other modules are not going into.</p>
 <p>[Note: the DolphinScheduler (hereinafter called the ‘DS’) .]</p>
-<pre><code>
-├─bin                               DS application commands directory
+<pre><code>├─bin                               DS application commands directory
 │  ├─dolphinscheduler-daemon.sh         startup/shutdown DS application 
 │  ├─start-all.sh                  A     startup all DS services with configurations
 │  ├─stop-all.sh                        shutdown all DS services with configurations
@@ -51,14 +51,12 @@
 │  ├─upgrade-dolphinscheduler.sh        DS database upgrade script
 │  ├─monitor-server.sh                  DS monitor-server start script       
 │  ├─scp-hosts.sh                       transfer installation files script                                     
-│  ├─remove-zk-node.sh                  cleanup zookeeper caches script       
+│  ├─remove-zk-node.sh                  cleanup ZooKeeper caches script       
 ├─ui                                front-end web resources directory
 ├─lib                               DS .jar dependencies directory
 ├─install.sh                        auto-setup DS services script
-
-
 </code></pre>
-<h1>Configurations in Details</h1>
+<h2>Configurations in Details</h2>
 <table>
 <thead>
 <tr>
@@ -130,8 +128,8 @@
 </tr>
 </tbody>
 </table>
-<h2><a href="http://1.dolphinscheduler-daemon.sh">1.dolphinscheduler-daemon.sh</a> [startup/shutdown DS application]</h2>
-<p><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> is responsible for DS startup &amp; shutdown.
+<h3><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> [startup/shutdown DS application]</h3>
+<p><a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a> is responsible for DS startup and shutdown.
 Essentially, <a href="http://start-all.sh/stop-all.sh">start-all.sh/stop-all.sh</a> startup/shutdown the cluster via <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a>.
 Currently, DS just makes a basic config, please config further JVM options based on your practical situation of resources.</p>
 <p>Default simplified parameters are:</p>
@@ -150,7 +148,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 <blockquote>
 <p>&quot;-XX:DisableExplicitGC&quot; is not recommended due to may lead to memory link (DS dependent on Netty to communicate).</p>
 </blockquote>
-<h2>2.datasource.properties [datasource config properties]</h2>
+<h3>datasource.properties [datasource config properties]</h3>
 <p>DS uses Druid to manage database connections and default simplified configs are:</p>
 <table>
 <thead>
@@ -263,7 +261,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>3.registry.properties [registry config properties, default is zookeeper]</h2>
+<h3>registry.properties [registry config properties, default is zookeeper]</h3>
 <table>
 <thead>
 <tr>
@@ -281,12 +279,12 @@ Currently, DS just makes a basic config, please config further JVM options bas
 <tr>
 <td>registry.servers</td>
 <td>localhost:2181</td>
-<td>zookeeper cluster connection info</td>
+<td>ZooKeeper cluster connection info</td>
 </tr>
 <tr>
 <td>registry.namespace</td>
 <td>dolphinscheduler</td>
-<td>DS is stored under zookeeper root directory(Start without /)</td>
+<td>DS is stored under ZooKeeper root directory(Start without /)</td>
 </tr>
 <tr>
 <td><a href="http://registry.base.sleep.time.ms">registry.base.sleep.time.ms</a></td>
@@ -315,7 +313,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>4.common.properties [hadoop、s3、yarn config properties]</h2>
+<h3>common.properties [hadoop、s3、yarn config properties]</h3>
 <p>Currently, common.properties mainly configures hadoop/s3a related configurations.</p>
 <table>
 <thead>
@@ -418,7 +416,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>5.application-api.properties [API-service log config]</h2>
+<h3>application-api.properties [API-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -480,7 +478,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>6.master.properties [master-service log config]</h2>
+<h3>master.properties [master-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -542,7 +540,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>7.worker.properties [worker-service log config]</h2>
+<h3>worker.properties [worker-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -584,7 +582,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>8.alert.properties [alert-service log config]</h2>
+<h3>alert.properties [alert-service log config]</h3>
 <table>
 <thead>
 <tr>
@@ -706,7 +704,7 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>9.quartz.properties [quartz config properties]</h2>
+<h3>quartz.properties [quartz config properties]</h3>
 <p>This part describes quartz configs and please configure them based on your practical situation and resources.</p>
 <table>
 <thead>
@@ -809,22 +807,22 @@ Currently, DS just makes a basic config, please config further JVM options bas
 </tr>
 </tbody>
 </table>
-<h2>10.install_config.conf [DS environment variables configuration script[install/start DS]]</h2>
+<h3>install_config.conf [DS environment variables configuration script[install/start DS]]</h3>
 <p>install_config.conf is a bit complicated and is mainly used in the following two places.</p>
 <ul>
-<li>1.DS cluster auto installation</li>
+<li>DS Cluster Auto Installation</li>
 </ul>
 <blockquote>
 <p>System will load configs in the install_config.conf and auto-configure files below, based on the file content when executing '<a href="http://install.sh">install.sh</a>'.
 Files such as <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemon.sh</a>、datasource.properties、registry.properties、common.properties、application-api.properties、master.properties、worker.properties、alert.properties、quartz.properties and etc.</p>
 </blockquote>
 <ul>
-<li>2.Startup/shutdown DS cluster</li>
+<li>Startup/Shutdown DS Cluster</li>
 </ul>
 <blockquote>
 <p>The system will load masters, workers, alertServer, apiServers and other parameters inside the file to startup/shutdown DS cluster.</p>
 </blockquote>
-<p>File content as follows:</p>
+<h4>File Content as Follows:</h4>
 <pre><code class="language-bash">
 <span class="hljs-comment"># Note:  please escape the character if the file contains special characters such as `.*[]^${}\+?|()@#&amp;`.</span>
 <span class="hljs-comment">#   eg: `[` escape to `\[`</span>
@@ -832,7 +830,7 @@ Files such as <a href="http://dolphinscheduler-daemon.sh">dolphinscheduler-daemo
 <span class="hljs-comment"># Database type (DS currently only supports PostgreSQL and MySQL)</span>
 dbtype=<span class="hljs-string">&quot;mysql&quot;</span>
 
-<span class="hljs-comment"># Database url &amp; port</span>
+<span class="hljs-comment"># Database url and port</span>
 dbhost=<span class="hljs-string">&quot;192.168.xx.xx:3306&quot;</span>
 
 <span class="hljs-comment"># Database name</span>
@@ -845,7 +843,7 @@ username=<span class="hljs-string">&quot;xx&quot;</span>
 <span class="hljs-comment"># Database password</span>
 password=<span class="hljs-string">&quot;xx&quot;</span>
 
-<span class="hljs-comment"># Zookeeper url</span>
+<span class="hljs-comment"># ZooKeeper url</span>
 zkQuorum=<span class="hljs-string">&quot;192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181&quot;</span>
 
 <span class="hljs-comment"># DS installation path, such as &#x27;/data1_1T/dolphinscheduler&#x27;</span>
@@ -946,7 +944,7 @@ alertServer=<span class="hljs-string">&quot;ds3&quot;</span>
 <span class="hljs-comment"># Host deploy API-service</span>
 apiServers=<span class="hljs-string">&quot;ds1&quot;</span>
 </code></pre>
-<h2>11.dolphinscheduler_env.sh [load environment variables configs]</h2>
+<h3>11.dolphinscheduler_env.sh [load environment variables configs]</h3>
 <p>When using shell to commit tasks, DS will load environment variables inside dolphinscheduler_env.sh into the host.
 Types of tasks involved are: Shell task、Python task、Spark task、Flink task、Datax task and etc.</p>
 <pre><code class="language-bash"><span class="hljs-built_in">export</span> HADOOP_HOME=/opt/soft/hadoop
@@ -962,7 +960,7 @@ Types of tasks involved are: Shell task、Python task、Spark task、Flink task
 <span class="hljs-built_in">export</span> PATH=<span class="hljs-variable">$HADOOP_HOME</span>/bin:<span class="hljs-variable">$SPARK_HOME1</span>/bin:<span class="hljs-variable">$SPARK_HOME2</span>/bin:<span class="hljs-variable">$PYTHON_HOME</span>:<span class="hljs-variable">$JAVA_HOME</span>/bin:<span class="hljs-variable">$HIVE_HOME</span>/bin:<span class="hljs-variable">$PATH</span>:<span class="hljs-variable">$FLINK_HOME</span>/bin:<span class="hljs-variable">$DATAX_HOME</span>:<s [...]
 
 </code></pre>
-<h2>12. Services logback configs</h2>
+<h3>12. Services logback configs</h3>
 <table>
 <thead>
 <tr>
diff --git a/en-us/docs/latest/user_doc/architecture/configuration.json b/en-us/docs/latest/user_doc/architecture/configuration.json
index 54ec905..bb47228 100644
--- a/en-us/docs/latest/user_doc/architecture/configuration.json
+++ b/en-us/docs/latest/user_doc/architecture/configuration.json
@@ -1,6 +1,6 @@
 {
   "filename": "configuration.md",
-  "__html": "<!-- markdown-link-check-disable -->\n<h1>Preface</h1>\n<p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>\n<h1>Directory Structure</h1>\n<p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside. This document only descr [...]
+  "__html": "<!-- markdown-link-check-disable -->\n<h1>Configuration</h1>\n<h2>Preface</h2>\n<p>This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.</p>\n<h2>Directory Structure</h2>\n<p>Currently, all the configuration files are under [conf ] directory. Please check the following simplified DolphinScheduler installation directories to have a direct view about the position [conf] directory in and configuration files inside.  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/configuration.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/design.html b/en-us/docs/latest/user_doc/architecture/design.html
index 5372c6c..0256f7e 100644
--- a/en-us/docs/latest/user_doc/architecture/design.html
+++ b/en-us/docs/latest/user_doc/architecture/design.html
@@ -10,34 +10,34 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the
 scheduling system</p>
-<h3>1.System Structure</h3>
-<h4>1.1 System architecture diagram</h4>
+<h2>System Structure</h2>
+<h3>System Architecture Diagram</h3>
 <p align="center">
   <img src="/img/architecture-1.3.0.jpg" alt="System architecture diagram"  width="70%" />
   <p align="center">
         <em>System architecture diagram</em>
   </p>
 </p>
-<h4>1.2 Start process activity diagram</h4>
+<h3>Start Process Activity Diagram</h3>
 <p align="center">
   <img src="/img/master-process-2.0-en.png" alt="Start process activity diagram"  width="70%" />
   <p align="center">
         <em>Start process activity diagram</em>
   </p>
 </p>
-<h4>1.3 Architecture description</h4>
+<h3>Architecture Description</h3>
 <ul>
 <li>
 <p><strong>MasterServer</strong></p>
 <p>MasterServer adopts a distributed and centerless design concept. MasterServer is mainly responsible for DAG task
 segmentation, task submission monitoring, and monitoring the health status of other MasterServer and WorkerServer at
-the same time. When the MasterServer service starts, register a temporary node with Zookeeper, and perform fault
-tolerance by monitoring changes in the temporary node of Zookeeper. MasterServer provides monitoring services based on
+the same time. When the MasterServer service starts, register a temporary node with ZooKeeper, and perform fault
+tolerance by monitoring changes in the temporary node of ZooKeeper. MasterServer provides monitoring services based on
 netty.</p>
-<h5>The service mainly includes:</h5>
+<h4>The Service Mainly Includes:</h4>
 <ul>
 <li>
 <p><strong>MasterSchedulerService</strong> is a scanning thread that scans the <strong>command</strong> table in the database regularly,
@@ -59,21 +59,24 @@ for, and uses the thread pool to process the state events of the workflow</p>
 <li>
 <p><strong>WorkerServer</strong></p>
 <pre><code>WorkerServer also adopts a distributed centerless design concept, supports custom task plug-ins, and is mainly responsible for task execution and log services.
-When the WorkerServer service starts, it registers a temporary node with Zookeeper and maintains a heartbeat.
+When the WorkerServer service starts, it registers a temporary node with ZooKeeper and maintains a heartbeat.
 </code></pre>
+<h4>The Service Mainly Includes</h4>
+<ul>
+<li>
+<p><strong>WorkerManagerThread</strong> mainly receives tasks sent by the master through netty, and calls <strong>TaskExecuteThread</strong> corresponding executors according to different task types.</p>
+</li>
+<li>
+<p><strong>RetryReportTaskStatusThread</strong> mainly reports the task status to the master through netty. If the report fails, the report will always be retried.</p>
+</li>
+<li>
+<p><strong>LoggerServer</strong> is a log service that provides log fragment viewing, refreshing and downloading functions</p>
 </li>
 </ul>
-<h5>The service mainly includes</h5>
-<pre><code>- **WorkerManagerThread** mainly receives tasks sent by the master through netty, and calls **TaskExecuteThread** corresponding executors according to different task types.
- 
-- **RetryReportTaskStatusThread** mainly reports the task status to the master through netty. If the report fails, the report will always be retried.
-
-- **LoggerServer** is a log service that provides log fragment viewing, refreshing and downloading functions
-</code></pre>
-<ul>
+</li>
 <li>
 <p><strong>Registry</strong></p>
-<p>The registry is implemented as a plug-in, and Zookeeper is supported by default. The MasterServer and WorkerServer
+<p>The registry is implemented as a plug-in, and ZooKeeper is supported by default. The MasterServer and WorkerServer
 nodes in the system use the registry for cluster management and fault tolerance. In addition, the system also performs
 event monitoring and distributed locks based on the registry.</p>
 </li>
@@ -94,9 +97,9 @@ the node and so on.</p>
 at <a href="../guide/homepage.md">Introduction to Functions</a> section。</p>
 </li>
 </ul>
-<h4>1.4 Architecture design ideas</h4>
-<h5>One、Decentralization VS centralization</h5>
-<h6>Centralized thinking</h6>
+<h3>Architecture Design Ideas</h3>
+<h4>Decentralization VS Centralization</h4>
+<h5>Centralized Thinking</h5>
 <p>The centralized design concept is relatively simple. The nodes in the distributed cluster are divided into roles
 according to roles, which are roughly divided into two roles:</p>
 <p align="center">
@@ -120,7 +123,7 @@ different machines, it will cause the Master to be overloaded. If the Scheduler
 can only submit jobs on a certain machine. When there are more parallel tasks, the pressure on the slave may be
 greater.</li>
 </ul>
-<h6>Decentralized</h6>
+<h5>Decentralized</h5>
  <p align="center">
    <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="Decentralization"  width="50%" />
  </p>
@@ -143,13 +146,13 @@ preset, and when the cluster fails, the nodes of the cluster will automatically
 managers&quot; To preside over the work. The most typical case is Etcd implemented by ZooKeeper and Go language.</p>
 </li>
 <li>
-<p>The decentralization of DolphinScheduler is that the Master/Worker is registered in Zookeeper to realize the
+<p>The decentralization of DolphinScheduler is that the Master/Worker is registered in ZooKeeper to realize the
 non-centralization of the Master cluster and the Worker cluster. The sharding mechanism is used to fairly distribute
 the workflow for execution on the master, and tasks are sent to the workers for execution through different sending
 strategies. Specific task</p>
 </li>
 </ul>
-<h5>Second, the master execution process</h5>
+<h4>The Master Execution Process</h4>
 <ol>
 <li>
 <p>DolphinScheduler uses the sharding algorithm to modulate the command and assigns it according to the sort id of the
@@ -158,8 +161,6 @@ workflow instance</p>
 </li>
 <li>
 <p>DolphinScheduler's process of workflow:</p>
-</li>
-</ol>
 <ul>
 <li>Start the workflow through UI or API calls, and persist a command to the database</li>
 <li>The Master scans the Command table through the sharding algorithm, generates a workflow instance ProcessInstance, and
@@ -172,7 +173,9 @@ EventExecuteService event queue</li>
 <li>EventExecuteService calls WorkflowExecuteThread according to the event queue to submit subsequent tasks and modify
 workflow status</li>
 </ul>
-<h5>Three、Insufficient thread loop waiting problem</h5>
+</li>
+</ol>
+<h4>Insufficient Thread Loop Waiting Problem</h4>
 <ul>
 <li>If there is no sub-process in a DAG, if the number of data in the Command is greater than the threshold set by the
 thread pool, the process directly waits or fails.</li>
@@ -195,10 +198,10 @@ to execute again.</li>
 </ol>
 <p>note: The Master Scheduler thread is executed by FIFO when acquiring the Command.</p>
 <p>So we chose the third way to solve the problem of insufficient threads.</p>
-<h5>Four、Fault-tolerant design</h5>
+<h4>Fault-Tolerant Design</h4>
 <p>Fault tolerance is divided into service downtime fault tolerance and task retry, and service downtime fault tolerance is
 divided into master fault tolerance and worker fault tolerance.</p>
-<h6>1. Downtime fault tolerance</h6>
+<h5>Downtime Fault Tolerance</h5>
 <p>The service fault-tolerance design relies on ZooKeeper's Watcher mechanism, and the implementation principle is shown in the figure:</p>
  <p align="center">
    <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="DolphinScheduler fault-tolerant design"  width="40%" />
@@ -223,7 +226,7 @@ Among them, the Master monitors the directories of other Masters and Workers. If
 <p>Fault-tolerant content: When sending the remove event of the Worker node, the Master only fault-tolerant task instances. Before fault-tolerant, it compares the start time of the instance with the server start-up time, and skips fault-tolerance if after the server start time;</p>
 <p>Fault-tolerant post-processing: Once the Master Scheduler thread finds that the task instance is in the &quot;fault-tolerant&quot; state, it takes over the task and resubmits it.</p>
 <p>Note: Due to &quot;network jitter&quot;, the node may lose its heartbeat with ZooKeeper in a short period of time, and the node's remove event may occur. For this situation, we use the simplest way, that is, once the node and ZooKeeper timeout connection occurs, then directly stop the Master or Worker service.</p>
-<h6>2.Task failed and try again</h6>
+<h5>Task Failed and Try Again</h5>
 <p>Here we must first distinguish the concepts of task failure retry, process failure recovery, and process failure rerun:</p>
 <ul>
 <li>Task failure retry is at the task level and is automatically performed by the scheduling system. For example, if a
@@ -248,7 +251,7 @@ automatically retry until it succeeds or exceeds the configured number of retrie
 supported. But the tasks in the logical node support retry.</p>
 <p>If there is a task failure in the workflow that reaches the maximum number of retries, the workflow will fail to stop,
 and the failed workflow can be manually rerun or process recovery operation</p>
-<h5>Five、Task priority design</h5>
+<h4>Task Priority Design</h4>
 <p>In the early scheduling design, if there is no priority design and the fair scheduling design is used, the task
 submitted first may be completed at the same time as the task submitted later, and the process or task priority cannot
 be set, so We have redesigned this, and our current design is as follows:</p>
@@ -282,7 +285,7 @@ shown below</p>
 </ul>
 </li>
 </ul>
-<h5>Six、Logback and netty implement log access</h5>
+<h4>Logback and Netty Implement Log Access</h4>
 <ul>
 <li>
 <p>Since Web (UI) and Worker are not necessarily on the same machine, viewing the log cannot be like querying a local
@@ -307,52 +310,52 @@ log information.</p>
 file.</li>
 <li>FileAppender is mainly implemented as follows:</li>
 </ul>
-<pre><code class="language-java"><span class="hljs-comment">/**
- * task log appender
- */</span>
-<span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogAppender</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">FileAppender</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
-
-    ...
+<pre><code class="language-java"> <span class="hljs-comment">/**
+  * task log appender
+  */</span>
+ <span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogAppender</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">FileAppender</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
+ 
+     ...
 
-   <span class="hljs-meta">@Override</span>
-   <span class="hljs-function"><span class="hljs-keyword">protected</span> <span class="hljs-keyword">void</span> <span class="hljs-title">append</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
+    <span class="hljs-meta">@Override</span>
+    <span class="hljs-function"><span class="hljs-keyword">protected</span> <span class="hljs-keyword">void</span> <span class="hljs-title">append</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
 
-       <span class="hljs-keyword">if</span> (currentlyActiveFile == <span class="hljs-keyword">null</span>){
-           currentlyActiveFile = getFile();
-       }
-       String activeFile = currentlyActiveFile;
-       <span class="hljs-comment">// thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId</span>
-       String threadName = event.getThreadName();
-       String[] threadNameArr = threadName.split(<span class="hljs-string">&quot;-&quot;</span>);
-       <span class="hljs-comment">// logId = processDefineId_processInstanceId_taskInstanceId</span>
-       String logId = threadNameArr[<span class="hljs-number">1</span>];
-       ...
-       <span class="hljs-keyword">super</span>.subAppend(event);
-   }
+        <span class="hljs-keyword">if</span> (currentlyActiveFile == <span class="hljs-keyword">null</span>){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        <span class="hljs-comment">// thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId</span>
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split(<span class="hljs-string">&quot;-&quot;</span>);
+        <span class="hljs-comment">// logId = processDefineId_processInstanceId_taskInstanceId</span>
+        String logId = threadNameArr[<span class="hljs-number">1</span>];
+        ...
+        <span class="hljs-keyword">super</span>.subAppend(event);
+    }
 }
-
-
-Generate logs in the form of /process definition id/process instance id/task instance id.log
-
-- Filter to match the thread name starting with TaskLogInfo:
-
-- TaskLogFilter is implemented as follows:
-
-```java
-<span class="hljs-comment">/**
-*  task log filter
-*/</span>
+</code></pre>
+<p>Generate logs in the form of /process definition id/process instance id/task instance id.log</p>
+<ul>
+<li>
+<p>Filter to match the thread name starting with TaskLogInfo:</p>
+</li>
+<li>
+<p>TaskLogFilter is implemented as follows:</p>
+</li>
+</ul>
+<pre><code class="language-java"> <span class="hljs-comment">/**
+ *  task log filter
+ */</span>
 <span class="hljs-keyword">public</span> <span class="hljs-class"><span class="hljs-keyword">class</span> <span class="hljs-title">TaskLogFilter</span> <span class="hljs-keyword">extends</span> <span class="hljs-title">Filter</span>&lt;<span class="hljs-title">ILoggingEvent</span>&gt; </span>{
 
-   <span class="hljs-meta">@Override</span>
-   <span class="hljs-function"><span class="hljs-keyword">public</span> FilterReply <span class="hljs-title">decide</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
-       <span class="hljs-keyword">if</span> (event.getThreadName().startsWith(<span class="hljs-string">&quot;TaskLogInfo-&quot;</span>)){
-           <span class="hljs-keyword">return</span> FilterReply.ACCEPT;
-       }
-       <span class="hljs-keyword">return</span> FilterReply.DENY;
-   }
+    <span class="hljs-meta">@Override</span>
+    <span class="hljs-function"><span class="hljs-keyword">public</span> FilterReply <span class="hljs-title">decide</span><span class="hljs-params">(ILoggingEvent event)</span> </span>{
+        <span class="hljs-keyword">if</span> (event.getThreadName().startsWith(<span class="hljs-string">&quot;TaskLogInfo-&quot;</span>)){
+            <span class="hljs-keyword">return</span> FilterReply.ACCEPT;
+        }
+        <span class="hljs-keyword">return</span> FilterReply.DENY;
+    }
 }
-
 </code></pre>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/architecture/design.json b/en-us/docs/latest/user_doc/architecture/design.json
index fe9dad8..fbe403e 100644
--- a/en-us/docs/latest/user_doc/architecture/design.json
+++ b/en-us/docs/latest/user_doc/architecture/design.json
@@ -1,6 +1,6 @@
 {
   "filename": "design.md",
-  "__html": "<h2>System Architecture Design</h2>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h3>1.System Structure</h3>\n<h4>1.1 System architecture diagram</h4>\n<p align=\"center\">\n  <img src=\"/img/architecture-1.3.0.jpg\" alt=\"System architecture diagram\"  width=\"70%\" />\n  <p align=\"center\">\n        <em>System architecture diagram</em>\n  </p>\n</p>\n<h4>1.2 Start process act [...]
+  "__html": "<h1>System Architecture Design</h1>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h2>System Structure</h2>\n<h3>System Architecture Diagram</h3>\n<p align=\"center\">\n  <img src=\"/img/architecture-1.3.0.jpg\" alt=\"System architecture diagram\"  width=\"70%\" />\n  <p align=\"center\">\n        <em>System architecture diagram</em>\n  </p>\n</p>\n<h3>Start Process Activity Diag [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/design.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/designplus.html b/en-us/docs/latest/user_doc/architecture/designplus.html
index b390936..d35c382 100644
--- a/en-us/docs/latest/user_doc/architecture/designplus.html
+++ b/en-us/docs/latest/user_doc/architecture/designplus.html
@@ -10,10 +10,10 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the
 scheduling system</p>
-<h3>1.Glossary</h3>
+<h2>Glossary</h2>
 <p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the
 form of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until
 there are no subsequent nodes. Examples are as follows:</p>
@@ -48,7 +48,7 @@ provided. <strong>Continue</strong> refers to regardless of the status of the ta
 failure. <strong>End</strong> means that once a failed task is found, Kill will also run the parallel task at the same time, and the
 process fails and ends</p>
 <p><strong>Complement</strong>: Supplement historical data,Supports <strong>interval parallel and serial</strong> two complement methods</p>
-<h3>2.Module introduction</h3>
+<h2>Module Introduction</h2>
 <ul>
 <li>
 <p>dolphinscheduler-alert alarm module, providing AlertServer service.</p>
@@ -69,14 +69,14 @@ process fails and ends</p>
 <p>dolphinscheduler-server MasterServer and WorkerServer services</p>
 </li>
 <li>
-<p>dolphinscheduler-service service module, including Quartz, Zookeeper, log client access service, easy to call server
+<p>dolphinscheduler-service service module, including Quartz, ZooKeeper, log client access service, easy to call server
 module and api module</p>
 </li>
 <li>
 <p>dolphinscheduler-ui front-end module</p>
 </li>
 </ul>
-<h3>Sum up</h3>
+<h2>Sum Up</h2>
 <p>From the perspective of scheduling, this article preliminarily introduces the architecture principles and implementation
 ideas of the big data distributed workflow scheduling system-DolphinScheduler. To be continued</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/latest/user_doc/architecture/designplus.json b/en-us/docs/latest/user_doc/architecture/designplus.json
index 470c03d..e49da02 100644
--- a/en-us/docs/latest/user_doc/architecture/designplus.json
+++ b/en-us/docs/latest/user_doc/architecture/designplus.json
@@ -1,6 +1,6 @@
 {
   "filename": "designplus.md",
-  "__html": "<h2>System Architecture Design</h2>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h3>1.Glossary</h3>\n<p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the\nform of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until\nthere are no subsequent no [...]
+  "__html": "<h1>System Architecture Design</h1>\n<p>Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of the\nscheduling system</p>\n<h2>Glossary</h2>\n<p><strong>DAG:</strong> The full name is Directed Acyclic Graph, referred to as DAG. Task tasks in the workflow are assembled in the\nform of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until\nthere are no subsequent node [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/designplus.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/load-balance.html b/en-us/docs/latest/user_doc/architecture/load-balance.html
index 27a3067..1a19d36 100644
--- a/en-us/docs/latest/user_doc/architecture/load-balance.html
+++ b/en-us/docs/latest/user_doc/architecture/load-balance.html
@@ -10,30 +10,30 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>
-<h3>DolphinScheduler-Worker load balancing algorithms</h3>
+<h2>DolphinScheduler-Worker Load Balancing Algorithms</h2>
 <p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>
 <p>Weighted random (random)</p>
 <p>Smoothing polling (roundrobin)</p>
 <p>Linear load (lowerweight)</p>
 <p>The default configuration is the linear load.</p>
 <p>As the routing is done on the client side, the master service, you can change master.host.selector in master.properties to configure the algorithm what you want.</p>
-<p>eg: master.host.selector = random (case-insensitive)</p>
-<h3>Worker load balancing configuration</h3>
+<p>e.g. master.host.selector = random (case-insensitive)</p>
+<h2>Worker Load Balancing Configuration</h2>
 <p>The configuration file is worker.properties</p>
-<h4>weight</h4>
+<h3>Weight</h3>
 <p>All of the above load algorithms are weighted based on weights, which affect the outcome of the triage. You can set different weights for different machines by modifying the worker.weight value.</p>
-<h4>Preheating</h4>
+<h3>Preheating</h3>
 <p>With JIT optimisation in mind, we will let the worker run at low power for a period of time after startup so that it can gradually reach its optimal state, a process we call preheating. If you are interested, you can read some articles about JIT.</p>
 <p>So the worker will gradually reach its maximum weight over time after it starts (by default ten minutes, we don't provide a configuration item, you can change it and submit a PR if needed).</p>
-<h3>Load balancing algorithm breakdown</h3>
-<h4>Random (weighted)</h4>
+<h2>Load Balancing Algorithm Breakdown</h2>
+<h3>Random (Weighted)</h3>
 <p>This algorithm is relatively simple, one of the matched workers is selected at random (the weighting affects his weighting).</p>
-<h4>Smoothed polling (weighted)</h4>
+<h3>Smoothed Polling (Weighted)</h3>
 <p>An obvious drawback of the weighted polling algorithm. Namely, under certain specific weights, weighted polling scheduling generates an uneven sequence of instances, and this unsmoothed load may cause some instances to experience transient high loads, leading to a risk of system downtime. To address this scheduling flaw, we provide a smooth weighted polling algorithm.</p>
 <p>Each worker is given two weights, weight (which remains constant after warm-up is complete) and current_weight (which changes dynamically), for each route. The current_weight + weight is iterated over all the workers, and the weight of all the workers is added up and counted as total_weight, then the worker with the largest current_weight is selected as the worker for this task. current_weight-total_weight.</p>
-<h4>Linear weighting (default algorithm)</h4>
+<h3>Linear Weighting (Default Algorithm)</h3>
 <p>The algorithm reports its own load information to the registry at regular intervals. We base our judgement on two main pieces of information</p>
 <ul>
 <li>load average (default is the number of CPU cores * 2)</li>
diff --git a/en-us/docs/latest/user_doc/architecture/load-balance.json b/en-us/docs/latest/user_doc/architecture/load-balance.json
index b334287..807ade1 100644
--- a/en-us/docs/latest/user_doc/architecture/load-balance.json
+++ b/en-us/docs/latest/user_doc/architecture/load-balance.json
@@ -1,6 +1,6 @@
 {
   "filename": "load-balance.md",
-  "__html": "<h3>Load Balance</h3>\n<p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>\n<h3>DolphinScheduler-Worker load balancing algorithms</h3>\n<p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>\n<p>Weighted random (random)</p>\n<p>Smoothing polling (roundrobin)</p>\n<p>Linear load (lowerwei [...]
+  "__html": "<h1>Load Balance</h1>\n<p>Load balancing refers to the reasonable allocation of server pressure through routing algorithms (usually in cluster environments) to achieve the maximum optimization of server performance.</p>\n<h2>DolphinScheduler-Worker Load Balancing Algorithms</h2>\n<p>DolphinScheduler-Master allocates tasks to workers, and by default provides three algorithms:</p>\n<p>Weighted random (random)</p>\n<p>Smoothing polling (roundrobin)</p>\n<p>Linear load (lowerwei [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/load-balance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/metadata.html b/en-us/docs/latest/user_doc/architecture/metadata.html
index a0cc15e..51920de 100644
--- a/en-us/docs/latest/user_doc/architecture/metadata.html
+++ b/en-us/docs/latest/user_doc/architecture/metadata.html
@@ -12,7 +12,7 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p><a name="V5KOl"></a></p>
-<h3>Dolphin Scheduler 2.0 DB Table Overview</h3>
+<h2>Dolphin Scheduler 2.0 DB Table Overview</h2>
 <table>
 <thead>
 <tr>
@@ -121,9 +121,9 @@
 </table>
 <hr>
 <p><a name="XCLy1"></a></p>
-<h3>E-R Diagram</h3>
+<h2>E-R Diagram</h2>
 <p><a name="5hWWZ"></a></p>
-<h4>User Queue DataSource</h4>
+<h3>User Queue DataSource</h3>
 <p><img src="/img/metadata-erd/user-queue-datasource.png" alt="image.png"></p>
 <ul>
 <li>Multiple users can belong to one tenant</li>
@@ -131,7 +131,7 @@
 <li>The user_id field in the t_ds_datasource table indicates the user who created the data source. The user_id in t_ds_relation_datasource_user indicates the user who has permission to the data source.
 <a name="7euSN"></a></li>
 </ul>
-<h4>Project Resource Alert</h4>
+<h3>Project Resource Alert</h3>
 <p><img src="/img/metadata-erd/project-resource-alert.png" alt="image.png"></p>
 <ul>
 <li>User can have multiple projects, User project authorization completes the relationship binding using project_id and user_id in t_ds_relation_project_user table</li>
@@ -140,7 +140,7 @@
 <li>The user_id in the t_ds_udfs table represents the user who created the UDF, and the user_id in the t_ds_relation_udfs_user table represents a user who has permission to the UDF
 <a name="JEw4v"></a></li>
 </ul>
-<h4>Command Process Task</h4>
+<h3>Command Process Task</h3>
 <p><img src="/img/metadata-erd/command.png" alt="image.png"><br /><img src="/img/metadata-erd/process-task.png" alt="image.png"></p>
 <ul>
 <li>A project has multiple process definitions, a process definition can generate multiple process instances, and a process instance can generate multiple task instances</li>
@@ -150,9 +150,9 @@
 </ul>
 <hr>
 <p><a name="yd79T"></a></p>
-<h3>Core Table Schema</h3>
+<h2>Core Table Schema</h2>
 <p><a name="6bVhH"></a></p>
-<h4>t_ds_process_definition</h4>
+<h3>t_ds_process_definition</h3>
 <table>
 <thead>
 <tr>
@@ -255,7 +255,7 @@
 </tbody>
 </table>
 <p><a name="t5uxM"></a></p>
-<h4>t_ds_process_instance</h4>
+<h3>t_ds_process_instance</h3>
 <table>
 <thead>
 <tr>
@@ -428,7 +428,7 @@
 </tbody>
 </table>
 <p><a name="tHZsY"></a></p>
-<h4>t_ds_task_instance</h4>
+<h3>t_ds_task_instance</h3>
 <table>
 <thead>
 <tr>
@@ -551,7 +551,7 @@
 </tbody>
 </table>
 <p><a name="gLGtm"></a></p>
-<h4>t_ds_command</h4>
+<h3>t_ds_command</h3>
 <table>
 <thead>
 <tr>
diff --git a/en-us/docs/latest/user_doc/architecture/metadata.json b/en-us/docs/latest/user_doc/architecture/metadata.json
index 9e16188..67564f7 100644
--- a/en-us/docs/latest/user_doc/architecture/metadata.json
+++ b/en-us/docs/latest/user_doc/architecture/metadata.json
@@ -1,6 +1,6 @@
 {
   "filename": "metadata.md",
-  "__html": "<h1>Dolphin Scheduler 2.0.3 MetaData</h1>\n<p><a name=\"V5KOl\"></a></p>\n<h3>Dolphin Scheduler 2.0 DB Table Overview</h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">Table Name</th>\n<th style=\"text-align:center\">Comment</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">token for access ds backend</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-ali [...]
+  "__html": "<h1>Dolphin Scheduler 2.0.3 MetaData</h1>\n<p><a name=\"V5KOl\"></a></p>\n<h2>Dolphin Scheduler 2.0 DB Table Overview</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">Table Name</th>\n<th style=\"text-align:center\">Comment</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">token for access ds backend</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-ali [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/metadata.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/architecture/task-structure.html b/en-us/docs/latest/user_doc/architecture/task-structure.html
index e19d296..d20cacd 100644
--- a/en-us/docs/latest/user_doc/architecture/task-structure.html
+++ b/en-us/docs/latest/user_doc/architecture/task-structure.html
@@ -10,7 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Overall Tasks Storage Structure</h2>
 <p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>
 <p>The following shows the 't_ds_process_definition' table structure:</p>
 <table>
@@ -198,8 +199,8 @@
     <span class="hljs-string">&quot;timeout&quot;</span>:0
 }
 </code></pre>
-<h1>The Detailed Explanation of The Storage Structure of Each Task Type</h1>
-<h2>Shell Nodes</h2>
+<h2>The Detailed Explanation of The Storage Structure of Each Task Type</h2>
+<h3>Shell Nodes</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -413,7 +414,7 @@
 }
 
 </code></pre>
-<h2>SQL Node</h2>
+<h3>SQL Node</h3>
 <p>Perform data query and update operations on the specified datasource through SQL.</p>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
@@ -720,10 +721,10 @@
     ]
 }
 </code></pre>
-<h2>PROCEDURE [stored procedures] Node</h2>
+<h3>Procedure [stored procedures] Node</h3>
 <p><strong>The node data structure is as follows:</strong>
 <strong>Node data example:</strong></p>
-<h2>SPARK Node</h2>
+<h3>Spark Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1037,7 +1038,7 @@
     ]
 }
 </code></pre>
-<h2>MapReduce(MR) Node</h2>
+<h3>MapReduce(MR) Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1288,7 +1289,7 @@
     ]
 }
 </code></pre>
-<h2>Python Node</h2>
+<h3>Python Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1501,7 +1502,7 @@
     ]
 }
 </code></pre>
-<h2>Flink Node</h2>
+<h3>Flink Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -1798,7 +1799,7 @@
     ]
 }
 </code></pre>
-<h2>HTTP Node</h2>
+<h3>HTTP Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2043,7 +2044,7 @@
     ]
 }
 </code></pre>
-<h2>DataX Node</h2>
+<h3>DataX Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2340,7 +2341,7 @@
     ]
 }
 </code></pre>
-<h2>Sqoop Node</h2>
+<h3>Sqoop Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2583,7 +2584,7 @@
             ]
         }
 </code></pre>
-<h2>Condition Branch Node</h2>
+<h3>Condition Branch Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2766,7 +2767,7 @@
     ]
 }
 </code></pre>
-<h2>Subprocess Node</h2>
+<h3>Subprocess Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
@@ -2951,7 +2952,7 @@
             ]
         }
 </code></pre>
-<h2>DEPENDENT Node</h2>
+<h3>Dependent Node</h3>
 <p><strong>The node data structure is as follows:</strong></p>
 <table>
 <thead>
diff --git a/en-us/docs/latest/user_doc/architecture/task-structure.json b/en-us/docs/latest/user_doc/architecture/task-structure.json
index 85ba5d3..bca7b58 100644
--- a/en-us/docs/latest/user_doc/architecture/task-structure.json
+++ b/en-us/docs/latest/user_doc/architecture/task-structure.json
@@ -1,6 +1,6 @@
 {
   "filename": "task-structure.md",
-  "__html": "<h1>Overall Tasks Storage Structure</h1>\n<p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>\n<p>The following shows the 't_ds_process_definition' table structure:</p>\n<table>\n<thead>\n<tr>\n<th>No.</th>\n<th>field</th>\n<th>type</th>\n<th>description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>1</td>\n<td>id</td>\n<td>int(11)</td>\n<td>primary key</td>\n</tr>\n<tr>\n<td>2</td>\n<td>name</td>\n<td>varchar(255)</td>\n<td>process defin [...]
+  "__html": "<h1>Task Structure</h1>\n<h2>Overall Tasks Storage Structure</h2>\n<p>All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.</p>\n<p>The following shows the 't_ds_process_definition' table structure:</p>\n<table>\n<thead>\n<tr>\n<th>No.</th>\n<th>field</th>\n<th>type</th>\n<th>description</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>1</td>\n<td>id</td>\n<td>int(11)</td>\n<td>primary key</td>\n</tr>\n<tr>\n<td>2</td>\n<td>name</td>\n<td>varchar(255 [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/architecture/task-structure.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.html b/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.html
index 1922b36..3a2f007 100644
--- a/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.html
+++ b/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.html
@@ -10,7 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>How to Create Alert Plugins and Alert Groups</h2>
 <p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>
 <p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresponding alarm plug-in and fill in the relevant alarm parameters.</p>
 <p>Then select Alarm Group Management, create an alarm group, and select the corresponding alarm instance.</p>
diff --git a/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.json b/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.json
index 7f603fd..998370c 100644
--- a/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.json
+++ b/en-us/docs/latest/user_doc/guide/alert/alert_plugin_user_guide.json
@@ -1,6 +1,6 @@
 {
   "filename": "alert_plugin_user_guide.md",
-  "__html": "<h2>How to create alert plugins and alert groups</h2>\n<p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>\n<p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresponding alarm plug-in and fill in the  [...]
+  "__html": "<h1>Alert Component User Guide</h1>\n<h2>How to Create Alert Plugins and Alert Groups</h2>\n<p>In version 2.0.3, users need to create alert instances, and then associate them with alert groups, and an alert group can use multiple alert instances, and we will notify them one by one.</p>\n<p>First of all, you need to go to the Security Center, select Alarm Group Management, then click Alarm Instance Management on the left, then create an alarm instance, then select the corresp [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/alert/alert_plugin_user_guide.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.html b/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.html
index b919c39..a5fce36 100644
--- a/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.html
+++ b/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.html
@@ -11,6 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>How to Create Enterprise WeChat Alert</h2>
 <p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>
 <p><img src="/img/alert/enterprise-wechat-plugin.png" alt="enterprise-wechat-plugin"></p>
 <p>Where send type corresponds to app and appchat respectively:</p>
diff --git a/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.json b/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.json
index 2531470..985577e 100644
--- a/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.json
+++ b/en-us/docs/latest/user_doc/guide/alert/enterprise-wechat.json
@@ -1,6 +1,6 @@
 {
   "filename": "enterprise-wechat.md",
-  "__html": "<h1>Enterprise WeChat</h1>\n<p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>\n<p><img src=\"/img/alert/enterprise-wechat-plugin.png\" alt=\"enterprise-wechat-plugin\"></p>\n<p>Where send type corresponds to app and appchat respectively:</p>\n<p>APP: <a href=\"https://work.weixin.qq.com/api/doc/90000/90135/90236\">htt [...]
+  "__html": "<h1>Enterprise WeChat</h1>\n<h2>How to Create Enterprise WeChat Alert</h2>\n<p>If you need to use Enterprise WeChat to alert, please create an alarm Instance in warning instance manage, and then choose the wechat plugin. The configuration example of enterprise WeChat is as follows</p>\n<p><img src=\"/img/alert/enterprise-wechat-plugin.png\" alt=\"enterprise-wechat-plugin\"></p>\n<p>Where send type corresponds to app and appchat respectively:</p>\n<p>APP: <a href=\"https://wo [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/alert/enterprise-wechat.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/datasource/hive.html b/en-us/docs/latest/user_doc/guide/datasource/hive.html
index ae86c46..1625ed0 100644
--- a/en-us/docs/latest/user_doc/guide/datasource/hive.html
+++ b/en-us/docs/latest/user_doc/guide/datasource/hive.html
@@ -31,7 +31,7 @@
 configure <code>common.properties</code>. It is helpful when you try to set env before running HIVE SQL. Parameter
 <code>support.hive.oneSession</code> default value is <code>false</code> and SQL would run in different session if their more than one.</p>
 </blockquote>
-<h2>Use HiveServer2 HA Zookeeper</h2>
+<h2>Use HiveServer2 HA ZooKeeper</h2>
  <p align="center">
     <img src="/img/hive1-en.png" width="80%" />
   </p>
diff --git a/en-us/docs/latest/user_doc/guide/datasource/hive.json b/en-us/docs/latest/user_doc/guide/datasource/hive.json
index 775d12c..6c2403a 100644
--- a/en-us/docs/latest/user_doc/guide/datasource/hive.json
+++ b/en-us/docs/latest/user_doc/guide/datasource/hive.json
@@ -1,6 +1,6 @@
 {
   "filename": "hive.md",
-  "__html": "<h1>HIVE</h1>\n<h2>Use HiveServer2</h2>\n <p align=\"center\">\n    <img src=\"/img/hive-en.png\" width=\"80%\" />\n  </p>\n<ul>\n<li>Data source: select HIVE</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP connected to HIVE</li>\n<li>Port: Enter the port connected to HIVE</li>\n<li>Username: Set the username for connecting to HIVE</li>\n<li>Password: Set the pass [...]
+  "__html": "<h1>HIVE</h1>\n<h2>Use HiveServer2</h2>\n <p align=\"center\">\n    <img src=\"/img/hive-en.png\" width=\"80%\" />\n  </p>\n<ul>\n<li>Data source: select HIVE</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP connected to HIVE</li>\n<li>Port: Enter the port connected to HIVE</li>\n<li>Username: Set the username for connecting to HIVE</li>\n<li>Password: Set the pass [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/datasource/hive.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/datasource/postgresql.html b/en-us/docs/latest/user_doc/guide/datasource/postgresql.html
index f55ca9e..7f4f0f8 100644
--- a/en-us/docs/latest/user_doc/guide/datasource/postgresql.html
+++ b/en-us/docs/latest/user_doc/guide/datasource/postgresql.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>Data source: select POSTGRESQL</li>
 <li>Data source name: enter the name of the data source</li>
diff --git a/en-us/docs/latest/user_doc/guide/datasource/postgresql.json b/en-us/docs/latest/user_doc/guide/datasource/postgresql.json
index 8a4a79d..83afa6d 100644
--- a/en-us/docs/latest/user_doc/guide/datasource/postgresql.json
+++ b/en-us/docs/latest/user_doc/guide/datasource/postgresql.json
@@ -1,6 +1,6 @@
 {
   "filename": "postgresql.md",
-  "__html": "<h1>POSTGRESQL</h1>\n<ul>\n<li>Data source: select POSTGRESQL</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP to connect to POSTGRESQL</li>\n<li>Port: Enter the port to connect to POSTGRESQL</li>\n<li>Username: Set the username for connecting to POSTGRESQL</li>\n<li>Password: Set the password for connecting to POSTGRESQL</li>\n<li>Database name: Enter the name of  [...]
+  "__html": "<h1>PostgreSQL</h1>\n<ul>\n<li>Data source: select POSTGRESQL</li>\n<li>Data source name: enter the name of the data source</li>\n<li>Description: Enter a description of the data source</li>\n<li>IP/Host Name: Enter the IP to connect to POSTGRESQL</li>\n<li>Port: Enter the port to connect to POSTGRESQL</li>\n<li>Username: Set the username for connecting to POSTGRESQL</li>\n<li>Password: Set the password for connecting to POSTGRESQL</li>\n<li>Database name: Enter the name of  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/datasource/postgresql.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/expansion-reduction.html b/en-us/docs/latest/user_doc/guide/expansion-reduction.html
index e255c42..396d48f 100644
--- a/en-us/docs/latest/user_doc/guide/expansion-reduction.html
+++ b/en-us/docs/latest/user_doc/guide/expansion-reduction.html
@@ -12,19 +12,19 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h1>DolphinScheduler Expansion and Reduction</h1>
-<h2>1. Expansion</h2>
+<h2>Expansion</h2>
 <p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>
 <pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.
        If the physical machine where the expansion master or worker node is located has already installed the scheduled service, skip to [1.4 Modify configuration] Edit the configuration file `conf/config/install_config.conf` on **all ** nodes, add masters or workers parameter, and restart the scheduling cluster.
 </code></pre>
-<h3>1.1 Basic software installation (please install the mandatory items yourself)</h3>
+<h3>Basic Software Installation</h3>
 <ul>
 <li>[required] <a href="https://www.oracle.com/technetwork/java/javase/downloads/index.html">JDK</a> (1.8+):Must be installed, please install and configure JAVA_HOME and PATH variables under /etc/profile</li>
 <li>[optional] If the expansion is a worker node, you need to consider whether to install an external client, such as Hadoop, Hive, Spark Client.</li>
 </ul>
 <pre><code class="language-markdown"> Attention: DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but will only call their Client for the corresponding task submission.
 </code></pre>
-<h3>1.2 Get installation package</h3>
+<h3>Get Installation Package</h3>
 <ul>
 <li>Check which version of DolphinScheduler is used in your existing environment, and get the installation package of the corresponding version, if the versions are different, there may be compatibility problems.</li>
 <li>Confirm the unified installation directory of other nodes, this article assumes that DolphinScheduler is installed in /opt/ directory, and the full path is /opt/dolphinscheduler.</li>
@@ -41,7 +41,7 @@ mv apache-dolphinscheduler-2.0.3-bin  dolphinscheduler
 </code></pre>
 <pre><code class="language-markdown"> Attention: The installation package can be copied directly from an existing environment to an expanded physical machine for use.
 </code></pre>
-<h3>1.3 Create Deployment Users</h3>
+<h3>Create Deployment Users</h3>
 <ul>
 <li>Create deployment users on <strong>all</strong> expansion machines, and be sure to configure sudo-free. If we plan to deploy scheduling on four expansion machines, ds1, ds2, ds3, and ds4, we first need to create deployment users on each machine</li>
 </ul>
@@ -61,7 +61,7 @@ sed -i &#x27;s/Defaults    requirett/#Defaults    requirett/g&#x27; /etc/sudoers
 <span class="hljs-bullet"> -</span> If you find the line &quot;Default requiretty&quot; in the /etc/sudoers file, please also comment it out.
 <span class="hljs-bullet"> -</span> If resource uploads are used, you also need to assign read and write permissions to the deployment user on <span class="hljs-code">`HDFS or MinIO`</span>.
 </code></pre>
-<h3>1.4 Modify configuration</h3>
+<h3>Modify Configuration</h3>
 <ul>
 <li>
 <p>From an existing node such as Master/Worker, copy the conf directory directly to replace the conf directory in the new node. After copying, check if the configuration items are correct.</p>
@@ -124,7 +124,7 @@ workers=&quot;existing worker01:default,existing worker02:default,ds3:default,ds
 </ul>
 <pre><code class="language-shell">sudo chown -R dolphinscheduler:dolphinscheduler dolphinscheduler
 </code></pre>
-<h3>1.4. Restart the cluster &amp; verify</h3>
+<h3>Restart the Cluster and Verify</h3>
 <ul>
 <li>restart the cluster</li>
 </ul>
@@ -170,10 +170,10 @@ sh bin/dolphinscheduler-daemon.sh start alert-server   # start alert  service
 </code></pre>
 <p>If the above services are started normally and the scheduling system page is normal, check whether there is an expanded Master or Worker service in the [Monitor] of the web system. If it exists, the expansion is complete.</p>
 <hr>
-<h2>2. Reduction</h2>
+<h2>Reduction</h2>
 <p>The reduction is to reduce the master or worker services for the existing DolphinScheduler cluster.
 There are two steps for shrinking. After performing the following two steps, the shrinking operation can be completed.</p>
-<h3>2.1 Stop the service on the scaled-down node</h3>
+<h3>Stop the Service on the Scaled-Down Node</h3>
 <ul>
 <li>If you are scaling down the master node, identify the physical machine where the master service is located, and stop the master service on the physical machine.</li>
 <li>If the worker node is scaled down, determine the physical machine where the worker service is to be scaled down and stop the worker and logger services on the physical machine.</li>
@@ -210,7 +210,7 @@ sh bin/dolphinscheduler-daemon.sh start alert-server  # start alert  service
     AlertServer          ----- alert  service
 </code></pre>
 <p>If the corresponding master service or worker service does not exist, then the master/worker service is successfully shut down.</p>
-<h3>2.2 Modify the configuration file</h3>
+<h3>Modify the Configuration File</h3>
 <ul>
 <li>
 <p>modify the configuration file <code>conf/config/install_config.conf</code> on the <strong>all</strong> nodes, synchronizing the following configuration.</p>
diff --git a/en-us/docs/latest/user_doc/guide/expansion-reduction.json b/en-us/docs/latest/user_doc/guide/expansion-reduction.json
index 31fd48f..68ce02c 100644
--- a/en-us/docs/latest/user_doc/guide/expansion-reduction.json
+++ b/en-us/docs/latest/user_doc/guide/expansion-reduction.json
@@ -1,6 +1,6 @@
 {
   "filename": "expansion-reduction.md",
-  "__html": "<!-- markdown-link-check-disable -->\n<h1>DolphinScheduler Expansion and Reduction</h1>\n<h2>1. Expansion</h2>\n<p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>\n<pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.\n       If the physical machine where the expansion master or worker node is located has already installed the scheduled [...]
+  "__html": "<!-- markdown-link-check-disable -->\n<h1>DolphinScheduler Expansion and Reduction</h1>\n<h2>Expansion</h2>\n<p>This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.</p>\n<pre><code> Attention: There cannot be more than one master service process or worker service process on a physical machine.\n       If the physical machine where the expansion master or worker node is located has already installed the scheduled se [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/expansion-reduction.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/flink-call.html b/en-us/docs/latest/user_doc/guide/flink-call.html
index 9d4c155..1587d25 100644
--- a/en-us/docs/latest/user_doc/guide/flink-call.html
+++ b/en-us/docs/latest/user_doc/guide/flink-call.html
@@ -11,7 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h3>Create a queue</h3>
+<h2>Create a Queue</h2>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>
 <li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>
@@ -19,7 +19,7 @@
 <p align="center">
    <img src="/img/api/create_queue.png" width="80%" />
  </p>
-<h3>Create a tenant</h3>
+<h2>Create a Tenant</h2>
 <pre><code>1. The tenant corresponds to a Linux user, which the user worker uses to submit jobs. If Linux OS environment does not have this user, the worker will create this user when executing the script.
 2. Both the tenant and the tenant code are unique and cannot be repeated, just like a person has a name and id number.  
 3. After creating a tenant, there will be a folder in the HDFS relevant directory.  
@@ -27,11 +27,11 @@
 <p align="center">
    <img src="/img/api/create_tenant.png" width="80%" />
  </p>
-<h3>Create a user</h3>
+<h2>Create a User</h2>
 <p align="center">
    <img src="/img/api/create_user.png" width="80%" />
  </p>
-<h3>Create a token</h3>
+<h2>Create a Token</h2>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>
 </ol>
@@ -44,7 +44,7 @@
 <p align="center">
    <img src="/img/create-token-en1.png" width="80%" />
  </p>
-<h3>Use token</h3>
+<h2>Use Token</h2>
 <ol>
 <li>
 <p>Open the API documentation page</p>
@@ -73,21 +73,21 @@
 <p align="center">
    <img src="/img/test-api.png" width="80%" />
  </p>  
-<h3>User authorization</h3>
+<h2>User Authorization</h2>
 <p align="center">
    <img src="/img/api/user_authorization.png" width="80%" />
  </p>
-<h3>User login</h3>
+<h2>User Login</h2>
 <pre><code>http://192.168.1.163:12345/dolphinscheduler/ui/#/monitor/servers/master
 </code></pre>
 <p align="center">
    <img src="/img/api/user_login.png" width="80%" />
  </p>
-<h3>Upload the resource</h3>
+<h2>Upload the Resource</h2>
 <p align="center">
    <img src="/img/api/upload_resource.png" width="80%" />
  </p>
-<h3>Create a workflow</h3>
+<h2>Create a Workflow</h2>
 <p align="center">
    <img src="/img/api/create_workflow1.png" width="80%" />
  </p>
@@ -100,11 +100,11 @@
 <p align="center">
    <img src="/img/api/create_workflow4.png" width="80%" />
  </p>
-<h3>View the execution result</h3>
+<h2>View the Execution Result</h2>
 <p align="center">
    <img src="/img/api/execution_result.png" width="80%" />
  </p>
-<h3>View log</h3>
+<h2>View Log</h2>
 <p align="center">
    <img src="/img/api/log.png" width="80%" />
  </p>
diff --git a/en-us/docs/latest/user_doc/guide/flink-call.json b/en-us/docs/latest/user_doc/guide/flink-call.json
index 469dae6..148fe4b 100644
--- a/en-us/docs/latest/user_doc/guide/flink-call.json
+++ b/en-us/docs/latest/user_doc/guide/flink-call.json
@@ -1,6 +1,6 @@
 {
   "filename": "flink-call.md",
-  "__html": "<h1>Flink Calls Operating steps</h1>\n<h3>Create a queue</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>\n<li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>\n</ol>\n<p align=\"center\">\n   <img src=\"/img/api/create_queue.png\" width=\"80%\" />\n </p>\n<h3>Create a tenant</h3>\n<pre><code>1. The tenant correspon [...]
+  "__html": "<h1>Flink Calls Operating steps</h1>\n<h2>Create a Queue</h2>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Queue manage&quot; on the left, and click &quot;Create queue&quot; to create a queue.</li>\n<li>Fill in the name and value of the queue, and click &quot;Submit&quot;</li>\n</ol>\n<p align=\"center\">\n   <img src=\"/img/api/create_queue.png\" width=\"80%\" />\n </p>\n<h2>Create a Tenant</h2>\n<pre><code>1. The tenant correspon [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/flink-call.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/cluster.html b/en-us/docs/latest/user_doc/guide/installation/cluster.html
index 7bf6293..5fec774 100644
--- a/en-us/docs/latest/user_doc/guide/installation/cluster.html
+++ b/en-us/docs/latest/user_doc/guide/installation/cluster.html
@@ -15,9 +15,9 @@
 <p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href="standalone.md">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href="pseudo-cluster.md">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you follow <a href="cluster.md">cluster deployment</a> or <a href="kubernetes.md">kubernetes</a></p>
 <h2>Deployment Step</h2>
 <p>Cluster deployment uses the same scripts and configuration files as we deploy in <a href="pseudo-cluster.md">pseudo-cluster deployment</a>, so the prepare and required are the same as pseudo-cluster deployment. The difference is that <a href="pseudo-cluster.md">pseudo-cluster deployment</a> is for one machine, while cluster deployment (Cluster) for multiple. and the steps of &quot;Modify configuration&quot; are quite different between pseudo-cluster deployment and cluster deployment.</p>
-<h3>Prepare &amp;&amp; DolphinScheduler startup environment</h3>
-<p>Because of cluster deployment for multiple machine, so you have to run you &quot;Prepare&quot; and &quot;startup&quot; in every machine in <a href="pseudo-cluster.md">pseudo-cluster.md</a>, except section &quot;Configure machine SSH password-free login&quot;, &quot;Start zookeeper&quot;, &quot;Initialize the database&quot;, which is only for deployment or just need an single server</p>
-<h3>Modify configuration</h3>
+<h3>Prepare and DolphinScheduler Startup Environment</h3>
+<p>Because of cluster deployment for multiple machine, so you have to run you &quot;Prepare&quot; and &quot;startup&quot; in every machine in <a href="pseudo-cluster.md">pseudo-cluster.md</a>, except section &quot;Configure machine SSH password-free login&quot;, &quot;Start ZooKeeper&quot;, &quot;Initialize the database&quot;, which is only for deployment or just need an single server</p>
+<h3>Modify Configuration</h3>
 <p>This is a step that is quite different from <a href="pseudo-cluster.md">pseudo-cluster.md</a>, because the deployment script will transfer the resources required for installation machine to each deployment machine using <code>scp</code>. And we have to declare all machine we want to install DolphinScheduler and then run script <code>install.sh</code>. The configuration file is under the path <code>conf/config/install_config.conf</code>, here we only need to modify section <strong>INST [...]
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> INSTALL MACHINE</span>
@@ -32,7 +32,9 @@ alertServer=&quot;ds4&quot;
 apiServers=&quot;ds5&quot;
 pythonGatewayServers=&quot;ds5&quot;
 </code></pre>
-<h2>Start DolphinScheduler &amp;&amp; Login DolphinScheduler &amp;&amp; Server Start And Stop</h2>
+<h2>Start and Login DolphinScheduler</h2>
+<p>Same as <a href="http://pseudo-cluster.md">pseudo-cluster.md</a>](<a href="http://pseudo-cluster.md">pseudo-cluster.md</a>)</p>
+<h2>Start and Stop Server</h2>
 <p>Same as <a href="http://pseudo-cluster.md">pseudo-cluster.md</a>](<a href="http://pseudo-cluster.md">pseudo-cluster.md</a>)</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/guide/installation/cluster.json b/en-us/docs/latest/user_doc/guide/installation/cluster.json
index 2c493f8..83e43a2 100644
--- a/en-us/docs/latest/user_doc/guide/installation/cluster.json
+++ b/en-us/docs/latest/user_doc/guide/installation/cluster.json
@@ -1,6 +1,6 @@
 {
   "filename": "cluster.md",
-  "__html": "<h1>Cluster Deployment</h1>\n<p>Cluster deployment is to deploy the DolphinScheduler on multiple machines for running a large number of tasks in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</ [...]
+  "__html": "<h1>Cluster Deployment</h1>\n<p>Cluster deployment is to deploy the DolphinScheduler on multiple machines for running a large number of tasks in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/cluster.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/docker.html b/en-us/docs/latest/user_doc/guide/installation/docker.html
index a8d5ff9..40c8025 100644
--- a/en-us/docs/latest/user_doc/guide/installation/docker.html
+++ b/en-us/docs/latest/user_doc/guide/installation/docker.html
@@ -16,12 +16,12 @@
 <li><a href="https://docs.docker.com/engine/install/">Docker</a> 1.13.1+</li>
 <li><a href="https://docs.docker.com/compose/">Docker Compose</a> 1.11.0+</li>
 </ul>
-<h2>How to use this Docker image</h2>
+<h2>How to Use this Docker Image</h2>
 <p>Here're 3 ways to quickly install DolphinScheduler</p>
-<h3>The First Way: Start a DolphinScheduler by docker-compose (recommended)</h3>
+<h3>The First Way: Start a DolphinScheduler by Docker Compose (Recommended)</h3>
 <p>In this way, you need to install <a href="https://docs.docker.com/compose/">docker-compose</a> as a prerequisite, please install it yourself according to the rich docker-compose installation guidance on the Internet</p>
 <p>For Windows 7-10, you can install <a href="https://github.com/docker/toolbox/releases">Docker Toolbox</a>. For Windows 10 64-bit, you can install <a href="https://docs.docker.com/docker-for-windows/install/">Docker Desktop</a>, and pay attention to the <a href="https://docs.docker.com/docker-for-windows/install/#system-requirements">system requirements</a></p>
-<h4>0. Configure memory not less than 4GB</h4>
+<h4>Configure Memory not Less Than 4GB</h4>
 <p>For Mac user, click <code>Docker Desktop -&gt; Preferences -&gt; Resources -&gt; Memory</code></p>
 <p>For Windows Docker Toolbox user, two items need to be configured:</p>
 <ul>
@@ -33,9 +33,9 @@
 <li><strong>Hyper-V mode</strong>: Click <code>Docker Desktop -&gt; Settings -&gt; Resources -&gt; Memory</code></li>
 <li><strong>WSL 2 mode</strong>: Refer to <a href="https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig">WSL 2 utility VM</a></li>
 </ul>
-<h4>1. Download the Source Code Package</h4>
+<h4>Download the Source Code Package</h4>
 <p>Please download the source code package apache-dolphinscheduler-2.0.3-src.tar.gz, download address: <a href="/en-us/download/download.html">download</a></p>
-<h4>2. Pull Image and Start the Service</h4>
+<h4>Pull Image and Start the Service</h4>
 <blockquote>
 <p>For Mac and Linux user, open <strong>Terminal</strong>
 For Windows Docker Toolbox user, open <strong>Docker Quickstart Terminal</strong>
@@ -51,24 +51,24 @@ $ docker-compose up -d
 <p>PowerShell should use <code>cd apache-dolphinscheduler-2.0.3-src\docker\docker-swarm</code></p>
 </blockquote>
 <p>The <strong>PostgreSQL</strong> (with username <code>root</code>, password <code>root</code> and database <code>dolphinscheduler</code>) and <strong>ZooKeeper</strong> services will start by default</p>
-<h4>3. Login</h4>
+<h4>Login</h4>
 <p>Visit the Web UI: <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> (The local address is <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a>)</p>
 <p>The default username is <code>admin</code> and the default password is <code>dolphinscheduler123</code></p>
 <p align="center">
   <img src="/img/login_en.png" width="60%" />
 </p>
 <p>Please refer to the <code>Quick Start</code> in the chapter <a href="../quick-start.md">Quick Start</a> to explore how to use DolphinScheduler</p>
-<h3>The Second Way: Start via specifying the existing PostgreSQL and ZooKeeper service</h3>
+<h3>The Second Way: Start via Specifying the Existing PostgreSQL and ZooKeeper Service</h3>
 <p>In this way, you need to install <a href="https://docs.docker.com/engine/install/">docker</a> as a prerequisite, please install it yourself according to the rich docker installation guidance on the Internet</p>
-<h4>1. Basic Required Software (please install by yourself)</h4>
+<h4>Basic Required Software</h4>
 <ul>
 <li><a href="https://www.postgresql.org/download/">PostgreSQL</a> (8.2.15+)</li>
 <li><a href="https://zookeeper.apache.org/releases.html">ZooKeeper</a> (3.4.6+)</li>
 <li><a href="https://docs.docker.com/engine/install/">Docker</a> (1.13.1+)</li>
 </ul>
-<h4>2. Please login to the PostgreSQL database and create a database named <code>dolphinscheduler</code></h4>
-<h4>3. Initialize the database, import <code>sql/dolphinscheduler_postgre.sql</code> to create tables and initial data</h4>
-<h4>4. Download the DolphinScheduler Image</h4>
+<h4>Please Login to the PostgreSQL Database and Create a Database Named <code>dolphinscheduler</code></h4>
+<h4>Initialize the Database, Import <code>sql/dolphinscheduler_postgre.sql</code> to Create Tables and Initial Data</h4>
+<h4>Download the DolphinScheduler Image</h4>
 <p>We have already uploaded user-oriented DolphinScheduler image to the Docker repository so that you can pull the image from the docker repository:</p>
 <pre><code>docker pull dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.3
 </code></pre>
@@ -81,9 +81,9 @@ $ docker-compose up -d
 apache/dolphinscheduler:2.0.3 all
 </code></pre>
 <p>Note: database username test and password test need to be replaced with your actual PostgreSQL username and password, 192.168.x.x need to be replaced with your relate PostgreSQL and ZooKeeper host IP</p>
-<h4>6. Login</h4>
+<h4>Login</h4>
 <p>Same as above</p>
-<h3>The Third Way: Start a standalone DolphinScheduler server</h3>
+<h3>The Third Way: Start a Standalone DolphinScheduler Server</h3>
 <p>The following services are automatically started when the container starts:</p>
 <pre><code>     MasterServer         ----- master service
      WorkerServer         ----- worker service
@@ -305,7 +305,7 @@ apache/dolphinscheduler:2.0.3 python-gateway
 </tbody>
 </table>
 <h2>FAQ</h2>
-<h3>How to manage DolphinScheduler by docker-compose?</h3>
+<h3>How to Manage DolphinScheduler by Docker Compose?</h3>
 <p>Start, restart, stop or list containers:</p>
 <pre><code>docker-compose start
 docker-compose restart
@@ -318,7 +318,7 @@ docker-compose ps
 <p>Stop containers and remove containers, networks and volumes:</p>
 <pre><code>docker-compose down -v
 </code></pre>
-<h3>How to view the logs of a container?</h3>
+<h3>How to View the Logs of a Container?</h3>
 <p>List all running containers:</p>
 <pre><code>docker ps
 docker ps --format &quot;{{.Names}}&quot; # only print names
@@ -328,14 +328,14 @@ docker ps --format &quot;{{.Names}}&quot; # only print names
 docker logs -f docker-swarm_dolphinscheduler-api_1 # follow log output
 docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines from the end of the logs
 </code></pre>
-<h3>How to scale master and worker by docker-compose?</h3>
+<h3>How to Scale Master and Worker by Docker Compose?</h3>
 <p>Scale master to 2 instances:</p>
 <pre><code>docker-compose up -d --scale dolphinscheduler-master=2 dolphinscheduler-master
 </code></pre>
 <p>Scale worker to 3 instances:</p>
 <pre><code>docker-compose up -d --scale dolphinscheduler-worker=3 dolphinscheduler-worker
 </code></pre>
-<h3>How to deploy DolphinScheduler on Docker Swarm?</h3>
+<h3>How to Deploy DolphinScheduler on Docker Swarm?</h3>
 <p>Assuming that the Docker Swarm cluster has been created (If there is no Docker Swarm cluster, please refer to <a href="https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/">create-swarm</a>)</p>
 <p>Start a stack named dolphinscheduler:</p>
 <pre><code>docker stack deploy -c docker-stack.yml dolphinscheduler
@@ -349,15 +349,15 @@ docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines f
 <p>Remove the volumes of the stack named dolphinscheduler:</p>
 <pre><code>docker volume rm -f $(docker volume ls --format &quot;{{.Name}}&quot; | grep -e &quot;^dolphinscheduler&quot;)
 </code></pre>
-<h3>How to scale master and worker on Docker Swarm?</h3>
+<h3>How to Scale Master and Worker on Docker Swarm?</h3>
 <p>Scale master of the stack named dolphinscheduler to 2 instances:</p>
 <pre><code>docker service scale dolphinscheduler_dolphinscheduler-master=2
 </code></pre>
 <p>Scale worker of the stack named dolphinscheduler to 3 instances:</p>
 <pre><code>docker service scale dolphinscheduler_dolphinscheduler-worker=3
 </code></pre>
-<h3>How to build a Docker image?</h3>
-<h4>Build from the source code (Require Maven 3.3+ &amp; JDK 1.8+)</h4>
+<h3>How to Build a Docker Image?</h3>
+<h4>Build from the Source Code (Require Maven 3.3+ and JDK 1.8+)</h4>
 <p>In Unix-Like, execute in Terminal:</p>
 <pre><code class="language-bash">$ bash ./docker/build/hooks/build
 </code></pre>
@@ -365,7 +365,7 @@ docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines f
 <pre><code class="language-bat"><span class="hljs-function">C:\<span class="hljs-title">dolphinscheduler</span>-<span class="hljs-title">src</span>&gt;.\<span class="hljs-title">docker</span>\<span class="hljs-title">build</span>\<span class="hljs-title">hooks</span>\<span class="hljs-title">build.bat</span>
 </span></code></pre>
 <p>Please read <code>./docker/build/hooks/build</code> <code>./docker/build/hooks/build.bat</code> script files if you don't understand</p>
-<h4>Build from the binary distribution (Not require Maven 3.3+ &amp; JDK 1.8+)</h4>
+<h4>Build from the Binary Distribution (Not require Maven 3.3+ and JDK 1.8+)</h4>
 <p>Please download the binary distribution package apache-dolphinscheduler-2.0.3-bin.tar.gz, download address: <a href="/en-us/download/download.html">download</a>. And put apache-dolphinscheduler-2.0.3-bin.tar.gz into the <code>apache-dolphinscheduler-2.0.3-src/docker/build</code> directory, execute in Terminal or PowerShell:</p>
 <pre><code>$ cd apache-dolphinscheduler-2.0.3-src/docker/build
 $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
@@ -373,7 +373,7 @@ $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
 <blockquote>
 <p>PowerShell should use <code>cd apache-dolphinscheduler-2.0.3-src/docker/build</code></p>
 </blockquote>
-<h4>Build multi-platform images</h4>
+<h4>Build Multi-Platform Images</h4>
 <p>Currently support to build images including <code>linux/amd64</code> and <code>linux/arm64</code> platform architecture, requirements:</p>
 <ol>
 <li>Support <a href="https://docs.docker.com/engine/reference/commandline/buildx/">docker buildx</a></li>
@@ -383,7 +383,7 @@ $ docker build --build-arg VERSION=2.0.3 -t apache/dolphinscheduler:2.0.3 .
 <pre><code class="language-bash">$ docker login <span class="hljs-comment"># login to push apache/dolphinscheduler</span>
 $ bash ./docker/build/hooks/build
 </code></pre>
-<h3>How to add an environment variable for Docker?</h3>
+<h3>How to Add an Environment Variable for Docker?</h3>
 <p>If you would like to do additional initialization in an image derived from this one, add one or more environment variables under <code>/root/start-init-conf.sh</code>, and modify template files in <code>/opt/dolphinscheduler/conf/*.tpl</code>.</p>
 <p>For example, to add an environment variable <code>SECURITY_AUTHENTICATION_TYPE</code> in <code>/root/start-init-conf.sh</code>:</p>
 <pre><code>export SECURITY_AUTHENTICATION_TYPE=PASSWORD
@@ -400,7 +400,7 @@ EOF
 &quot;</span> &gt; <span class="hljs-variable">${DOLPHINSCHEDULER_HOME}</span>/conf/<span class="hljs-variable">${line%.*}</span>
 <span class="hljs-keyword">done</span>
 </code></pre>
-<h3>How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?</h3>
+<h3>How to Use MySQL as the DolphinScheduler's Database Instead of PostgreSQL?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to use MySQL, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -453,7 +453,7 @@ DATABASE_PARAMS=useUnicode=true&amp;characterEncoding=UTF-8
 <ol start="8">
 <li>Run a dolphinscheduler (See <strong>How to use this docker image</strong>)</li>
 </ol>
-<h3>How to support MySQL datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support MySQL Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to add MySQL datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -488,7 +488,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <p>Add a MySQL datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Oracle datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support Oracle Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of Oracle.</p>
 <p>If you want to add Oracle datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -523,7 +523,7 @@ COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 <p>Add an Oracle datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Python 2 pip and custom requirements.txt?</h3>
+<h3>How to Support Python 2 pip and Custom requirements.txt?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install pip:</li>
 </ol>
@@ -556,7 +556,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify pip under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Python 3?</h3>
+<h3>How to Support Python 3?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install Python 3:</li>
 </ol>
@@ -590,7 +590,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify Python 3 under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Hadoop, Spark, Flink, Hive or DataX?</h3>
+<h3>How to Support Hadoop, Spark, Flink, Hive or DataX?</h3>
 <p>Take Spark 2.4.7 as an example:</p>
 <ol>
 <li>
@@ -639,7 +639,7 @@ ln -s spark-2.4.7-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </ol>
 <p>Spark on YARN (Deploy Mode is <code>cluster</code> or <code>client</code>) requires Hadoop support. Similar to Spark support, the operation of supporting Hadoop is almost the same as the previous steps</p>
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> exists</p>
-<h3>How to support Spark 3?</h3>
+<h3>How to Support Spark 3?</h3>
 <p>In fact, the way to submit applications with <code>spark-submit</code> is the same, regardless of Spark 1, 2 or 3. In other words, the semantics of <code>SPARK_HOME2</code> is the second <code>SPARK_HOME</code> instead of <code>SPARK2</code>'s <code>HOME</code>, so just set <code>SPARK_HOME2=/path/to/spark3</code></p>
 <p>Take Spark 3.1.1 as an example:</p>
 <ol>
@@ -672,7 +672,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <pre><code>$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
 </code></pre>
 <p>Check whether the task log contains the output like <code>Pi is roughly 3.146015</code></p>
-<h3>How to support shared storage between Master, Worker and Api server?</h3>
+<h3>How to Support Shared Storage between Master, Worker and Api server?</h3>
 <blockquote>
 <p><strong>Note</strong>: If it is deployed on a single machine by <code>docker-compose</code>, step 1 and 2 can be skipped directly, and execute the command like <code>docker cp hadoop-3.2.2.tar.gz docker-swarm_dolphinscheduler-worker_1:/opt/soft</code> to put Hadoop into the shared directory <code>/opt/soft</code> in the container</p>
 </blockquote>
@@ -698,7 +698,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> are correct</p>
 </li>
 </ol>
-<h3>How to support local file resource storage instead of HDFS and S3?</h3>
+<h3>How to Support Local File Resource Storage Instead of HDFS and S3?</h3>
 <blockquote>
 <p><strong>Note</strong>: If it is deployed on a single machine by <code>docker-compose</code>, step 2 can be skipped directly</p>
 </blockquote>
@@ -721,7 +721,7 @@ FS_DEFAULT_FS=file:///
       <span class="hljs-attr">o:</span> <span class="hljs-string">&quot;addr=10.40.0.199,nolock,soft,rw&quot;</span>
       <span class="hljs-attr">device:</span> <span class="hljs-string">&quot;:/path/to/resource/dir&quot;</span>
 </code></pre>
-<h3>How to support S3 resource storage like MinIO?</h3>
+<h3>How to Support S3 Resource Storage Like MinIO?</h3>
 <p>Take MinIO as an example: Modify the following environment variables in <code>config.env.sh</code></p>
 <pre><code>RESOURCE_STORAGE_TYPE=S3
 RESOURCE_UPLOAD_PATH=/dolphinscheduler
@@ -734,7 +734,7 @@ FS_S3A_SECRET_KEY=MINIO_SECRET_KEY
 <blockquote>
 <p><strong>Note</strong>: <code>MINIO_IP</code> can only use IP instead of the domain name, because DolphinScheduler currently doesn't support S3 path style access</p>
 </blockquote>
-<h3>How to configure SkyWalking?</h3>
+<h3>How to Configure SkyWalking?</h3>
 <p>Modify SkyWalking environment variables in <code>config.env.sh</code>:</p>
 <pre><code>SKYWALKING_ENABLE=true
 SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
@@ -769,10 +769,10 @@ SW_GRPC_LOG_SERVER_PORT=11800
 <p><strong>Note</strong>: You must specify it when starting a standalone dolphinscheduler server. Like <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>, <code>alert-server</code>.</p>
 <h3>ZooKeeper</h3>
 <p><strong><code>ZOOKEEPER_QUORUM</code></strong></p>
-<p>This environment variable sets zookeeper quorum. The default value is <code>127.0.0.1:2181</code>.</p>
+<p>This environment variable sets ZooKeeper quorum. The default value is <code>127.0.0.1:2181</code>.</p>
 <p><strong>Note</strong>: You must specify it when starting a standalone dolphinscheduler server. Like <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>.</p>
 <p><strong><code>ZOOKEEPER_ROOT</code></strong></p>
-<p>This environment variable sets zookeeper root directory for dolphinscheduler. The default value is <code>/dolphinscheduler</code>.</p>
+<p>This environment variable sets ZooKeeper root directory for dolphinscheduler. The default value is <code>/dolphinscheduler</code>.</p>
 <h3>Common</h3>
 <p><strong><code>DOLPHINSCHEDULER_OPTS</code></strong></p>
 <p>This environment variable sets JVM options for dolphinscheduler, suitable for <code>master-server</code>, <code>worker-server</code>, <code>api-server</code>, <code>alert-server</code>, <code>logger-server</code>. The default value is empty.</p>
diff --git a/en-us/docs/latest/user_doc/guide/installation/docker.json b/en-us/docs/latest/user_doc/guide/installation/docker.json
index 3d231ac..bf1f64b 100644
--- a/en-us/docs/latest/user_doc/guide/installation/docker.json
+++ b/en-us/docs/latest/user_doc/guide/installation/docker.json
@@ -1,6 +1,6 @@
 {
   "filename": "docker.md",
-  "__html": "<h1>QuickStart in Docker</h1>\n<h2>Prerequisites</h2>\n<ul>\n<li><a href=\"https://docs.docker.com/engine/install/\">Docker</a> 1.13.1+</li>\n<li><a href=\"https://docs.docker.com/compose/\">Docker Compose</a> 1.11.0+</li>\n</ul>\n<h2>How to use this Docker image</h2>\n<p>Here're 3 ways to quickly install DolphinScheduler</p>\n<h3>The First Way: Start a DolphinScheduler by docker-compose (recommended)</h3>\n<p>In this way, you need to install <a href=\"https://docs.docker.co [...]
+  "__html": "<h1>QuickStart in Docker</h1>\n<h2>Prerequisites</h2>\n<ul>\n<li><a href=\"https://docs.docker.com/engine/install/\">Docker</a> 1.13.1+</li>\n<li><a href=\"https://docs.docker.com/compose/\">Docker Compose</a> 1.11.0+</li>\n</ul>\n<h2>How to Use this Docker Image</h2>\n<p>Here're 3 ways to quickly install DolphinScheduler</p>\n<h3>The First Way: Start a DolphinScheduler by Docker Compose (Recommended)</h3>\n<p>In this way, you need to install <a href=\"https://docs.docker.co [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/docker.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/hardware.html b/en-us/docs/latest/user_doc/guide/installation/hardware.html
index e75c2d6..a23e966 100644
--- a/en-us/docs/latest/user_doc/guide/installation/hardware.html
+++ b/en-us/docs/latest/user_doc/guide/installation/hardware.html
@@ -12,7 +12,7 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>
-<h2>1. Linux Operating System Version Requirements</h2>
+<h2>Linux Operating System Version Requirements</h2>
 <table>
 <thead>
 <tr>
@@ -43,7 +43,7 @@
 <p><strong>Attention:</strong>
 The above Linux operating systems can run on physical servers and mainstream virtualization environments such as VMware, KVM, and XEN.</p>
 </blockquote>
-<h2>2. Recommended Server Configuration</h2>
+<h2>Recommended Server Configuration</h2>
 <p>DolphinScheduler supports 64-bit hardware platforms with Intel x86-64 architecture. The following recommendation is made for server hardware configuration in a production environment:</p>
 <h3>Production Environment</h3>
 <table>
@@ -73,7 +73,7 @@ The above Linux operating systems can run on physical servers and mainstream vir
 <li>The hard disk size configuration is recommended by more than 50GB. The system disk and data disk are separated.</li>
 </ul>
 </blockquote>
-<h2>3. Network Requirements</h2>
+<h2>Network Requirements</h2>
 <p>DolphinScheduler provides the following network port configurations for normal operation:</p>
 <table>
 <thead>
@@ -108,7 +108,7 @@ The above Linux operating systems can run on physical servers and mainstream vir
 <li>Administrators can adjust relevant ports on the network side and host-side according to the deployment plan of DolphinScheduler components in the actual environment.</li>
 </ul>
 </blockquote>
-<h2>4. Browser Requirements</h2>
+<h2>Browser Requirements</h2>
 <p>DolphinScheduler recommends Chrome and the latest browsers which using Chrome Kernel to access the front-end visual operator page.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/guide/installation/hardware.json b/en-us/docs/latest/user_doc/guide/installation/hardware.json
index 43220a5..a161e83 100644
--- a/en-us/docs/latest/user_doc/guide/installation/hardware.json
+++ b/en-us/docs/latest/user_doc/guide/installation/hardware.json
@@ -1,6 +1,6 @@
 {
   "filename": "hardware.md",
-  "__html": "<h1>Hardware Environment</h1>\n<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>\n<h2>1. Linux Operating System Version Requirements</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left\">OS</th>\n<th style=\"text-align:center\">Version</th>\n</tr>\n</thead>\n [...]
+  "__html": "<h1>Hardware Environment</h1>\n<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>\n<h2>Linux Operating System Version Requirements</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left\">OS</th>\n<th style=\"text-align:center\">Version</th>\n</tr>\n</thead>\n<tb [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/hardware.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/kubernetes.html b/en-us/docs/latest/user_doc/guide/installation/kubernetes.html
index eae8b5b..361df8d 100644
--- a/en-us/docs/latest/user_doc/guide/installation/kubernetes.html
+++ b/en-us/docs/latest/user_doc/guide/installation/kubernetes.html
@@ -19,7 +19,7 @@
 <li><a href="https://kubernetes.io/">Kubernetes</a> 1.12+</li>
 <li>PV provisioner support in the underlying infrastructure</li>
 </ul>
-<h2>Installing the Chart</h2>
+<h2>Install the Chart</h2>
 <p>Please download the source code package apache-dolphinscheduler-2.0.3-src.tar.gz, download address: <a href="/en-us/download/download.html">download</a></p>
 <p>To install the chart with the release name <code>dolphinscheduler</code>, please execute the following commands:</p>
 <pre><code>$ tar -zxvf apache-dolphinscheduler-2.0.3-src.tar.gz
@@ -60,7 +60,7 @@ NODE_PORT=$(kubectl get svc {{ template <span class="hljs-string">&quot;dolphins
 <p>And then access the web: http://<span class="katex"><span class="katex-mathml"><math><semantics><mrow><mi>N</mi><mi>O</mi><mi>D</mi><msub><mi>E</mi><mi>I</mi></msub><mi>P</mi><mo>:</mo></mrow><annotation encoding="application/x-tex">NODE_IP:</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="strut" style="height:0.68333em;"></span><span class="strut bottom" style="height:0.83333em;vertical-align:-0.15em;"></span><span class="base textstyle u [...]
 <p>The default username is <code>admin</code> and the default password is <code>dolphinscheduler123</code></p>
 <p>Please refer to the <code>Quick Start</code> in the chapter <a href="../quick-start.md">Quick Start</a> to explore how to use DolphinScheduler</p>
-<h2>Uninstalling the Chart</h2>
+<h2>Uninstall the Chart</h2>
 <p>To uninstall/delete the <code>dolphinscheduler</code> deployment:</p>
 <pre><code class="language-bash">$ helm uninstall dolphinscheduler
 </code></pre>
@@ -236,7 +236,7 @@ NODE_PORT=$(kubectl get svc {{ template <span class="hljs-string">&quot;dolphins
 </tbody>
 </table>
 <h2>FAQ</h2>
-<h3>How to view the logs of a pod container?</h3>
+<h3>How to View the Logs of a Pod Container?</h3>
 <p>List all pods (aka <code>po</code>):</p>
 <pre><code>kubectl get po
 kubectl get po -n test # with test namespace
@@ -246,7 +246,7 @@ kubectl get po -n test # with test namespace
 kubectl logs -f dolphinscheduler-master-0 # follow log output
 kubectl logs --tail 10 dolphinscheduler-master-0 -n test # show last 10 lines from the end of the logs
 </code></pre>
-<h3>How to scale api, master and worker on Kubernetes?</h3>
+<h3>How to Scale api, master and worker on Kubernetes?</h3>
 <p>List all deployments (aka <code>deploy</code>):</p>
 <pre><code>kubectl get deploy
 kubectl get deploy -n test # with test namespace
@@ -267,7 +267,7 @@ kubectl scale --replicas=2 sts dolphinscheduler-master -n test # with test names
 <pre><code>kubectl scale --replicas=6 sts dolphinscheduler-worker
 kubectl scale --replicas=6 sts dolphinscheduler-worker -n test # with test namespace
 </code></pre>
-<h3>How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?</h3>
+<h3>How to Use MySQL as the DolphinScheduler's Database Instead of PostgreSQL?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to use MySQL, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -315,7 +315,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <ol start="8">
 <li>Run a DolphinScheduler release in Kubernetes (See <strong>Installing the Chart</strong>)</li>
 </ol>
-<h3>How to support MySQL datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support MySQL Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of MySQL.</p>
 <p>If you want to add MySQL datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -350,7 +350,7 @@ COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 <p>Add a MySQL datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Oracle datasource in <code>Datasource manage</code>?</h3>
+<h3>How to Support Oracle Datasource in <code>Datasource manage</code>?</h3>
 <blockquote>
 <p>Because of the commercial license, we cannot directly use the driver of Oracle.</p>
 <p>If you want to add Oracle datasource, you can build a new image based on the <code>apache/dolphinscheduler</code> image as follows.</p>
@@ -385,7 +385,7 @@ COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 <p>Add an Oracle datasource in <code>Datasource manage</code></p>
 </li>
 </ol>
-<h3>How to support Python 2 pip and custom requirements.txt?</h3>
+<h3>How to Support Python 2 pip and Custom requirements.txt?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install pip:</li>
 </ol>
@@ -418,7 +418,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify pip under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Python 3?</h3>
+<h3>How to Support Python 3?</h3>
 <ol>
 <li>Create a new <code>Dockerfile</code> to install Python 3:</li>
 </ol>
@@ -452,7 +452,7 @@ RUN apt-get update &amp;&amp; \
 <p>Verify Python 3 under a new Python task</p>
 </li>
 </ol>
-<h3>How to support Hadoop, Spark, Flink, Hive or DataX?</h3>
+<h3>How to Support Hadoop, Spark, Flink, Hive or DataX?</h3>
 <p>Take Spark 2.4.7 as an example:</p>
 <ol>
 <li>
@@ -506,7 +506,7 @@ ln -s spark-2.4.7-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </ol>
 <p>Spark on YARN (Deploy Mode is <code>cluster</code> or <code>client</code>) requires Hadoop support. Similar to Spark support, the operation of supporting Hadoop is almost the same as the previous steps</p>
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> exists</p>
-<h3>How to support Spark 3?</h3>
+<h3>How to Support Spark 3?</h3>
 <p>In fact, the way to submit applications with <code>spark-submit</code> is the same, regardless of Spark 1, 2 or 3. In other words, the semantics of <code>SPARK_HOME2</code> is the second <code>SPARK_HOME</code> instead of <code>SPARK2</code>'s <code>HOME</code>, so just set <code>SPARK_HOME2=/path/to/spark3</code></p>
 <p>Take Spark 3.1.1 as an example:</p>
 <ol>
@@ -544,7 +544,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <pre><code>$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
 </code></pre>
 <p>Check whether the task log contains the output like <code>Pi is roughly 3.146015</code></p>
-<h3>How to support shared storage between Master, Worker and Api server?</h3>
+<h3>How to Support Shared Storage Between Master, Worker and Api Server?</h3>
 <p>For example, Master, Worker and API server may use Hadoop at the same time</p>
 <ol>
 <li>Modify the following configurations in <code>values.yaml</code></li>
@@ -570,7 +570,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <p>Ensure that <code>$HADOOP_HOME</code> and <code>$HADOOP_CONF_DIR</code> are correct</p>
 </li>
 </ol>
-<h3>How to support local file resource storage instead of HDFS and S3?</h3>
+<h3>How to Support Local File Resource Storage Instead of HDFS and S3?</h3>
 <p>Modify the following configurations in <code>values.yaml</code></p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -588,7 +588,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <blockquote>
 <p><strong>Note</strong>: <code>storageClassName</code> must support the access mode: <code>ReadWriteMany</code></p>
 </blockquote>
-<h3>How to support S3 resource storage like MinIO?</h3>
+<h3>How to Support S3 Resource Storage Like MinIO?</h3>
 <p>Take MinIO as an example: Modify the following configurations in <code>values.yaml</code></p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -603,7 +603,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 <blockquote>
 <p><strong>Note</strong>: <code>MINIO_IP</code> can only use IP instead of domain name, because DolphinScheduler currently doesn't support S3 path style access</p>
 </blockquote>
-<h3>How to configure SkyWalking?</h3>
+<h3>How to Configure SkyWalking?</h3>
 <p>Modify SKYWALKING configurations in <code>values.yaml</code>:</p>
 <pre><code class="language-yaml"><span class="hljs-attr">common:</span>
   <span class="hljs-attr">configmap:</span>
@@ -739,7 +739,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>zookeeper.enabled</code></td>
-<td>If not exists external Zookeeper, by default, the DolphinScheduler will use a internal Zookeeper</td>
+<td>If not exists external ZooKeeper, by default, the DolphinScheduler will use a internal Zookeeper</td>
 <td><code>true</code></td>
 </tr>
 <tr>
@@ -759,7 +759,7 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>zookeeper.persistence.storageClass</code></td>
-<td>Zookeeper data persistent volume storage class. If set to &quot;-&quot;, storageClassName: &quot;&quot;, which disables dynamic provisioning</td>
+<td>ZooKeeper data persistent volume storage class. If set to &quot;-&quot;, storageClassName: &quot;&quot;, which disables dynamic provisioning</td>
 <td><code>-</code></td>
 </tr>
 <tr>
@@ -769,12 +769,12 @@ ln -s spark-3.1.1-bin-hadoop2.7 spark2 <span class="hljs-comment"># or just mv</
 </tr>
 <tr>
 <td><code>externalZookeeper.zookeeperQuorum</code></td>
-<td>If exists external Zookeeper, and set <code>zookeeper.enabled</code> value to false. Specify Zookeeper quorum</td>
+<td>If exists external ZooKeeper, and set <code>zookeeper.enabled</code> value to false. Specify Zookeeper quorum</td>
 <td><code>127.0.0.1:2181</code></td>
 </tr>
 <tr>
 <td><code>externalZookeeper.zookeeperRoot</code></td>
-<td>If exists external Zookeeper, and set <code>zookeeper.enabled</code> value to false. Specify dolphinscheduler root directory in Zookeeper</td>
+<td>If exists external ZooKeeper, and set <code>zookeeper.enabled</code> value to false. Specify dolphinscheduler root directory in Zookeeper</td>
 <td><code>/dolphinscheduler</code></td>
 </tr>
 <tr>
diff --git a/en-us/docs/latest/user_doc/guide/installation/kubernetes.json b/en-us/docs/latest/user_doc/guide/installation/kubernetes.json
index 4327429..e6e0adb 100644
--- a/en-us/docs/latest/user_doc/guide/installation/kubernetes.json
+++ b/en-us/docs/latest/user_doc/guide/installation/kubernetes.json
@@ -1,6 +1,6 @@
 {
   "filename": "kubernetes.md",
-  "__html": "<h1>QuickStart in Kubernetes</h1>\n<p>Kubernetes deployment is deploy DolphinScheduler in a Kubernetes cluster, which can schedule a large number of tasks and can be used in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\ [...]
+  "__html": "<h1>QuickStart in Kubernetes</h1>\n<p>Kubernetes deployment is deploy DolphinScheduler in a Kubernetes cluster, which can schedule a large number of tasks and can be used in production.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/kubernetes.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.html b/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.html
index c98510d..ca413a1 100644
--- a/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.html
+++ b/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.html
@@ -30,8 +30,8 @@
 <blockquote>
 <p><strong><em>Note:</em></strong> DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but if you need to run tasks that depend on them, you need to have the corresponding environment support</p>
 </blockquote>
-<h2>DolphinScheduler startup environment</h2>
-<h3>Configure user exemption and permissions</h3>
+<h2>DolphinScheduler Startup Environment</h2>
+<h3>Configure User Exemption and Permissions</h3>
 <p>Create a deployment user, and be sure to configure <code>sudo</code> without password. We here make a example for user dolphinscheduler.</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> To create a user, login as root</span>
 useradd dolphinscheduler
@@ -53,7 +53,7 @@ chown -R dolphinscheduler:dolphinscheduler apache-dolphinscheduler-*-bin
 <li>If you find the line &quot;Defaults requirest&quot; in the <code>/etc/sudoers</code> file, please comment it</li>
 </ul>
 </blockquote>
-<h3>Configure machine SSH password-free login</h3>
+<h3>Configure Machine SSH Password-Free Login</h3>
 <p>Since resources need to be sent to different machines during installation, SSH password-free login is required between each machine. The steps to configure password-free login are as follows</p>
 <pre><code class="language-shell">su dolphinscheduler
 
@@ -64,12 +64,12 @@ chmod 600 ~/.ssh/authorized_keys
 <blockquote>
 <p><strong><em>Notice:</em></strong> After the configuration is complete, you can run the command <code>ssh localhost</code> to test if it work or not, if you can login with ssh without password.</p>
 </blockquote>
-<h3>Start zookeeper</h3>
-<p>Go to the zookeeper installation directory, copy configure file <code>zoo_sample.cfg</code> to <code>conf/zoo.cfg</code>, and change value of dataDir in <code>conf/zoo.cfg</code> to <code>dataDir=./tmp/zookeeper</code></p>
-<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start zookeeper</span>
+<h3>Start ZooKeeper</h3>
+<p>Go to the ZooKeeper installation directory, copy configure file <code>zoo_sample.cfg</code> to <code>conf/zoo.cfg</code>, and change value of dataDir in <code>conf/zoo.cfg</code> to <code>dataDir=./tmp/zookeeper</code></p>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start ZooKeeper</span>
 ./bin/zkServer.sh start
 </code></pre>
-<h2>Modify configuration</h2>
+<h2>Modify Configuration</h2>
 <p>After completing the preparation of the basic environment, you need to modify the configuration file according to your environment. The configuration file is in the path of <code>conf/config/install_config.conf</code>. Generally, you just needs to modify the <strong>INSTALL MACHINE, DolphinScheduler ENV, Database, Registry Server</strong> part to complete the deployment, the following describes the parameters that must be modified</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> INSTALL MACHINE</span>
@@ -108,10 +108,10 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 #</span><span class="bash"> ---------------------------------------------------------</span>
 <span class="hljs-meta">#</span><span class="bash"> Registry Server</span>
 <span class="hljs-meta">#</span><span class="bash"> ---------------------------------------------------------</span>
-<span class="hljs-meta">#</span><span class="bash"> Registration center address, the address of zookeeper service</span>
+<span class="hljs-meta">#</span><span class="bash"> Registration center address, the address of ZooKeeper service</span>
 registryServers=&quot;localhost:2181&quot;
 </code></pre>
-<h2>Initialize the database</h2>
+<h2>Initialize the Database</h2>
 <p>DolphinScheduler metadata is stored in relational database. Currently, PostgreSQL and MySQL are supported. If you use MySQL, you need to manually download <a href="https://downloads.MySQL.com/archives/c-j/">mysql-connector-java driver</a> (8.0.16) and move it to the lib directory of DolphinScheduler. Let's take MySQL as an example for how to initialize the database</p>
 <pre><code class="language-shell">mysql -uroot -p
 <span class="hljs-meta">
@@ -136,7 +136,7 @@ mysql&gt;</span><span class="bash"> flush privileges;</span>
 </blockquote>
 <h2>Login DolphinScheduler</h2>
 <p>The browser access address <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> can login DolphinScheduler UI. The default username and password are <strong>admin/dolphinscheduler123</strong></p>
-<h2>Start or stop server</h2>
+<h2>Start or Stop Server</h2>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Stop all DolphinScheduler server</span>
 sh ./bin/stop-all.sh
 <span class="hljs-meta">
diff --git a/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.json b/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.json
index a9ddaf0..74456af 100644
--- a/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.json
+++ b/en-us/docs/latest/user_doc/guide/installation/pseudo-cluster.json
@@ -1,6 +1,6 @@
 {
   "filename": "pseudo-cluster.md",
-  "__html": "<h1>Pseudo-Cluster Deployment</h1>\n<p>The purpose of pseudo-cluster deployment is to deploy the DolphinScheduler service on a single machine. In this mode, DolphinScheduler's master, worker, api server, and logger server are all on the same machine.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks numbe [...]
+  "__html": "<h1>Pseudo-Cluster Deployment</h1>\n<p>The purpose of pseudo-cluster deployment is to deploy the DolphinScheduler service on a single machine. In this mode, DolphinScheduler's master, worker, api server, and logger server are all on the same machine.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks numbe [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/pseudo-cluster.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/installation/standalone.html b/en-us/docs/latest/user_doc/guide/installation/standalone.html
index e29308c..2411357 100644
--- a/en-us/docs/latest/user_doc/guide/installation/standalone.html
+++ b/en-us/docs/latest/user_doc/guide/installation/standalone.html
@@ -14,7 +14,7 @@
 <p>Standalone only for quick look for DolphinScheduler.</p>
 <p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href="standalone.md">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href="pseudo-cluster.md">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you follow <a href="cluster.md">cluster deployment</a> or <a href="kubernetes.md">kubernetes</a></p>
 <blockquote>
-<p><strong><em>Note:</em></strong> Standalone only recommends the use of less than 20 workflows, because it uses H2 Database, Zookeeper Testing Server, too many tasks may cause instability</p>
+<p><strong><em>Note:</em></strong> Standalone only recommends the use of less than 20 workflows, because it uses H2 Database, ZooKeeper Testing Server, too many tasks may cause instability</p>
 </blockquote>
 <h2>Prepare</h2>
 <ul>
@@ -22,7 +22,7 @@
 <li>Binary package: Download the DolphinScheduler binary package at <a href="https://dolphinscheduler.apache.org/en-us/download/download.html">download page</a></li>
 </ul>
 <h2>Start DolphinScheduler Standalone Server</h2>
-<h3>Extract and start DolphinScheduler</h3>
+<h3>Extract and Start DolphinScheduler</h3>
 <p>There is a standalone startup script in the binary compressed package, which can be quickly started after extract. Switch to a user with sudo permission and run the script</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Extract and start Standalone Server</span>
 tar -xvzf apache-dolphinscheduler-*-bin.tar.gz
@@ -31,7 +31,7 @@ sh ./bin/dolphinscheduler-daemon.sh start standalone-server
 </code></pre>
 <h3>Login DolphinScheduler</h3>
 <p>The browser access address <a href="http://localhost:12345/dolphinscheduler">http://localhost:12345/dolphinscheduler</a> can login DolphinScheduler UI. The default username and password are <strong>admin/dolphinscheduler123</strong></p>
-<h2>start/stop server</h2>
+<h3>Start or Stop Server</h3>
 <p>The script <code>./bin/dolphinscheduler-daemon.sh</code> can not only quickly start standalone, but also stop the service operation. All the commands are as follows</p>
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Start Standalone Server</span>
 sh ./bin/dolphinscheduler-daemon.sh start standalone-server
diff --git a/en-us/docs/latest/user_doc/guide/installation/standalone.json b/en-us/docs/latest/user_doc/guide/installation/standalone.json
index da513c1..0fe533d 100644
--- a/en-us/docs/latest/user_doc/guide/installation/standalone.json
+++ b/en-us/docs/latest/user_doc/guide/installation/standalone.json
@@ -1,6 +1,6 @@
 {
   "filename": "standalone.md",
-  "__html": "<h1>Standalone</h1>\n<p>Standalone only for quick look for DolphinScheduler.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you foll [...]
+  "__html": "<h1>Standalone</h1>\n<p>Standalone only for quick look for DolphinScheduler.</p>\n<p>If you are a green hand and want to experience DolphinScheduler, we recommended you install follow <a href=\"standalone.md\">Standalone</a>. If you want to experience more complete functions or schedule large tasks number, we recommended you install follow <a href=\"pseudo-cluster.md\">pseudo-cluster deployment</a>. If you want to using DolphinScheduler in production, we recommended you foll [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/installation/standalone.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/monitor.html b/en-us/docs/latest/user_doc/guide/monitor.html
index 48c737e..ef84c66 100644
--- a/en-us/docs/latest/user_doc/guide/monitor.html
+++ b/en-us/docs/latest/user_doc/guide/monitor.html
@@ -11,47 +11,47 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>Service management</h2>
+<h2>Service Management</h2>
 <ul>
 <li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>
 </ul>
-<h2>master monitoring</h2>
+<h2>Monitor Master Server</h2>
 <ul>
 <li>Mainly related to master information.</li>
 </ul>
 <p align="center">
    <img src="/img/master-jk-en.png" width="80%" />
  </p>
-<h2>worker monitoring</h2>
+<h2>Monitor Worker Server</h2>
 <ul>
 <li>Mainly related to worker information.</li>
 </ul>
 <p align="center">
    <img src="/img/worker-jk-en.png" width="80%" />
  </p>
-<h2>Zookeeper monitoring</h2>
+<h2>Monitor ZooKeeper</h2>
 <ul>
 <li>Mainly related configuration information of each worker and master in ZooKeeper.</li>
 </ul>
 <p alignlinux ="center">
    <img src="/img/zookeeper-monitor-en.png" width="80%" />
  </p>
-<h2>DB monitoring</h2>
+<h2>Monitor DB</h2>
 <ul>
 <li>Mainly the health of the DB</li>
 </ul>
 <p align="center">
    <img src="/img/mysql-jk-en.png" width="80%" />
  </p>
-<h2>Statistics management</h2>
+<h2>Statistics Management</h2>
 <p align="center">
    <img src="/img/statistics-en.png" width="80%" />
  </p>
 <ul>
 <li>Number of commands to be executed: statistics on the t_ds_command table</li>
 <li>The number of failed commands: statistics on the t_ds_error_command table</li>
-<li>Number of tasks to run: Count the data of task_queue in Zookeeper</li>
-<li>Number of tasks to be killed: Count the data of task_kill in Zookeeper</li>
+<li>Number of tasks to run: Count the data of task_queue in ZooKeeper</li>
+<li>Number of tasks to be killed: Count the data of task_kill in ZooKeeper</li>
 </ul>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/guide/monitor.json b/en-us/docs/latest/user_doc/guide/monitor.json
index d3f3323..0e67a38 100644
--- a/en-us/docs/latest/user_doc/guide/monitor.json
+++ b/en-us/docs/latest/user_doc/guide/monitor.json
@@ -1,6 +1,6 @@
 {
   "filename": "monitor.md",
-  "__html": "<h1>Monitor</h1>\n<h2>Service management</h2>\n<ul>\n<li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>\n</ul>\n<h2>master monitoring</h2>\n<ul>\n<li>Mainly related to master information.</li>\n</ul>\n<p align=\"center\">\n   <img src=\"/img/master-jk-en.png\" width=\"80%\" />\n </p>\n<h2>worker monitoring</h2>\n<ul>\n<li>Mainly related to worker information.</li>\n</ul>\n<p align=\"center\">\n   [...]
+  "__html": "<h1>Monitor</h1>\n<h2>Service Management</h2>\n<ul>\n<li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>\n</ul>\n<h2>Monitor Master Server</h2>\n<ul>\n<li>Mainly related to master information.</li>\n</ul>\n<p align=\"center\">\n   <img src=\"/img/master-jk-en.png\" width=\"80%\" />\n </p>\n<h2>Monitor Worker Server</h2>\n<ul>\n<li>Mainly related to worker information.</li>\n</ul>\n<p align=\"cente [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/monitor.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.html b/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.html
index a9084db..b012756 100644
--- a/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.html
+++ b/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.html
@@ -13,9 +13,9 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>The dolphinscheduler-skywalking module provides <a href="https://skywalking.apache.org/">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>
 <p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>
-<h1>Installation</h1>
+<h2>Installation</h2>
 <p>The following configuration is used to enable SkyWalking agent.</p>
-<h3>Through environment variable configuration (for Docker Compose)</h3>
+<h3>Through Environment Variable Configuration (for Docker Compose)</h3>
 <p>Modify SkyWalking environment variables in <code>docker/docker-swarm/config.env.sh</code>:</p>
 <pre><code>SKYWALKING_ENABLE=true
 SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
@@ -25,7 +25,7 @@ SW_GRPC_LOG_SERVER_PORT=11800
 <p>And run</p>
 <pre><code class="language-shell"><span class="hljs-meta">$</span><span class="bash"> docker-compose up -d</span>
 </code></pre>
-<h3>Through environment variable configuration (for Docker)</h3>
+<h3>Through Environment Variable Configuration (for Docker)</h3>
 <pre><code class="language-shell"><span class="hljs-meta">$</span><span class="bash"> docker run -d --name dolphinscheduler \
 -e DATABASE_HOST=<span class="hljs-string">&quot;192.168.x.x&quot;</span> -e DATABASE_PORT=<span class="hljs-string">&quot;5432&quot;</span> -e DATABASE_DATABASE=<span class="hljs-string">&quot;dolphinscheduler&quot;</span> \
 -e DATABASE_USERNAME=<span class="hljs-string">&quot;test&quot;</span> -e DATABASE_PASSWORD=<span class="hljs-string">&quot;test&quot;</span> \
@@ -37,7 +37,7 @@ SW_GRPC_LOG_SERVER_PORT=11800
 -p 12345:12345 \
 apache/dolphinscheduler:2.0.3 all</span>
 </code></pre>
-<h3>Through install_config.conf configuration (for DolphinScheduler <a href="http://install.sh">install.sh</a>)</h3>
+<h3>Through install_config.conf Configuration (for DolphinScheduler <a href="http://install.sh">install.sh</a>)</h3>
 <p>Add the following configurations to <code>${workDir}/conf/config/install_config.conf</code>.</p>
 <pre><code class="language-properties"><span class="hljs-comment">
 # SkyWalking config</span>
@@ -51,9 +51,9 @@ apache/dolphinscheduler:2.0.3 all</span>
 <span class="hljs-attr">skywalkingLogReporterPort</span>=<span class="hljs-string">&quot;11800&quot;</span>
 
 </code></pre>
-<h1>Usage</h1>
+<h2>Usage</h2>
 <h3>Import Dashboard</h3>
-<h4>Import DolphinScheduler Dashboard to SkyWalking Sever</h4>
+<h4>Import DolphinScheduler Dashboard to SkyWalking Server</h4>
 <p>Copy the <code>${dolphinscheduler.home}/ext/skywalking-agent/dashboard/dolphinscheduler.yml</code> file into <code>${skywalking-oap-server.home}/config/ui-initialized-templates/</code> directory, and restart SkyWalking oap-server.</p>
 <h4>View DolphinScheduler Dashboard</h4>
 <p>If you have opened SkyWalking dashboard with a browser before, you need to clear the browser cache.</p>
diff --git a/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.json b/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.json
index 1907c39..77029d5 100644
--- a/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.json
+++ b/en-us/docs/latest/user_doc/guide/observability/skywalking-agent.json
@@ -1,6 +1,6 @@
 {
   "filename": "skywalking-agent.md",
-  "__html": "<h1>SkyWalking Agent</h1>\n<p>The dolphinscheduler-skywalking module provides <a href=\"https://skywalking.apache.org/\">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>\n<p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>\n<h1>Installation</h1>\n<p>The following configuration is used to enable SkyWalking agent.</p>\n<h3>Through environment variable configuration (for Docker Compose)</ [...]
+  "__html": "<h1>SkyWalking Agent</h1>\n<p>The dolphinscheduler-skywalking module provides <a href=\"https://skywalking.apache.org/\">SkyWalking</a> monitor agent for the Dolphinscheduler project.</p>\n<p>This document describes how to enable SkyWalking 8.4+ support with this module (recommended to use SkyWalking 8.5.0).</p>\n<h2>Installation</h2>\n<p>The following configuration is used to enable SkyWalking agent.</p>\n<h3>Through Environment Variable Configuration (for Docker Compose)</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/observability/skywalking-agent.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/open-api.html b/en-us/docs/latest/user_doc/guide/open-api.html
index 591b0bf..808754c 100644
--- a/en-us/docs/latest/user_doc/guide/open-api.html
+++ b/en-us/docs/latest/user_doc/guide/open-api.html
@@ -14,7 +14,7 @@
 <h2>Background</h2>
 <p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>
 <h2>The Operation Steps of DS API Calls</h2>
-<h3>Create a token</h3>
+<h3>Create a Token</h3>
 <ol>
 <li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>
 </ol>
@@ -27,7 +27,7 @@
 <p align="center">
    <img src="/img/create-token-en1.png" width="80%" />
  </p>
-<h3>Use token</h3>
+<h3>Use Token</h3>
 <ol>
 <li>Open the API documentation page
 <blockquote>
@@ -51,7 +51,7 @@
 <p align="center">
    <img src="/img/test-api.png" width="80%" />
  </p>  
-<h3>Create a project</h3>
+<h3>Create a Project</h3>
 <p>Here is an example of creating a project named &quot;wudl-flink-test&quot;:</p>
 <p align="center">
    <img src="/img/api/create_project1.png" width="80%" />
@@ -64,7 +64,7 @@
  </p>
 The returned msg information is "success", indicating that we have successfully created the project through API.
 <p>If you are interested in the source code of the project, please continue to read the following:</p>
-<h3>Appendix:The source code of creating a project</h3>
+<h3>Appendix:The Source Code of Creating a Project</h3>
 <p align="center">
    <img src="/img/api/create_source1.png" width="80%" />
  </p>
diff --git a/en-us/docs/latest/user_doc/guide/open-api.json b/en-us/docs/latest/user_doc/guide/open-api.json
index 5ebf21c..f74b0c0 100644
--- a/en-us/docs/latest/user_doc/guide/open-api.json
+++ b/en-us/docs/latest/user_doc/guide/open-api.json
@@ -1,6 +1,6 @@
 {
   "filename": "open-api.md",
-  "__html": "<h1>Open API</h1>\n<h2>Background</h2>\n<p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>\n<h2>The Operation Steps of DS API Calls</h2>\n<h3>Create a token</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>\n</ol>\n<p align=\"center\ [...]
+  "__html": "<h1>Open API</h1>\n<h2>Background</h2>\n<p>Generally, projects and processes are created through pages, but integration with third-party systems requires API calls to manage projects and workflows.</p>\n<h2>The Operation Steps of DS API Calls</h2>\n<h3>Create a Token</h3>\n<ol>\n<li>Log in to the scheduling system, click &quot;Security&quot;, then click &quot;Token manage&quot; on the left, and click &quot;Create token&quot; to create a token.</li>\n</ol>\n<p align=\"center\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/open-api.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/parameter/context.html b/en-us/docs/latest/user_doc/guide/parameter/context.html
index 2f419e2..f15522a 100644
--- a/en-us/docs/latest/user_doc/guide/parameter/context.html
+++ b/en-us/docs/latest/user_doc/guide/parameter/context.html
@@ -12,11 +12,11 @@
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href="priority.md">Parameter Priority</a></p>
-<h2>Local task use global parameter</h2>
+<h2>Local Task Use Global Parameter</h2>
 <p>The premise of local tasks referencing global parameters is that you have already defined <a href="global.md">Global Parameter</a>. The usage is similar to the usage in <a href="local.md">local parameters</a>, but the value of the parameter needs to be configured as the key in the global parameter</p>
 <p><img src="/img/global_parameter.png" alt="parameter-call-global-in-local"></p>
 <p>As shown in the figure above, <code>${biz_date}</code> and <code>${biz_curdate}</code> are examples of local parameters referencing global parameters. Observe the last line of the above figure, local_param_bizdate uses ${global_bizdate} to refer to the global parameter. In the shell script, you can use ${local_param_bizdate} to refer to the value of the global variable global_bizdate, or set the value of local_param_bizdate directly through JDBC. In the same way, local_param refers to [...]
-<h2>Pass parameter from upstream task to downstream</h2>
+<h2>Pass Parameter from Upstream Task to Downstream</h2>
 <p>DolphinScheduler Parameter transfer between tasks is allowed, and the current transfer direction only supports one-way transfer from upstream to downstream. The task types currently supporting this feature are:</p>
 <ul>
 <li><a href="../task/shell.md">Shell</a></li>
diff --git a/en-us/docs/latest/user_doc/guide/parameter/context.json b/en-us/docs/latest/user_doc/guide/parameter/context.json
index ee9ecf8..22086a7 100644
--- a/en-us/docs/latest/user_doc/guide/parameter/context.json
+++ b/en-us/docs/latest/user_doc/guide/parameter/context.json
@@ -1,6 +1,6 @@
 {
   "filename": "context.md",
-  "__html": "<h1>Parameter Context</h1>\n<p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href=\"priority.md\">Parameter Priority</a></p>\n<h2>Local task use global parameter</h2>\n<p>The premise of local tasks referencing  [...]
+  "__html": "<h1>Parameter Context</h1>\n<p>DolphinScheduler provides the ability to refer to each other between parameters, including: local parameters refer to global parameters, and upstream and downstream parameter transfer. Because of the existence of references, it involves the priority of parameters when the parameter names are the same. see also <a href=\"priority.md\">Parameter Priority</a></p>\n<h2>Local Task Use Global Parameter</h2>\n<p>The premise of local tasks referencing  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/parameter/context.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/project/project-list.html b/en-us/docs/latest/user_doc/guide/project/project-list.html
index cd57c3d..4199a1a 100644
--- a/en-us/docs/latest/user_doc/guide/project/project-list.html
+++ b/en-us/docs/latest/user_doc/guide/project/project-list.html
@@ -11,7 +11,7 @@
 </head>
 <body>
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>Create project</h2>
+<h2>Create Project</h2>
 <ul>
 <li>
 <p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>
@@ -20,7 +20,7 @@
 </p>
 </li>
 </ul>
-<h2>Project home</h2>
+<h2>Project Home</h2>
 <ul>
 <li>
 <p>Click the project name link on the project management page to enter the project home page, as shown in the figure below, the project home page contains the task status statistics, process status statistics, and workflow definition statistics of the project. The introduction for those metric:</p>
diff --git a/en-us/docs/latest/user_doc/guide/project/project-list.json b/en-us/docs/latest/user_doc/guide/project/project-list.json
index 4110a15..1a9807f 100644
--- a/en-us/docs/latest/user_doc/guide/project/project-list.json
+++ b/en-us/docs/latest/user_doc/guide/project/project-list.json
@@ -1,6 +1,6 @@
 {
   "filename": "project-list.md",
-  "__html": "<h1>Project</h1>\n<h2>Create project</h2>\n<ul>\n<li>\n<p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>\n<p align=\"center\">\n    <img src=\"/img/create_project_en1.png\" width=\"80%\" />\n</p>\n</li>\n</ul>\n<h2>Project home</h2>\n<ul>\n<li>\n<p>Click the project name link on the project management  [...]
+  "__html": "<h1>Project</h1>\n<h2>Create Project</h2>\n<ul>\n<li>\n<p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name, project description, and click &quot;Submit&quot; to create a new project.</p>\n<p align=\"center\">\n    <img src=\"/img/create_project_en1.png\" width=\"80%\" />\n</p>\n</li>\n</ul>\n<h2>Project Home</h2>\n<ul>\n<li>\n<p>Click the project name link on the project management  [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/project-list.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/project/task-instance.html b/en-us/docs/latest/user_doc/guide/project/task-instance.html
index 822fd2a..9bdff1f 100644
--- a/en-us/docs/latest/user_doc/guide/project/task-instance.html
+++ b/en-us/docs/latest/user_doc/guide/project/task-instance.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>
diff --git a/en-us/docs/latest/user_doc/guide/project/task-instance.json b/en-us/docs/latest/user_doc/guide/project/task-instance.json
index 95ae7f9..b298fcb 100644
--- a/en-us/docs/latest/user_doc/guide/project/task-instance.json
+++ b/en-us/docs/latest/user_doc/guide/project/task-instance.json
@@ -1,6 +1,6 @@
 {
   "filename": "task-instance.md",
-  "__html": "<h2>Task instance</h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>\n   <p align=\"center\">\n      <img src=\"/img/task-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>\n<p><span id=taskLog>View log:</span>Click the &quot;view log&quot; button in the operation c [...]
+  "__html": "<h1>Task Instance</h1>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>\n   <p align=\"center\">\n      <img src=\"/img/task-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>\n<p><span id=taskLog>View log:</span>Click the &quot;view log&quot; button in the operation c [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/task-instance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/project/workflow-definition.html b/en-us/docs/latest/user_doc/guide/project/workflow-definition.html
index 96969c4..9972ea8 100644
--- a/en-us/docs/latest/user_doc/guide/project/workflow-definition.html
+++ b/en-us/docs/latest/user_doc/guide/project/workflow-definition.html
@@ -10,8 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2><span id=creatDag> Create workflow definition</span></h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2><span id=creatDag> Create Workflow Definition</span></h2>
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>
@@ -59,7 +59,7 @@
 <blockquote>
 <p>For other types of tasks, please refer to <a href="#TaskParamers">Task Node Type and Parameter Settings</a>.</p>
 </blockquote>
-<h2>Workflow definition operation function</h2>
+<h2>Workflow Definition Operation Function</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown below:</p>
 <p align="center">
 <img src="/img/work_list_en.png" width="80%" />
@@ -79,7 +79,7 @@ The operation functions of the workflow definition list are as follows:
 </p>
 </li>
 </ul>
-<h2><span id=runWorkflow>Run the workflow</span></h2>
+<h2><span id=runWorkflow>Run the Workflow</span></h2>
 <ul>
 <li>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown in the figure below, click the &quot;Go Online&quot; button <img src="/img/online.png" width="35"/>,Go online workflow.</p>
@@ -117,7 +117,7 @@ The operation functions of the workflow definition list are as follows:
 </blockquote>
 </li>
 </ul>
-<h2><span id=creatTiming>Workflow timing</span></h2>
+<h2><span id=creatTiming>Workflow Timing</span></h2>
 <ul>
 <li>Create timing: Click Project Management-&gt;Workflow-&gt;Workflow Definition, enter the workflow definition page, go online the workflow, click the &quot;timing&quot; button <img src="/img/timing.png" width="35"/> ,The timing parameter setting dialog box pops up, as shown in the figure below:<p align="center">
     <img src="/img/time_schedule_en.png" width="80%" />
@@ -135,7 +135,7 @@ The operation functions of the workflow definition list are as follows:
 </p>
 </li>
 </ul>
-<h2>Import workflow</h2>
+<h2>Import Workflow</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, click the &quot;Import Workflow&quot; button to import the local workflow file, the workflow definition list displays the imported workflow, and the status is offline.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/guide/project/workflow-definition.json b/en-us/docs/latest/user_doc/guide/project/workflow-definition.json
index dac03dc..1662ac7 100644
--- a/en-us/docs/latest/user_doc/guide/project/workflow-definition.json
+++ b/en-us/docs/latest/user_doc/guide/project/workflow-definition.json
@@ -1,6 +1,6 @@
 {
   "filename": "workflow-definition.md",
-  "__html": "<h1>Workflow definition</h1>\n<h2><span id=creatDag> Create workflow definition</span></h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>\n<p align=\"center\">\n    <img src=\"/img/dag5.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Drag in the toolbar <img src=\" [...]
+  "__html": "<h1>Workflow Definition</h1>\n<h2><span id=creatDag> Create Workflow Definition</span></h2>\n<ul>\n<li>\n<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG edit</strong> page, as shown in the following figure:</p>\n<p align=\"center\">\n    <img src=\"/img/dag5.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Drag in the toolbar <img src=\" [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/workflow-definition.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/project/workflow-instance.html b/en-us/docs/latest/user_doc/guide/project/workflow-instance.html
index a6dbf68..fd76885 100644
--- a/en-us/docs/latest/user_doc/guide/project/workflow-instance.html
+++ b/en-us/docs/latest/user_doc/guide/project/workflow-instance.html
@@ -10,8 +10,8 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>View workflow instance</h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>View Workflow Instance</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align="center">
       <img src="/img/instance-list-en.png" width="80%" />
@@ -22,7 +22,7 @@
 </p>
 </li>
 </ul>
-<h2>View task log</h2>
+<h2>View Task Log</h2>
 <ul>
 <li>Enter the workflow instance page, click the workflow name, enter the DAG view page, double-click the task node, as shown in the following figure: <p align="center">
    <img src="/img/instanceViewLog-en.png" width="80%" />
@@ -33,7 +33,7 @@
  </p>
 </li>
 </ul>
-<h2>View task history</h2>
+<h2>View Task History</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
 <li>Double-click the task node, as shown in the figure below, click &quot;View History&quot; to jump to the task instance page, and display a list of task instances running by the workflow instance <p align="center">
@@ -41,7 +41,7 @@
  </p>
 </li>
 </ul>
-<h2>View operating parameters</h2>
+<h2>View Operating Parameters</h2>
 <ul>
 <li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
 <li>Click the icon in the upper left corner <img src="/img/run_params_button.png" width="35"/>,View the startup parameters of the workflow instance; click the icon <img src="/img/global_param.png" width="35"/>,View the global and local parameters of the workflow instance, as shown in the following figure: <p align="center">
@@ -49,7 +49,7 @@
  </p>
 </li>
 </ul>
-<h2>Workflow instance operation function</h2>
+<h2>Workflow Instance Operation Function</h2>
 <p>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:</p>
   <p align="center">
     <img src="/img/instance-list-en.png" width="80%" />
diff --git a/en-us/docs/latest/user_doc/guide/project/workflow-instance.json b/en-us/docs/latest/user_doc/guide/project/workflow-instance.json
index 7eb80e1..9d9b1cc 100644
--- a/en-us/docs/latest/user_doc/guide/project/workflow-instance.json
+++ b/en-us/docs/latest/user_doc/guide/project/workflow-instance.json
@@ -1,6 +1,6 @@
 {
   "filename": "workflow-instance.md",
-  "__html": "<h1>Workflow instance</h1>\n<h2>View workflow instance</h2>\n<ul>\n<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align=\"center\">\n      <img src=\"/img/instance-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.<p align=\"center\">\n  <img src=\"/img/instance-runs-e [...]
+  "__html": "<h1>Workflow Instance</h1>\n<h2>View Workflow Instance</h2>\n<ul>\n<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align=\"center\">\n      <img src=\"/img/instance-list-en.png\" width=\"80%\" />\n   </p>\n</li>\n<li>Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.<p align=\"center\">\n  <img src=\"/img/instance-runs-e [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/project/workflow-instance.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/resource.html b/en-us/docs/latest/user_doc/guide/resource.html
index 662163d..3a37abd 100644
--- a/en-us/docs/latest/user_doc/guide/resource.html
+++ b/en-us/docs/latest/user_doc/guide/resource.html
@@ -19,7 +19,7 @@
 <li>If you using Hadoop cluster with HA, you need to enable HDFS resource upload, and you need to copy the <code>core-site.xml</code> and <code>hdfs-site.xml</code> under the Hadoop cluster to <code>/opt/dolphinscheduler/conf</code>, otherwise Skip step</li>
 </ul>
 </blockquote>
-<h2>hdfs resource configuration</h2>
+<h2>HDFS Resource Configuration</h2>
 <ul>
 <li>Upload resource files and udf functions, all uploaded files and resources will be stored on hdfs, so the following configuration items are required:</li>
 </ul>
@@ -53,7 +53,7 @@ conf/common/hadoop.properties
 <li>Only one address needs to be configured for yarn.resourcemanager.ha.rm.ids and yarn.application.status.address, and the other address is empty.</li>
 <li>You need to copy core-site.xml and hdfs-site.xml from the conf directory of the Hadoop cluster to the conf directory of the dolphinscheduler project, and restart the api-server service.</li>
 </ul>
-<h2>File management</h2>
+<h2>File Management</h2>
 <blockquote>
 <p>It is the management of various resource files, including creating basic txt/log/sh/conf/py/java and other files, uploading jar packages and other types of files, and can do edit, rename, download, delete and other operations.</p>
 </blockquote>
@@ -107,8 +107,8 @@ conf/common/hadoop.properties
 </blockquote>
 </li>
 </ul>
-<h2>UDF management</h2>
-<h3>Resource management</h3>
+<h2>UDF Management</h2>
+<h3>Resource Management</h3>
 <blockquote>
 <p>The resource management and file management functions are similar. The difference is that the resource management is the uploaded UDF function, and the file management uploads the user program, script and configuration file.
 Operation function: rename, download, delete.</p>
@@ -120,7 +120,7 @@ Operation function: rename, download, delete.</p>
 </blockquote>
 </li>
 </ul>
-<h3>Function management</h3>
+<h3>Function Management</h3>
 <ul>
 <li>Create UDF function
 <blockquote>
diff --git a/en-us/docs/latest/user_doc/guide/resource.json b/en-us/docs/latest/user_doc/guide/resource.json
index df72329..990d641 100644
--- a/en-us/docs/latest/user_doc/guide/resource.json
+++ b/en-us/docs/latest/user_doc/guide/resource.json
@@ -1,6 +1,6 @@
 {
   "filename": "resource.md",
-  "__html": "<h1>Resource Center</h1>\n<p>If you want to use the resource upload function, you can select the local file directory for a single machine(this operation does not need to deploy Hadoop). Or you can also upload to a Hadoop or MinIO cluster, at this time, you need to have Hadoop (2.6+) or MinIO and other related environments</p>\n<blockquote>\n<p><strong><em>Note:</em></strong></p>\n<ul>\n<li>If the resource upload function is used, the deployment user in <a href=\"installatio [...]
+  "__html": "<h1>Resource Center</h1>\n<p>If you want to use the resource upload function, you can select the local file directory for a single machine(this operation does not need to deploy Hadoop). Or you can also upload to a Hadoop or MinIO cluster, at this time, you need to have Hadoop (2.6+) or MinIO and other related environments</p>\n<blockquote>\n<p><strong><em>Note:</em></strong></p>\n<ul>\n<li>If the resource upload function is used, the deployment user in <a href=\"installatio [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/resource.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/security.html b/en-us/docs/latest/user_doc/guide/security.html
index 5a6eba0..367a74d 100644
--- a/en-us/docs/latest/user_doc/guide/security.html
+++ b/en-us/docs/latest/user_doc/guide/security.html
@@ -15,7 +15,7 @@
 <li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>
 <li>Administrator login, default user name and password: admin/dolphinscheduler123</li>
 </ul>
-<h2>Create queue</h2>
+<h2>Create Queue</h2>
 <ul>
 <li>Queue is used when the &quot;queue&quot; parameter is needed to execute programs such as spark and mapreduce.</li>
 <li>The administrator enters the Security Center-&gt;Queue Management page and clicks the &quot;Create Queue&quot; button to create a queue.</li>
@@ -23,7 +23,7 @@
 <p align="center">
    <img src="/img/create-queue-en.png" width="80%" />
  </p>
-<h2>Add tenant</h2>
+<h2>Add Tenant</h2>
 <ul>
 <li>The tenant corresponds to the Linux user, which is used by the worker to submit the job. Task will fail if Linux does not exists this user. You can set the parameter <code>worker.tenant.auto.create</code> as <code>true</code> in configuration file <code>worker.properties</code>. After that DolphinScheduler would create user if not exists, The property <code>worker.tenant.auto.create=true</code> requests worker run <code>sudo</code> command without password.</li>
 <li>Tenant Code: <strong>Tenant Code is the only user on Linux and cannot be repeated</strong></li>
@@ -32,7 +32,7 @@
  <p align="center">
     <img src="/img/addtenant-en.png" width="80%" />
   </p>
-<h2>Create normal user</h2>
+<h2>Create Normal User</h2>
 <ul>
 <li>
 <p>Users are divided into <strong>administrator users</strong> and <strong>normal users</strong></p>
@@ -63,7 +63,7 @@
 <li>The administrator enters the Security Center-&gt;User Management page and clicks the &quot;Edit&quot; button. When editing user information, enter the new password to modify the user password.</li>
 <li>After a normal user logs in, click the user information in the user name drop-down box to enter the password modification page, enter the password and confirm the password and click the &quot;Edit&quot; button, then the password modification is successful.</li>
 </ul>
-<h2>Create alarm group</h2>
+<h2>Create Alarm Group</h2>
 <ul>
 <li>The alarm group is a parameter set at startup. After the process ends, the status of the process and other information will be sent to the alarm group in the form of email.</li>
 </ul>
@@ -74,7 +74,7 @@
   <img src="/img/mail-en.png" width="80%" />
 </li>
 </ul>
-<h2>Token management</h2>
+<h2>Token Management</h2>
 <blockquote>
 <p>Since the back-end interface has login check, token management provides a way to perform various operations on the system by calling the interface.</p>
 </blockquote>
@@ -145,7 +145,7 @@
 <ul>
 <li>Resources, data sources, and UDF function authorization are the same as project authorization.</li>
 </ul>
-<h2>Worker grouping</h2>
+<h2>Worker Grouping</h2>
 <p>Each worker node will belong to its own worker group, and the default group is &quot;default&quot;.</p>
 <p>When the task is executed, the task can be assigned to the specified worker group, and the task will be executed by the worker node in the group.</p>
 <blockquote>
diff --git a/en-us/docs/latest/user_doc/guide/security.json b/en-us/docs/latest/user_doc/guide/security.json
index 352c4c9..fac62d6 100644
--- a/en-us/docs/latest/user_doc/guide/security.json
+++ b/en-us/docs/latest/user_doc/guide/security.json
@@ -1,6 +1,6 @@
 {
   "filename": "security.md",
-  "__html": "<h1>Security</h1>\n<ul>\n<li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>\n<li>Administrator login, default user name and password: admin/dolphinscheduler123</li>\n</ul>\n<h2>Create queue</h2>\n<ul>\ [...]
+  "__html": "<h1>Security</h1>\n<ul>\n<li>Only the administrator account in the security center has the authority to operate. It has functions such as queue management, tenant management, user management, alarm group management, worker group management, token management, etc. In the user management module, resources, data sources, projects, etc. Authorization</li>\n<li>Administrator login, default user name and password: admin/dolphinscheduler123</li>\n</ul>\n<h2>Create Queue</h2>\n<ul>\ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/security.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/conditions.html b/en-us/docs/latest/user_doc/guide/task/conditions.html
index 0af5488..c3ca5be 100644
--- a/en-us/docs/latest/user_doc/guide/task/conditions.html
+++ b/en-us/docs/latest/user_doc/guide/task/conditions.html
@@ -43,7 +43,7 @@
 </ul>
 </li>
 </ul>
-<h2>Related task</h2>
+<h2>Related Task</h2>
 <p><a href="switch.md">switch</a>: <a href="conditions.md">Condition</a>task mainly executes the corresponding branch based on the execution status (success, failure) of the upstream node. The <a href="switch.md">Switch</a> task mainly executes the corresponding branch based on the value of the <a href="../parameter/global.md">global parameter</a> and the judgment expression result written by the user.</p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
   <script src="//cdn.jsdelivr.net/npm/react@15.6.2/dist/react-with-addons.min.js"></script>
diff --git a/en-us/docs/latest/user_doc/guide/task/conditions.json b/en-us/docs/latest/user_doc/guide/task/conditions.json
index 159bfe1..f5b1466 100644
--- a/en-us/docs/latest/user_doc/guide/task/conditions.json
+++ b/en-us/docs/latest/user_doc/guide/task/conditions.json
@@ -1,6 +1,6 @@
 {
   "filename": "conditions.md",
-  "__html": "<h1>Conditions</h1>\n<p>Conditions is a condition node, determining which downstream task should be run based on the condition set to it. For now, the Conditions support multiple upstream tasks, but only two downstream tasks. When the number of upstream tasks exceeds one, complex upstream dependencies can be achieved through <code>and</code> and <code>or</code> operators.</p>\n<h2>Create</h2>\n<p>Drag in the toolbar<img src=\"/img/conditions.png\" width=\"20\"/>The task node [...]
+  "__html": "<h1>Conditions</h1>\n<p>Conditions is a condition node, determining which downstream task should be run based on the condition set to it. For now, the Conditions support multiple upstream tasks, but only two downstream tasks. When the number of upstream tasks exceeds one, complex upstream dependencies can be achieved through <code>and</code> and <code>or</code> operators.</p>\n<h2>Create</h2>\n<p>Drag in the toolbar<img src=\"/img/conditions.png\" width=\"20\"/>The task node [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/conditions.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/datax.html b/en-us/docs/latest/user_doc/guide/task/datax.html
index c241e86..f27e4d2 100644
--- a/en-us/docs/latest/user_doc/guide/task/datax.html
+++ b/en-us/docs/latest/user_doc/guide/task/datax.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>
 <p>Drag in the toolbar<img src="/img/datax.png" width="35"/>Task node into the drawing board</p>
diff --git a/en-us/docs/latest/user_doc/guide/task/datax.json b/en-us/docs/latest/user_doc/guide/task/datax.json
index 6ab67d3..7d82360 100644
--- a/en-us/docs/latest/user_doc/guide/task/datax.json
+++ b/en-us/docs/latest/user_doc/guide/task/datax.json
@@ -1,6 +1,6 @@
 {
   "filename": "datax.md",
-  "__html": "<h1>DATAX</h1>\n<ul>\n<li>\n<p>Drag in the toolbar<img src=\"/img/datax.png\" width=\"35\"/>Task node into the drawing board</p>\n<p align=\"center\">\n <img src=\"/img/datax-en.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)</p>\n</li>\n<li>\n<p>Data source: selec [...]
+  "__html": "<h1>DataX</h1>\n<ul>\n<li>\n<p>Drag in the toolbar<img src=\"/img/datax.png\" width=\"35\"/>Task node into the drawing board</p>\n<p align=\"center\">\n <img src=\"/img/datax-en.png\" width=\"80%\" />\n</p>\n</li>\n<li>\n<p>Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)</p>\n</li>\n<li>\n<p>Data source: selec [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/datax.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/dependent.html b/en-us/docs/latest/user_doc/guide/task/dependent.html
index 6a66e10..6ebcfac 100644
--- a/en-us/docs/latest/user_doc/guide/task/dependent.html
+++ b/en-us/docs/latest/user_doc/guide/task/dependent.html
@@ -10,7 +10,7 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <ul>
 <li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>
 </ul>
diff --git a/en-us/docs/latest/user_doc/guide/task/dependent.json b/en-us/docs/latest/user_doc/guide/task/dependent.json
index 82629c8..178b1f8 100644
--- a/en-us/docs/latest/user_doc/guide/task/dependent.json
+++ b/en-us/docs/latest/user_doc/guide/task/dependent.json
@@ -1,6 +1,6 @@
 {
   "filename": "dependent.md",
-  "__html": "<h1>DEPENDENT</h1>\n<ul>\n<li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>\n</ul>\n<blockquote>\n<p>Drag the <img src=\"https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png\" alt=\"PNG\"> task node in the toolbar to the drawing board, as shown in the following [...]
+  "__html": "<h1>Dependent</h1>\n<ul>\n<li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>\n</ul>\n<blockquote>\n<p>Drag the <img src=\"https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png\" alt=\"PNG\"> task node in the toolbar to the drawing board, as shown in the following [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/dependent.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/flink.html b/en-us/docs/latest/user_doc/guide/task/flink.html
index 671a15d..4a4bb6a 100644
--- a/en-us/docs/latest/user_doc/guide/task/flink.html
+++ b/en-us/docs/latest/user_doc/guide/task/flink.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href="https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/">flink cli</a> for more details.</p>
-<h2>Create task</h2>
+<h2>Create Task</h2>
 <ul>
 <li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>
 <li>Drag the <img src="/img/tasks/icons/flink.png" width="15"/> from the toolbar to the drawing board.</li>
@@ -50,13 +50,13 @@
 <li><strong>Predecessor task</strong>: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This is a common introductory case in the Big Data ecosystem, which often applied to computational frameworks such as MapReduce, Flink and Spark. The main purpose is to count the number of identical words in the input text. (Flink's releases come with this example job)</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the Flink task node, you will need to use the Resource Centre to upload the jar package for the executable. Refer to the <a href="../resource.md">resource center</a>.</p>
 <p>After configuring the Resource Centre, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/upload_flink.png" alt="resource_upload"></p>
-<h4>Configuring Flink nodes</h4>
+<h4>Configure Flink Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/flink_task.png" alt="demo-flink-simple"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/latest/user_doc/guide/task/flink.json b/en-us/docs/latest/user_doc/guide/task/flink.json
index 365ea1f..4303352 100644
--- a/en-us/docs/latest/user_doc/guide/task/flink.json
+++ b/en-us/docs/latest/user_doc/guide/task/flink.json
@@ -1,6 +1,6 @@
 {
   "filename": "flink.md",
-  "__html": "<h1>Flink</h1>\n<h2>Overview</h2>\n<p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href=\"https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/\">flink cli</a> for more details.</p>\n<h2>Create task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the D [...]
+  "__html": "<h1>Flink</h1>\n<h2>Overview</h2>\n<p>Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command <code>flink run</code>. See <a href=\"https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/\">flink cli</a> for more details.</p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the D [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/flink.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/map-reduce.html b/en-us/docs/latest/user_doc/guide/task/map-reduce.html
index cc2e583..e5d895c 100644
--- a/en-us/docs/latest/user_doc/guide/task/map-reduce.html
+++ b/en-us/docs/latest/user_doc/guide/task/map-reduce.html
@@ -58,13 +58,13 @@
 <li><strong>User-defined parameter</strong>: It is a user-defined parameter of the MapReduce part, which will replace the content with ${variable} in the script</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This example is a common introductory type of MapReduce application, which is designed to count the number of identical words in the input text.</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the MapReduce task node, you will need to use the Resource Centre to upload the jar package for the executable. Refer to the <a href="../resource.md">resource centre</a>.</p>
 <p>After configuring the Resource Centre, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/resource_upload.png" alt="resource_upload"></p>
-<h4>Configuring MapReduce nodes</h4>
+<h4>Configure MapReduce Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/mr.png" alt="demo-mr-simple"></p>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/latest/user_doc/guide/task/map-reduce.json b/en-us/docs/latest/user_doc/guide/task/map-reduce.json
index 8203db0..7dc92ef 100644
--- a/en-us/docs/latest/user_doc/guide/task/map-reduce.json
+++ b/en-us/docs/latest/user_doc/guide/task/map-reduce.json
@@ -1,6 +1,6 @@
 {
   "filename": "map-reduce.md",
-  "__html": "<h1>MapReduce</h1>\n<h2>Overview</h2>\n<ul>\n<li>MapReduce(MR) task type for executing MapReduce programs. For MapReduce nodes, the worker submits the task by using the Hadoop command <code>hadoop jar</code>. See <a href=\"https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CommandsManual.html#jar\">Hadoop Command Manual</a> for more details.</li>\n</ul>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click [...]
+  "__html": "<h1>MapReduce</h1>\n<h2>Overview</h2>\n<ul>\n<li>MapReduce(MR) task type for executing MapReduce programs. For MapReduce nodes, the worker submits the task by using the Hadoop command <code>hadoop jar</code>. See <a href=\"https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CommandsManual.html#jar\">Hadoop Command Manual</a> for more details.</li>\n</ul>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/map-reduce.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/spark.html b/en-us/docs/latest/user_doc/guide/task/spark.html
index 1a034f9..f380446 100644
--- a/en-us/docs/latest/user_doc/guide/task/spark.html
+++ b/en-us/docs/latest/user_doc/guide/task/spark.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href="https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit">spark-submit</a> for more details.</p>
-<h2>Create task</h2>
+<h2>Create Task</h2>
 <ul>
 <li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>
 <li>Drag the <img src="/img/tasks/icons/spark.png" width="15"/> from the toolbar to the drawing board.</li>
@@ -47,13 +47,13 @@
 <li><strong>Predecessor task</strong>: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Execute the WordCount program</h3>
+<h3>Execute the WordCount Program</h3>
 <p>This is a common introductory case in the Big Data ecosystem, which often applied to computational frameworks such as MapReduce, Flink and Spark. The main purpose is to count the number of identical words in the input text.</p>
-<h4>Uploading the main package</h4>
+<h4>Upload the Main Package</h4>
 <p>When using the Spark task node, you will need to use the Resource Center to upload the jar package for the executable. Refer to the <a href="../resource.md">resource center</a>.</p>
 <p>After configuring the Resource Center, you can upload the required target files directly using drag and drop.</p>
 <p><img src="/img/tasks/demo/upload_spark.png" alt="resource_upload"></p>
-<h4>Configuring Spark nodes</h4>
+<h4>Configure Spark Nodes</h4>
 <p>Simply configure the required content according to the parameter descriptions above.</p>
 <p><img src="/img/tasks/demo/spark_task.png" alt="demo-spark-simple"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/latest/user_doc/guide/task/spark.json b/en-us/docs/latest/user_doc/guide/task/spark.json
index 3989ac6..f525035 100644
--- a/en-us/docs/latest/user_doc/guide/task/spark.json
+++ b/en-us/docs/latest/user_doc/guide/task/spark.json
@@ -1,6 +1,6 @@
 {
   "filename": "spark.md",
-  "__html": "<h1>Spark</h1>\n<h2>Overview</h2>\n<p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href=\"https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit\">spark-submit</a> for more details.</p>\n<h2>Create task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Work [...]
+  "__html": "<h1>Spark</h1>\n<h2>Overview</h2>\n<p>Spark task type for executing Spark programs. For Spark nodes, the worker submits the task by using the spark command <code>spark submit</code>. See <a href=\"https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit\">spark-submit</a> for more details.</p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management -&gt; Project Name -&gt; Workflow Definition, and click the &quot;Create Work [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/spark.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/task/sql.html b/en-us/docs/latest/user_doc/guide/task/sql.html
index c5270e7..0dc2d60 100644
--- a/en-us/docs/latest/user_doc/guide/task/sql.html
+++ b/en-us/docs/latest/user_doc/guide/task/sql.html
@@ -13,7 +13,7 @@
   <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
 <h2>Overview</h2>
 <p>SQL task, used to connect to database and execute SQL.</p>
-<h2>create data source</h2>
+<h2>Create Data Source</h2>
 <p>Refer to <a href="../datasource/introduction.md">Data Source</a></p>
 <h2>Create Task</h2>
 <ul>
@@ -32,10 +32,10 @@
 <li>Post-sql: Post-sql is executed after the sql statement.</li>
 </ul>
 <h2>Task Example</h2>
-<h3>Create a temporary table in hive and write data</h3>
+<h3>Create a Temporary Table in Hive and Write Data</h3>
 <p>This example creates a temporary table <code>tmp_hello_world</code> in hive and write a row of data. Before creating a temporary table, we need to ensure that the table does not exist, so we will use custom parameters to obtain the time of the day as the suffix of the table name every time we run, so that this task can run every day. The format of the created table name is: <code>tmp_hello_world_{yyyyMMdd}</code>.</p>
 <p><img src="/img/tasks/demo/hive-sql.png" alt="hive-sql"></p>
-<h3>After running the task successfully, query the results in hive.</h3>
+<h3>After Running the Task Successfully, Query the Results in Hive.</h3>
 <p>Log in to the bigdata cluster and use 'hive' command or 'beeline' or 'JDBC' and other methods to connect to the 'Apache Hive' for the query. The query SQL is <code>select * from tmp_hello_world_{yyyyMMdd}</code>, please replace '{yyyyMMdd}' with the date of the running day. The query screenshot is as follows:</p>
 <p><img src="/img/tasks/demo/hive-result.png" alt="hive-sql"></p>
 <h2>Notice</h2>
diff --git a/en-us/docs/latest/user_doc/guide/task/sql.json b/en-us/docs/latest/user_doc/guide/task/sql.json
index 927740b..03b3532 100644
--- a/en-us/docs/latest/user_doc/guide/task/sql.json
+++ b/en-us/docs/latest/user_doc/guide/task/sql.json
@@ -1,6 +1,6 @@
 {
   "filename": "sql.md",
-  "__html": "<h1>SQL</h1>\n<h2>Overview</h2>\n<p>SQL task, used to connect to database and execute SQL.</p>\n<h2>create data source</h2>\n<p>Refer to <a href=\"../datasource/introduction.md\">Data Source</a></p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>\n<li>Drag <img src=\"/img/tasks/icons/sql.png\" width=\"25\"/> from the toolbar to the drawing board.</ [...]
+  "__html": "<h1>SQL</h1>\n<h2>Overview</h2>\n<p>SQL task, used to connect to database and execute SQL.</p>\n<h2>Create Data Source</h2>\n<p>Refer to <a href=\"../datasource/introduction.md\">Data Source</a></p>\n<h2>Create Task</h2>\n<ul>\n<li>Click Project Management-Project Name-Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</li>\n<li>Drag <img src=\"/img/tasks/icons/sql.png\" width=\"25\"/> from the toolbar to the drawing board.</ [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/task/sql.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/en-us/docs/latest/user_doc/guide/upgrade.html b/en-us/docs/latest/user_doc/guide/upgrade.html
index ff81c2a..ececb07 100644
--- a/en-us/docs/latest/user_doc/guide/upgrade.html
+++ b/en-us/docs/latest/user_doc/guide/upgrade.html
@@ -10,16 +10,16 @@
   <link rel="stylesheet" href="/build/vendor.23870e5.css">
 </head>
 <body>
-  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
-<h2>1. Back Up Previous Version's Files and Database.</h2>
-<h2>2. Stop All Services of DolphinScheduler.</h2>
+  <div id="root"><div class="md2html docs-page" data-reactroot=""><header class="header-container header-container-dark"><div class="header-body"><span class="mobile-menu-btn mobile-menu-btn-dark"></span><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_white.svg"/></a><div class="search search-dark"><span class="icon-search"></span></div><span class="language-switch language-switch-dark">中</span><div class="header-menu"><div><ul class="ant-menu whiteClass ant-menu-light ant- [...]
+<h2>Back Up Previous Version's Files and Database</h2>
+<h2>Stop All Services of DolphinScheduler</h2>
 <p><code>sh ./script/stop-all.sh</code></p>
-<h2>3. Download the New Version's Installation Package.</h2>
+<h2>Download the New Version's Installation Package</h2>
 <ul>
 <li><a href="/en-us/download/download.html">Download</a> the latest version of the installation packages.</li>
 <li>The following upgrade operations need to be performed in the new version's directory.</li>
 </ul>
-<h2>4. Database Upgrade</h2>
+<h2>Database Upgrade</h2>
 <ul>
 <li>
 <p>Modify the following properties in <code>conf/config/install_config.conf</code>.</p>
@@ -42,8 +42,8 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 <p><code>sh ./script/create-dolphinscheduler.sh</code></p>
 </li>
 </ul>
-<h2>5. Backend Service Upgrade.</h2>
-<h3>5.1 Modify the Content in <code>conf/config/install_config.conf</code> File.</h3>
+<h2>Backend Service Upgrade</h2>
+<h3>Modify the Content in <code>conf/config/install_config.conf</code> File</h3>
 <ul>
 <li>Standalone Deployment please refer the [6, Modify running arguments] in <a href="./installation/standalone.md">Standalone-Deployment</a>.</li>
 <li>Cluster Deployment please refer the [6, Modify running arguments] in <a href="./installation/cluster.md">Cluster-Deployment</a>.</li>
@@ -77,7 +77,7 @@ SPRING_DATASOURCE_PASSWORD=&quot;dolphinscheduler&quot;
 <pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash">worker service is deployed on <span class="hljs-built_in">which</span> machine, and also specify <span class="hljs-built_in">which</span> worker group this worker belongs to.</span> 
 workers=&quot;ds1:service1,ds2:service2,ds3:service2&quot;
 </code></pre>
-<h3>5.2 Execute Deploy Script.</h3>
+<h3>Execute Deploy Script</h3>
 <pre><code class="language-shell">`sh install.sh`
 </code></pre>
 </div></section><footer class="footer-container"><div class="footer-body"><div><h3>About us</h3><h4>Do you need feedback? Please contact us through the following ways.</h4></div><div class="contact-container"><ul><li><a href="/en-us/community/development/subscribe.html"><img class="img-base" src="/img/emailgray.png"/><img class="img-change" src="/img/emailblue.png"/><p>Email List</p></a></li><li><a href="https://twitter.com/dolphinschedule"><img class="img-base" src="/img/twittergray.png [...]
diff --git a/en-us/docs/latest/user_doc/guide/upgrade.json b/en-us/docs/latest/user_doc/guide/upgrade.json
index 58df0fb..c7bb7dc 100644
--- a/en-us/docs/latest/user_doc/guide/upgrade.json
+++ b/en-us/docs/latest/user_doc/guide/upgrade.json
@@ -1,6 +1,6 @@
 {
   "filename": "upgrade.md",
-  "__html": "<h1>DolphinScheduler upgrade documentation</h1>\n<h2>1. Back Up Previous Version's Files and Database.</h2>\n<h2>2. Stop All Services of DolphinScheduler.</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. Download the New Version's Installation Package.</h2>\n<ul>\n<li><a href=\"/en-us/download/download.html\">Download</a> the latest version of the installation packages.</li>\n<li>The following upgrade operations need to be performed in the new version's directory.</ [...]
+  "__html": "<h1>DolphinScheduler Upgrade Documentation</h1>\n<h2>Back Up Previous Version's Files and Database</h2>\n<h2>Stop All Services of DolphinScheduler</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>Download the New Version's Installation Package</h2>\n<ul>\n<li><a href=\"/en-us/download/download.html\">Download</a> the latest version of the installation packages.</li>\n<li>The following upgrade operations need to be performed in the new version's directory.</li>\n</ul>\n [...]
   "link": "/dist/en-us/docs/2.0.3/user_doc/guide/upgrade.html",
   "meta": {}
 }
\ No newline at end of file