You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by zh...@apache.org on 2022/08/09 09:02:35 UTC

[dolphinscheduler-website] branch history-docs updated: Add contribute doc to version 2.0.6 (#821)

This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch history-docs
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler-website.git


The following commit(s) were added to refs/heads/history-docs by this push:
     new 2e26d8f21 Add contribute doc to version 2.0.6 (#821)
2e26d8f21 is described below

commit 2e26d8f2197aa48bbf67893c373fedf242de8427
Author: Jiajie Zhong <zh...@gmail.com>
AuthorDate: Tue Aug 9 17:02:30 2022 +0800

    Add contribute doc to version 2.0.6 (#821)
---
 .dlc.json                                          |   8 +
 docs/2.0.6/configs/docs2-0-6.js                    | 274 +++++++++
 docs/2.0.6/docs/en/contribute/api-standard.md      | 108 ++++
 .../docs/en/contribute/architecture-design.md      | 315 ++++++++++
 .../backend/mechanism/global-parameter.md          |  61 ++
 .../en/contribute/backend/mechanism/overview.md    |   6 +
 .../en/contribute/backend/mechanism/task/switch.md |   8 +
 docs/2.0.6/docs/en/contribute/backend/spi/alert.md | 101 ++++
 .../docs/en/contribute/backend/spi/datasource.md   |  23 +
 .../docs/en/contribute/backend/spi/registry.md     |  27 +
 docs/2.0.6/docs/en/contribute/backend/spi/task.md  |  15 +
 .../en/contribute/development-environment-setup.md | 209 +++++++
 .../docs/en/contribute/frontend-development.md     | 639 +++++++++++++++++++++
 docs/2.0.6/docs/en/contribute/have-questions.md    |  65 +++
 docs/2.0.6/docs/en/contribute/join/DS-License.md   |  42 ++
 .../docs/en/contribute/join/become-a-committer.md  |  11 +
 docs/2.0.6/docs/en/contribute/join/code-conduct.md |  68 +++
 .../docs/en/contribute/join/commit-message.md      |  94 +++
 docs/2.0.6/docs/en/contribute/join/contribute.md   |  40 ++
 docs/2.0.6/docs/en/contribute/join/document.md     |  62 ++
 docs/2.0.6/docs/en/contribute/join/issue.md        | 136 +++++
 docs/2.0.6/docs/en/contribute/join/microbench.md   | 100 ++++
 docs/2.0.6/docs/en/contribute/join/pull-request.md |  94 +++
 docs/2.0.6/docs/en/contribute/join/review.md       | 153 +++++
 docs/2.0.6/docs/en/contribute/join/security.md     |   8 +
 docs/2.0.6/docs/en/contribute/join/submit-code.md  |  63 ++
 docs/2.0.6/docs/en/contribute/join/subscribe.md    |  23 +
 docs/2.0.6/docs/en/contribute/join/unit-test.md    | 118 ++++
 .../docs/en/contribute/release/release-post.md     |  32 ++
 .../docs/en/contribute/release/release-prepare.md  |  31 +
 docs/2.0.6/docs/en/contribute/release/release.md   | 540 +++++++++++++++++
 docs/2.0.6/docs/zh/contribute/api-standard.md      | 111 ++++
 .../docs/zh/contribute/architecture-design.md      | 301 ++++++++++
 .../backend/mechanism/global-parameter.md          |  61 ++
 .../zh/contribute/backend/mechanism/overview.md    |   6 +
 .../zh/contribute/backend/mechanism/task/switch.md |   8 +
 docs/2.0.6/docs/zh/contribute/backend/spi/alert.md |  93 +++
 .../docs/zh/contribute/backend/spi/datasource.md   |  23 +
 .../docs/zh/contribute/backend/spi/registry.md     |  26 +
 docs/2.0.6/docs/zh/contribute/backend/spi/task.md  |  15 +
 .../zh/contribute/development-environment-setup.md | 201 +++++++
 .../docs/zh/contribute/frontend-development.md     | 639 +++++++++++++++++++++
 docs/2.0.6/docs/zh/contribute/have-questions.md    |  65 +++
 docs/2.0.6/docs/zh/contribute/join/DS-License.md   | 104 ++++
 .../docs/zh/contribute/join/become-a-committer.md  |  12 +
 docs/2.0.6/docs/zh/contribute/join/code-conduct.md |  68 +++
 .../docs/zh/contribute/join/commit-message.md      |  89 +++
 docs/2.0.6/docs/zh/contribute/join/contribute.md   |  42 ++
 docs/2.0.6/docs/zh/contribute/join/document.md     |  62 ++
 docs/2.0.6/docs/zh/contribute/join/issue.md        | 217 +++++++
 docs/2.0.6/docs/zh/contribute/join/microbench.md   |  98 ++++
 docs/2.0.6/docs/zh/contribute/join/pull-request.md |  95 +++
 docs/2.0.6/docs/zh/contribute/join/review.md       | 141 +++++
 docs/2.0.6/docs/zh/contribute/join/security.md     |   8 +
 docs/2.0.6/docs/zh/contribute/join/submit-code.md  |  71 +++
 docs/2.0.6/docs/zh/contribute/join/subscribe.md    |  25 +
 docs/2.0.6/docs/zh/contribute/join/unit-test.md    | 110 ++++
 .../docs/zh/contribute/release/release-post.md     |  30 +
 .../docs/zh/contribute/release/release-prepare.md  |  32 ++
 docs/2.0.6/docs/zh/contribute/release/release.md   | 534 +++++++++++++++++
 img/architecture-design/dag_examples.png           | Bin 0 -> 89184 bytes
 img/architecture-design/distributed_lock.png       | Bin 0 -> 188964 bytes
 .../distributed_lock_procss.png                    | Bin 0 -> 363294 bytes
 img/architecture-design/fault-tolerant.png         | Bin 0 -> 193511 bytes
 img/architecture-design/fault-tolerant_master.png  | Bin 0 -> 169131 bytes
 img/architecture-design/fault-tolerant_worker.png  | Bin 0 -> 164622 bytes
 img/architecture-design/grpc.png                   | Bin 0 -> 68933 bytes
 img/architecture-design/lack_thread.png            | Bin 0 -> 195509 bytes
 img/architecture-design/process_priority.png       | Bin 0 -> 49112 bytes
 img/architecture-design/task_priority.png          | Bin 0 -> 30544 bytes
 .../join/pull-request/checkstyle-idea.png          | Bin 0 -> 128175 bytes
 .../join/pull-request/code-style-idea.png          | Bin 0 -> 397042 bytes
 72 files changed, 6761 insertions(+)

diff --git a/.dlc.json b/.dlc.json
index 0252520ea..9badc5827 100644
--- a/.dlc.json
+++ b/.dlc.json
@@ -22,6 +22,14 @@
       "pattern": "^/zh-cn/download/download.html$",
       "replacement": "https://dolphinscheduler.apache.org/zh-cn/download/download.html"
     },
+    {
+      "pattern": "^/en-us/community/community.html$",
+      "replacement": "https://dolphinscheduler.apache.org/en-us/community/community.html"
+    },
+    {
+      "pattern": "^/zh-cn/community/community.html$",
+      "replacement": "https://dolphinscheduler.apache.org/zh-cn/community/community.html"
+    },
     {
       "pattern": "^/",
       "replacement": "{{BASEURL}}/"
diff --git a/docs/2.0.6/configs/docs2-0-6.js b/docs/2.0.6/configs/docs2-0-6.js
index b6bb1c155..2ceb7873c 100644
--- a/docs/2.0.6/configs/docs2-0-6.js
+++ b/docs/2.0.6/configs/docs2-0-6.js
@@ -276,6 +276,143 @@ export default {
           },
         ]
       },
+      {
+        title: 'Contribution',
+        children: [
+          {
+            title: 'Join',
+            children: [
+              {
+                title: 'Security Report',
+                link: '/en-us/docs/dev/user_doc/contribute/join/security.html',
+              },
+              {
+                title: 'How to Become a Committer',
+                link: '/en-us/docs/dev/user_doc/contribute/join/become-a-committer.html',
+              },
+              {
+                title: 'Subscribe Mailing Lists',
+                link: '/en-us/docs/dev/user_doc/contribute/join/subscribe.html',
+              },
+              {
+                title: 'Participate in Contributing',
+                link: '/en-us/docs/dev/user_doc/contribute/join/contribute.html',
+              },
+              {
+                title: 'Code of Conduct',
+                link: '/en-us/docs/dev/user_doc/contribute/join/code-conduct.html',
+              },
+              {
+                title: 'Review Issue or Pull Requests',
+                link: '/en-us/docs/dev/user_doc/contribute/join/review.html',
+              },
+              {
+                title: 'Submit Code',
+                link: '/en-us/docs/dev/user_doc/contribute/join/submit-code.html',
+              },
+              {
+                title: 'License Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/DS-License.html',
+              },
+              {
+                title: 'Document Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/document.html',
+              },
+              {
+                title: 'Issue Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/issue.html',
+              },
+              {
+                title: 'Pull Request Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/pull-request.html',
+              },
+              {
+                title: 'Commit Message Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/commit-message.html',
+              },
+              {
+                title: 'Micro BenchMark Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/microbench.html',
+              },
+              {
+                title: 'Unit Test Writing Guide',
+                link: '/en-us/docs/dev/user_doc/contribute/join/unit-test.html',
+              },
+            ],
+          },
+          {
+            title: 'Development Environment Setup',
+            link: '/en-us/docs/dev/user_doc/contribute/development-environment-setup.html',
+          },
+          {
+            title: 'Design Document',
+            children: [
+              // TODO not support multiply level for now
+              // {
+              // title: 'SPI',
+              // children: [
+              {
+                title: 'Architecture Design',
+                link: '/en-us/docs/dev/user_doc/contribute/architecture-design.html',
+              },
+              {
+                title: 'Alert SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/alert.html',
+              },
+              {
+                title: 'Registry SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/registry.html',
+              },
+              {
+                title: 'Task SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/task.html',
+              },
+              {
+                title: 'Datasource SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/datasource.html',
+              },
+              {
+                title: 'Mechanism Design',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/mechanism/overview.html',
+              },
+            ],
+          },
+          {
+            title: 'Guidelines',
+            children: [
+              {
+                title: 'Frontend Development',
+                link: '/en-us/docs/dev/user_doc/contribute/frontend-development.html',
+              },
+              {
+                title: 'API Standard',
+                link: '/en-us/docs/dev/user_doc/contribute/api-standard.html',
+              },
+            ],
+          },
+          {
+            title: 'Release Guide',
+            children: [
+              {
+                title: 'Release Preparation',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release-prepare.html',
+              },
+              {
+                title: 'Release Guide',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release.html',
+              },
+              {
+                title: 'Release Post',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release-post.html',
+              },
+            ],
+          },
+          {
+            title: 'Questions & Communications',
+            link: '/en-us/docs/dev/user_doc/contribute/have-questions.html',
+          },
+        ],
+      },
       {
         title: 'FAQ',
         children: [
@@ -574,6 +711,143 @@ export default {
           },
         ]
       },
+      {
+        title: '贡献指南',
+        children: [
+          {
+            title: '如何参与',
+            children: [
+              {
+                title: '报告安全问题',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/security.html',
+              },
+              {
+                title: '如何成为 Committer',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/become-a-committer.html',
+              },
+              {
+                title: '订阅/取消订阅邮件列表',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/subscribe.html',
+              },
+              {
+                title: '参与贡献',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/contribute.html',
+              },
+              {
+                title: '行为准则',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/code-conduct.html',
+              },
+              {
+                title: 'Review Issue or Pull Requests',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/review.html',
+              },
+              {
+                title: '提交代码',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/submit-code.html',
+              },
+              {
+                title: 'License须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/DS-License.html',
+              },
+              {
+                title: '文档须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/document.html',
+              },
+              {
+                title: 'Issue须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/issue.html',
+              },
+              {
+                title: 'Pull Request须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/pull-request.html',
+              },
+              {
+                title: 'Commit Message须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/commit-message.html',
+              },
+              {
+                title: '微基准测试须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/microbench.html',
+              },
+              {
+                title: '单元测试编写指南',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/unit-test.html',
+              },
+            ],
+          },
+          {
+            title: '环境搭建',
+            link: '/zh-cn/docs/dev/user_doc/contribute/development-environment-setup.html',
+          },
+          {
+            title: '设计文档',
+            children: [
+              // TODO not support multiply level for now
+              // {
+              // title: 'SPI',
+              // children: [
+              {
+                title: '架构设计',
+                link: '/zh-cn/docs/dev/user_doc/contribute/architecture-design.html',
+              },
+              {
+                title: 'Alert SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/alert.html',
+              },
+              {
+                title: 'Registry SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/registry.html',
+              },
+              {
+                title: 'Task SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/task.html',
+              },
+              {
+                title: 'Datasource SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/datasource.html',
+              },
+              {
+                title: '组件设计',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/mechanism/overview.html',
+              },
+            ],
+          },
+          {
+            title: '规范',
+            children: [
+              {
+                title: '前端开发',
+                link: '/zh-cn/docs/dev/user_doc/contribute/frontend-development.html',
+              },
+              {
+                title: 'API规范',
+                link: '/zh-cn/docs/dev/user_doc/contribute/api-standard.html',
+              },
+            ],
+          },
+          {
+            title: '发版指南',
+            children: [
+              {
+                title: '发版准备',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release-prepare.html',
+              },
+              {
+                title: '发版指南',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release.html',
+              },
+              {
+                title: '发版后续',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release-post.html',
+              },
+            ],
+          },
+          {
+            title: '问题与交流',
+            link: '/zh-cn/docs/dev/user_doc/contribute/have-questions.html',
+          },
+        ],
+      },
       {
         title: 'FAQ',
         children: [
diff --git a/docs/2.0.6/docs/en/contribute/api-standard.md b/docs/2.0.6/docs/en/contribute/api-standard.md
new file mode 100644
index 000000000..61d662216
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/api-standard.md
@@ -0,0 +1,108 @@
+# API design standard
+A standardized and unified API is the cornerstone of project design. The API of DolphinScheduler follows the RESTful standard. RESTful is currently the most popular Internet software architecture. It has a clear structure, conforms to standards, and is easy to understand and extend.
+
+This article uses the DolphinScheduler API as an example to explain how to construct a Restful API.
+
+## 1. URI design
+REST is "Representational State Transfer". The design of RESTful URIs is based on resources. A resource corresponds to an entity on the network, for example: a piece of text, a picture, or a service. And each resource corresponds to a URI.
+
++ One Kind of Resource: expressed in the plural, such as `task-instances`, `groups`;
++ A Resource: expressed in the singular, or use the ID to represent the corresponding resource, such as `group`, `groups/{groupId}`;
++ Sub Resources: Resources under a certain resource, such as `/instances/{instanceId}/tasks`;
++ A Sub Resource:`/instances/{instanceId}/tasks/{taskId}`;
+
+## 2. Method design
+We need to locate a certain resource by URI, and then use Method or declare actions in the path suffix to reflect the operation of the resource.
+
+### ① Query - GET
+Use URI to locate the resource, and use GET to indicate query.
+
++ When the URI is a type of resource, it means to query a type of resource. For example, the following example indicates a paging query of `alert-groups`.
+```
+Method: GET
+/dolphinscheduler/alert-groups
+```
+
++ When the URI is a single resource, it means to query this resource. For example, the following example means to query the specified `alert-group`.
+```
+Method: GET
+/dolphinscheduler/alert-groups/{id}
+```
+
++ In addition, we can also express query sub-resources based on URI, as follows:
+```
+Method: GET
+/dolphinscheduler/projects/{projectId}/tasks
+```
+
+**The above examples all represent paging query. If we need to query all data, we need to add `/list` after the URI to distinguish. Do not mix the same API for both paged query and query.**
+```
+Method: GET
+/dolphinscheduler/alert-groups/list
+```
+
+### ② Create - POST
+Use URI to locate the resource, use POST to indicate create, and then return the created id to the requester.
+
++ create an `alert-group`:
+
+```
+Method: POST
+/dolphinscheduler/alert-groups
+```
+
++ creating sub-resources is also the same as above.
+```
+Method: POST
+/dolphinscheduler/alert-groups/{alertGroupId}/tasks
+```
+
+### ③ Modify - PUT
+Use URI to locate the resource, use PUT to indicate modify.
++ modify an `alert-group`
+```
+Method: PUT
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
+### ④ Delete - DELETE
+Use URI to locate the resource, use DELETE to indicate delete.
+
++ delete an `alert-group`
+```
+Method: DELETE
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
++ batch deletion: to batch delete an array of ids, we should use POST. **(Do not use the DELETE method, because the body of the DELETE request has no semantic meaning, and it is possible that some gateways, proxies, and firewalls will directly strip off the request body after receiving the DELETE request.)**
+```
+Method: POST
+/dolphinscheduler/alert-groups/batch-delete
+```
+
+### ⑤ Partial Modifications - PATCH
+Use URI to locate the resource, use PATCH to make partial modifications.
+
+```
+Method: PATCH
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
+### ⑥ Others
+In addition to creating, deleting, modifying and querying, we also locate the corresponding resource through the URL, and then append operations to it after the path, such as:
+```
+/dolphinscheduler/alert-groups/verify-name
+/dolphinscheduler/projects/{projectCode}/process-instances/{code}/view-gantt
+```
+
+## 3. Parameter design
+There are two types of parameters, one is the request parameter and the other is the path parameter. And parameter names must use lower camel case.
+
+In the case of paging, if the parameter entered by the user is less than 1, the front end needs to automatically turn it to 1, indicating that the first page is requested; when the backend finds that the parameter entered by the user is greater than the total number of pages, it should directly return the last page.
+
+## 4. Others design
+### base URL
+The URI of the project needs to use `/<project_name>` as the base path, so as to identify that these APIs are under this project.
+```
+/dolphinscheduler
+```
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/architecture-design.md b/docs/2.0.6/docs/en/contribute/architecture-design.md
new file mode 100644
index 000000000..a46bfb285
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/architecture-design.md
@@ -0,0 +1,315 @@
+## Architecture Design
+Before explaining the architecture of the schedule system, let us first understand the common nouns of the schedule system.
+
+### 1.Noun Interpretation
+
+**DAG:** Full name Directed Acyclic Graph, referred to as DAG. Tasks in the workflow are assembled in the form of a directed acyclic graph, which is topologically traversed from nodes with zero in-degree until there are no successor nodes. For example, the following picture:
+
+<p align="center">
+  <img src="../../../img/architecture-design/dag_examples.png" alt="dag example"  width="80%" />
+  <p align="center">
+        <em>dag example</em>
+  </p>
+</p>
+
+**Process definition**: Visualization **DAG** by dragging task nodes and establishing associations of task nodes 
+
+**Process instance**: A process instance is an instantiation of a process definition, which can be generated by manual startup or  scheduling. The process definition runs once, a new process instance is generated
+
+**Task instance**: A task instance is the instantiation of a specific task node when a process instance runs, which indicates the specific task execution status
+
+**Task type**: Currently supports SHELL, SQL, SUB_PROCESS (sub-process), PROCEDURE, MR, SPARK, PYTHON, DEPENDENT (dependency), and plans to support dynamic plug-in extension. Note: **SUB_PROCESS** is also a separate process definition that can be launched separately
+
+**Schedule mode** :  The system supports timing schedule and manual schedule based on cron expressions. Command type support: start workflow, start execution from current node, resume fault-tolerant workflow, resume pause process, start execution from failed node, complement, timer, rerun, pause, stop, resume waiting thread. Where **recovers the fault-tolerant workflow** and **restores the waiting thread** The two command types are used by the scheduling internal control and cannot be ca [...]
+
+**Timed schedule**: The system uses **quartz** distributed scheduler and supports the generation of cron expression visualization
+
+**Dependency**: The system does not only support **DAG** Simple dependencies between predecessors and successor nodes, but also provides **task dependencies** nodes, support for **custom task dependencies between processes**
+
+**Priority**: Supports the priority of process instances and task instances. If the process instance and task instance priority are not set, the default is first in, first out.
+
+**Mail Alert**: Support **SQL Task** Query Result Email Send, Process Instance Run Result Email Alert and Fault Tolerant Alert Notification
+
+**Failure policy**: For tasks running in parallel, if there are tasks that fail, two failure policy processing methods are provided. **Continue** means that the status of the task is run in parallel until the end of the process failure. **End** means that once a failed task is found, Kill also drops the running parallel task and the process ends.
+
+**Complement**: Complement historical data, support **interval parallel and serial** two complement methods
+
+
+
+### 2.System architecture
+
+#### 2.1 System Architecture Diagram
+<p align="center">
+  <img src="../../../img/architecture.jpg" alt="System Architecture Diagram"  />
+  <p align="center">
+        <em>System Architecture Diagram</em>
+  </p>
+</p>
+
+
+
+#### 2.2 Architectural description
+
+* **MasterServer** 
+
+    MasterServer adopts the distributed non-central design concept. MasterServer is mainly responsible for DAG task split, task submission monitoring, and monitoring the health status of other MasterServer and WorkerServer.
+    When the MasterServer service starts, it registers a temporary node with Zookeeper, and listens to the Zookeeper temporary node state change for fault tolerance processing.
+
+    
+
+    ##### The service mainly contains:
+
+    - **Distributed Quartz** distributed scheduling component, mainly responsible for the start and stop operation of the scheduled task. When the quartz picks up the task, the master internally has a thread pool to be responsible for the subsequent operations of the task.
+
+    - **MasterSchedulerThread** is a scan thread that periodically scans the **command** table in the database for different business operations based on different **command types**
+
+    - **MasterExecThread** is mainly responsible for DAG task segmentation, task submission monitoring, logic processing of various command types
+
+    - **MasterTaskExecThread** is mainly responsible for task persistence
+
+      
+
+* **WorkerServer** 
+
+     - WorkerServer also adopts a distributed, non-central design concept. WorkerServer is mainly responsible for task execution and providing log services. When the WorkerServer service starts, it registers the temporary node with Zookeeper and maintains the heartbeat.
+
+       ##### This service contains:
+
+       - **FetchTaskThread** is mainly responsible for continuously receiving tasks from **Task Queue** and calling **TaskScheduleThread** corresponding executors according to different task types.
+
+     - **ZooKeeper**
+
+       The ZooKeeper service, the MasterServer and the WorkerServer nodes in the system all use the ZooKeeper for cluster management and fault tolerance. In addition, the system also performs event monitoring and distributed locking based on ZooKeeper.
+       We have also implemented queues based on Redis, but we hope that DolphinScheduler relies on as few components as possible, so we finally removed the Redis implementation.
+
+     - **Task Queue**
+
+       The task queue operation is provided. Currently, the queue is also implemented based on Zookeeper. Since there is less information stored in the queue, there is no need to worry about too much data in the queue. In fact, we have stress-tested the queue with millions of records, which has no effect on system stability and performance.
+
+     - **Alert**
+
+       Provides alarm-related interfaces. The interfaces mainly include the storage, query, and notification functions of the two types of alarm data. The notification function has two types: **mail notification** and **SNMP (not yet implemented)**.
+
+     - **API**
+
+       The API interface layer is mainly responsible for processing requests from the front-end UI layer. The service provides a RESTful api to provide request services externally.
+       Interfaces include workflow creation, definition, query, modification, release, offline, manual start, stop, pause, resume, start execution from this node, and more.
+
+     - **UI**
+
+       The front-end page of the system provides various visual operation interfaces of the system. For details, see the [quick start](https://dolphinscheduler.apache.org/en-us/docs/latest/user_doc/about/introduction.html) section.
+
+     
+
+#### 2.3 Architectural Design Ideas
+
+##### I. Decentralized vs centralization
+
+###### Centralization Thought
+
+The centralized design concept is relatively simple. The nodes in the distributed cluster are divided into two roles according to their roles:
+
+<p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave role" width="50%" />
+ </p>
+
+- The role of Master is mainly responsible for task distribution and supervising the health status of Slave. It can dynamically balance the task to Slave, so that the Slave node will not be "busy" or "free".
+- The role of the Worker is mainly responsible for the execution of the task and maintains the heartbeat with the Master so that the Master can assign tasks to the Slave.
+
+Problems in the design of centralized :
+
+- Once the Master has a problem, the group has no leader and the entire cluster will crash. In order to solve this problem, most Master/Slave architecture modes adopt the design scheme of the master and backup masters, which can be hot standby or cold standby, automatic switching or manual switching, and more and more new systems are available. Automatically elects the ability to switch masters to improve system availability.
+- Another problem is that if the Scheduler is on the Master, although it can support different tasks in one DAG running on different machines, it will generate overload of the Master. If the Scheduler is on the Slave, all tasks in a DAG can only be submitted on one machine. If there are more parallel tasks, the pressure on the Slave may be larger.
+
+###### Decentralization
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="decentralized" width="50%" />
+ </p>
+
+- In the decentralized design, there is usually no Master/Slave concept, all roles are the same, the status is equal, the global Internet is a typical decentralized distributed system, networked arbitrary node equipment down machine , all will only affect a small range of features.
+- The core design of decentralized design is that there is no "manager" that is different from other nodes in the entire distributed system, so there is no single point of failure problem. However, since there is no "manager" node, each node needs to communicate with other nodes to get the necessary machine information, and the unreliable line of distributed system communication greatly increases the difficulty of implementing the above functions.
+- In fact, truly decentralized distributed systems are rare. Instead, dynamic centralized distributed systems are constantly emerging. Under this architecture, the managers in the cluster are dynamically selected, rather than preset, and when the cluster fails, the nodes of the cluster will spontaneously hold "meetings" to elect new "managers". Go to preside over the work. The most typical case is the Etcd implemented in ZooKeeper and Go.
+
+- Decentralization of DolphinScheduler is the registration of Master/Worker to ZooKeeper. The Master Cluster and the Worker Cluster are not centered, and the Zookeeper distributed lock is used to elect one Master or Worker as the “manager” to perform the task.
+
+##### II. Distributed lock practice
+
+DolphinScheduler uses ZooKeeper distributed locks to implement only one Master to execute the Scheduler at the same time, or only one Worker to perform task submission.
+
+1. The core process algorithm for obtaining distributed locks is as follows
+
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock.png" alt="Get Distributed Lock Process" width="70%" />
+ </p>
+
+2. Scheduler thread distributed lock implementation flow chart in DolphinScheduler:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock_procss.png" alt="Get Distributed Lock Process" />
+ </p>
+
+##### III. Insufficient-thread loop-waiting problem
+
+- If there is no subprocess in a DAG, if the number of data in the Command is greater than the threshold set by the thread pool, the direct process waits or fails.
+- If a large number of sub-processes are nested in a large DAG, the following figure will result in a "dead" state:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/lack_thread.png" alt="Thread is not enough to wait for loop" width="70%" />
+ </p>
+
+In the above figure, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread in the thread pool, then the entire DAG process cannot end, and thus the thread cannot be released. This forms the state of the child parent process loop waiting. At this point, the scheduling cluster will no longer be available unless a new Master is started to add threads to break s [...]
+
+It seems a bit unsatisfactory to start a new Master to break the deadlock, so we proposed the following three options to reduce this risk:
+
+1. Calculate the sum of the threads of all Masters, and then calculate the number of threads required for each DAG, that is, pre-calculate before the DAG process is executed. Because it is a multi-master thread pool, the total number of threads is unlikely to be obtained in real time.
+2. Judge the single master thread pool. If the thread pool is full, let the thread fail directly.
+3. Add a Command type with insufficient resources. If the thread pool is insufficient, the main process will be suspended. This way, the thread pool has a new thread, which can make the process with insufficient resources hang up and wake up again.
+
+Note: The Master Scheduler thread fetches Commands in FIFO order.
+
+So we chose the third way to solve the problem of insufficient threads.
+
+##### IV. Fault Tolerant Design
+
+Fault tolerance is divided into service fault tolerance and task retry. Service fault tolerance is divided into two types: Master Fault Tolerance and Worker Fault Tolerance.
+
+###### 1. Downtime fault tolerance
+
+Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant.png" alt="DolphinScheduler Fault Tolerant Design" width="70%" />
+ </p>
+
+The Master monitors the directories of other Masters and Workers. If the remove event is detected, the process instance is fault-tolerant or the task instance is fault-tolerant according to the specific business logic.
+
+
+
+- Master fault tolerance flow chart:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="70%" />
+ </p>
+
+After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler thread in DolphinScheduler. It traverses the DAG to find the "Running" and "Submit Successful" tasks, and monitors the status of its task instance for the "Running" task. You need to determine whether the Task Queue already exists. If it exists, monitor the status of the task instance. If it does not exist, resubmit the task instance.
+
+
+
+- Worker fault tolerance flow chart:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="70%" />
+ </p>
+
+Once the Master Scheduler thread finds the task instance as "need to be fault tolerant", it takes over the task and resubmits.
+
+ Note: Because the "network jitter" may cause the node to lose the heartbeat of ZooKeeper in a short time, the node's remove event occurs. In this case, we use the easiest way, that is, once the node has timeout connection with ZooKeeper, it will directly stop the Master or Worker service.
+
+###### 2. Task failure retry
+
+Here we must first distinguish between the concept of task failure retry, process failure recovery, and process failure rerun:
+
+- Task failure Retry is task level, which is automatically performed by the scheduling system. For example, if a shell task sets the number of retries to 3 times, then the shell task will try to run up to 3 times after failing to run.
+- Process failure recovery is process level, is done manually, recovery can only be performed **from the failed node** or **from the current node**
+- Process failure rerun is also process level, is done manually, rerun is from the start node
+
+
+
+Next, getting back to the topic: we divide the task nodes in the workflow into two types.
+
+- One is a business node, which corresponds to an actual script or processing statement, such as a Shell node, an MR node, a Spark node, a dependent node, and so on.
+- There is also a logical node, which does not do the actual script or statement processing, but the logical processing of the entire process flow, such as sub-flow sections.
+
+Each **service node** can configure the number of failed retries. When the task node fails, it will automatically retry until it succeeds or exceeds the configured number of retries. **Logical node** does not support failed retry. But the tasks in the logical nodes support retry.
+
+If there is a task failure in the workflow that reaches the maximum number of retries, the workflow will fail to stop, and the failed workflow can be manually rerun or process resumed.
+
+
+
+##### V. Task priority design
+
+In the early scheduling design, if there is no priority design and fair scheduling design, it will encounter the situation that the task submitted first may be completed simultaneously with the task submitted subsequently, but the priority of the process or task cannot be set. We have redesigned this, and we are currently designing it as follows:
+
+- Tasks are processed in order of **process instance priority** first, then **task priority within the same process instance**, and finally the **submission order within the same process**, from high to low.
+
+  - The specific implementation is to resolve the priority according to the json of the task instance, and then save the **process instance priority _ process instance id_task priority _ task id** information in the ZooKeeper task queue, when obtained from the task queue, Through string comparison, you can get the task that needs to be executed first.
+
+    - The priority of the process definition is that some processes need to be processed before other processes. This can be configured at the start of the process or at the time of scheduled start. There are 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
+
+      <p align="center">
+         <img src="../../../img/architecture-design/process_priority.png" alt="Process Priority Configuration" width="40%" />
+       </p>
+
+    - The priority of the task is also divided into 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
+
+      <p align="center">
+         <img src="../../../img/architecture-design/task_priority.png" alt="task priority configuration" width="35%" />
+       </p>
+
+##### VI. Logback and gRPC implement log access
+
+- Since the Web (UI) and Worker are not necessarily on the same machine, viewing the log is not as it is for querying local files. There are two options:
+  - Put the logs on the ES search engine
+  - Obtain remote log information through gRPC communication
+- Considering the lightweightness of DolphinScheduler as much as possible, gRPC was chosen to implement remote access log information.
+
+ <p align="center">
+   <img src="../../../img/architecture-design/grpc.png" alt="grpc remote access" width="50%" />
+ </p>
+
+- We use a custom Logback FileAppender and Filter function to generate a log file for each task instance.
+- The main implementation of FileAppender is as follows:
+
+```java
+ /**
+  * task log appender
+  */
+ public class TaskLogAppender extends FileAppender<ILoggingEvent> {
+ 
+     ...
+
+    @Override
+    protected void append(ILoggingEvent event) {
+
+        if (currentlyActiveFile == null){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split("-");
+        // logId = processDefineId_processInstanceId_taskInstanceId
+        String logId = threadNameArr[1];
+        ...
+        super.subAppend(event);
+    }
+}
+```
+
+Generate a log in the form of /process definition id/process instance id/task instance id.log
+
+- Filter matches the thread name starting with TaskLogInfo:
+- TaskLogFilter is implemented as follows:
+
+```java
+ /**
+ * task log filter
+ */
+public class TaskLogFilter extends Filter<ILoggingEvent> {
+
+    @Override
+    public FilterReply decide(ILoggingEvent event) {
+        if (event.getThreadName().startsWith("TaskLogInfo-")){
+            return FilterReply.ACCEPT;
+        }
+        return FilterReply.DENY;
+    }
+}
+```
+
+
+
+### summary
+
+Starting from the scheduling, this paper introduces the architecture principle and implementation ideas of the big data distributed workflow scheduling system-DolphinScheduler. To be continued
diff --git a/docs/2.0.6/docs/en/contribute/backend/mechanism/global-parameter.md b/docs/2.0.6/docs/en/contribute/backend/mechanism/global-parameter.md
new file mode 100644
index 000000000..53b73747d
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/mechanism/global-parameter.md
@@ -0,0 +1,61 @@
+# Global Parameter development document
+
+After the user defines the parameter with the direction OUT, it is saved in the localParam of the task.
+
+## Usage of parameters
+
+Getting the direct predecessor node `preTasks` of the current `taskInstance` to be created from the DAG, get the `varPool` of `preTasks`, merge this varPool (List) into one `varPool`, and in the merging process, if parameters with the same parameter name are found, they will be handled according to the following logics:
+
+* If all the values are null, the merged value is null
+* If one and only one value is non-null, then the merged value is the non-null value
+* If all the values are non-null, the value from the taskInstance with the earliest end time is taken.
+
+The direction of all the merged properties is updated to IN during the merge process.
+
+The result of the merge is saved in taskInstance.varPool.
+
+The worker receives and parses the varPool into the format of `Map<String,Property>`, where the key of the map is property.prop, which is the parameter name.
+
+When the processor processes the parameters, it will merge the varPool and localParam and globalParam parameters, and if there are parameters with duplicate names during the merging process, they will be replaced according to the following priorities, with the higher priority being retained and the lower priority being replaced:
+
+* globalParam: high
+* varPool: middle
+* localParam: low
+
+The parameters are replaced with the corresponding values using regular expressions compared to ${parameter name} before the node content is executed.
+
+## Parameter setting
+
+Currently, only SQL and SHELL nodes are supported to get parameters.
+
+Get the parameter with direction OUT from localParam, and do the following way according to the type of different nodes.
+
+### SQL node
+
+The structure returned by the parameter is List<Map<String,String>>, where the elements of List are each row of data, the key of Map is the column name, and the value is the value corresponding to the column.
+
+* If the SQL statement returns one row of data, the column names are matched against the OUT parameter names defined by the user when defining the task; columns that do not match are discarded.
+* If the SQL statement returns multiple rows of data, the column names are matched based on the OUT parameter names defined by the user when defining the task of type LIST. All rows of the corresponding column are converted to `List<String>` as the value of this parameter. If there is no match, it is discarded.
+
+### SHELL node
+
+The result of the processor execution is returned as `Map<String,String>`.
+
+The user needs to define `${setValue(key=value)}` in the output when defining the shell script.
+
+Remove `${setValue()}` when processing parameters, split by "=", with the 0th being the key and the 1st being the value.
+
+Similarly match the OUT parameter name and key defined by the user when defining the task, and use value as the value of that parameter.
+
+Return parameter processing
+
+* The result of acquired Processor is String.
+* Determine whether the processor is empty or not, and exit if it is empty.
+* Determine whether the localParam is empty or not, and exit if it is empty.
+* Get the parameter of localParam which is OUT, and exit if it is empty.
+* Format String as per the above format (`List<Map<String,String>>` for SQL, `Map<String,String>` for shell).
+
+Assign the parameters with matching values to varPool (List, which contains the original IN's parameters)
+
+* Format the varPool as json and pass it to master.
+* The parameters that are OUT would be written into the localParam after the master has received the varPool.
diff --git a/docs/2.0.6/docs/en/contribute/backend/mechanism/overview.md b/docs/2.0.6/docs/en/contribute/backend/mechanism/overview.md
new file mode 100644
index 000000000..4f0d592c4
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/mechanism/overview.md
@@ -0,0 +1,6 @@
+# Overview
+
+<!-- TODO Since the side menu does not support multiple levels, add new page to keep all sub page here -->
+
+* [Global Parameter](global-parameter.md)
+* [Switch Task type](task/switch.md)
diff --git a/docs/2.0.6/docs/en/contribute/backend/mechanism/task/switch.md b/docs/2.0.6/docs/en/contribute/backend/mechanism/task/switch.md
new file mode 100644
index 000000000..490510405
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/mechanism/task/switch.md
@@ -0,0 +1,8 @@
+# SWITCH Task development
+
+Switch task workflow step as follows
+
+* User-defined expressions and branch information are stored in `taskParams` in `taskdefinition`. When the switch is executed, it will be formatted as `SwitchParameters`
+* `SwitchTaskExecThread` processes the expressions defined in `switch` from top to bottom, obtains the value of the variable from `varPool`, and parses the expression through `javascript`. If the expression returns true, it stops checking and records the order of the expression, here recorded as resultConditionLocation. The task of SwitchTaskExecThread is then over
+* After the `switch` task runs, if there is no error (more commonly, the user-defined expression is out of specification or there is a problem with the parameter name), then `MasterExecThread.submitPostNode` will obtain the downstream node of the `DAG` to continue execution.
+* If it is found in `DagHelper.parsePostNodes` that the current node (the node that has just completed the work) is a `switch` node, the `resultConditionLocation` will be obtained, and all branches except `resultConditionLocation` in the SwitchParameters will be skipped. In this way, only the branches that need to be executed are left
diff --git a/docs/2.0.6/docs/en/contribute/backend/spi/alert.md b/docs/2.0.6/docs/en/contribute/backend/spi/alert.md
new file mode 100644
index 000000000..e2629a87a
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/spi/alert.md
@@ -0,0 +1,101 @@
+### DolphinScheduler Alert SPI main design
+
+#### DolphinScheduler SPI Design
+
+DolphinScheduler is undergoing a microkernel + plug-in architecture change. All core capabilities such as tasks, resource storage, registration centers, etc. will be designed as extension points. We hope to use SPI to improve DolphinScheduler’s own flexibility and extensibility.
+
+For alarm-related codes, please refer to the `dolphinscheduler-alert-api` module. This module defines the extension interface of the alarm plug-in and some basic codes. When we need to realize the plug-inization of related functions, it is recommended to read the code of this block first. Of course, it is recommended that you read the document. This will reduce a lot of time, but the document There is a certain degree of lag. When the document is missing, it is recommended to take the so [...]
+
+We use the native JAVA-SPI, when you need to extend, in fact, you only need to pay attention to the extension of the `org.apache.dolphinscheduler.alert.api.AlertChannelFactory` interface, the underlying logic such as plug-in loading, and other kernels have been implemented, Which makes our development more focused and simple.
+
+By the way, we have adopted an excellent front-end component form-create, which supports the generation of front-end UI components based on JSON. If plug-in development involves the front-end, we will use JSON to generate related front-end UI components, org.apache.dolphinscheduler. The parameters of the plug-in are encapsulated in spi.params, which will convert all the relevant parameters into the corresponding JSON, which means that you can complete the drawing of the front-end compone [...]
+
+This article mainly focuses on the design and development of Alert.
+
+#### Main Modules
+
+If you don't care about its internal design, but simply want to know how to develop your own alarm plug-in, you can skip this content.
+
+* dolphinscheduler-alert-api
+
+  This module is the core module of ALERT SPI. This module defines the interface of the alarm plug-in extension and some basic codes. The extension plug-in must implement the interface defined by this module: `org.apache.dolphinscheduler.alert.api.AlertChannelFactory`
+
+* dolphinscheduler-alert-plugins
+
+  This module is currently a plug-in provided by us, and now we have supported dozens of plug-ins, such as Email, DingTalk, Script, etc.
+
+
+#### Alert SPI Main class information.
+AlertChannelFactory
+Alarm plug-in factory interface. All alarm plug-ins need to implement this interface. This interface is used to define the name of the alarm plug-in and the required parameters. The create method is used to create a specific alarm plug-in instance.
+
+AlertChannel
+The interface of the alert plug-in. The alert plug-in needs to implement this interface. There is only one method process in this interface. The upper-level alert system will call this method and obtain the return information of the alert through the AlertResult returned by this method.
+
+AlertData
+Alarm content information, including id, title, content, log.
+
+AlertInfo
+For alarm-related information, when the upper-level system calls an instance of the alarm plug-in, the instance of this class is passed to the specific alarm plug-in through the process method. It contains the alert content AlertData and the parameter information filled in by the front end of the called alert plug-in instance.
+
+AlertResult
+The alarm plug-in sends alarm return information.
+
+org.apache.dolphinscheduler.spi.params
+This package is a plug-in parameter definition. Our front-end uses the form-create front-end library http://www.form-create.com, which can dynamically generate the front-end UI based on the parameter list json returned by the plug-in definition, so we don't need to care about the front end when we are doing SPI plug-in development.
+
+Under this package, we currently only encapsulate RadioParam, TextParam, and PasswordParam, which are used to define text type parameters, radio parameters and password type parameters, respectively.
+
+AbsPluginParams This class is the base class of all parameters, RadioParam these classes all inherit this class. Each DS alert plug-in will return a list of AbsPluginParams in the implementation of AlertChannelFactory.
+
+The specific design of alert_spi can be seen in the issue: [Alert Plugin Design](https://github.com/apache/incubator-dolphinscheduler/issues/3049)
+
+#### Alert SPI built-in implementation
+
+* Email
+
+     Email alert notification
+
+* DingTalk
+
+     Alert for DingTalk group chat bots
+  
+     Related parameter configuration can refer to the DingTalk robot document.
+
+* EnterpriseWeChat
+
+     EnterpriseWeChat alert notifications
+
+     Related parameter configuration can refer to the EnterpriseWeChat robot document.
+
+* Script
+
+     We have implemented a shell script for alerting. We will pass the relevant alert parameters to the script and you can implement your alert logic in the shell. This is a good way to interface with internal alerting applications.
+
+* SMS
+
+     SMS alerts
+* FeiShu
+
+  FeiShu alert notification
+* Slack
+
+  Slack alert notification
+* PagerDuty
+
+  PagerDuty alert notification
+* WebexTeams
+
+  WebexTeams alert notification
+
+  Related parameter configuration can refer to the WebexTeams document.
+
+* Telegram
+
+  Telegram alert notification
+  
+  Related parameter configuration can refer to the Telegram document.
+
+* Http
+
+  We have implemented a Http script for alerting. And calling most of the alerting plug-ins end up being Http requests, so if we do not support your alert plug-in yet, you can use Http to realize your alert logic. Also welcome to contribute your common plug-ins to the community :)
diff --git a/docs/2.0.6/docs/en/contribute/backend/spi/datasource.md b/docs/2.0.6/docs/en/contribute/backend/spi/datasource.md
new file mode 100644
index 000000000..5772b4357
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/spi/datasource.md
@@ -0,0 +1,23 @@
+## DolphinScheduler Datasource SPI main design
+
+#### How do I use data sources?
+
+The data source center supports POSTGRESQL, HIVE/IMPALA, SPARK, CLICKHOUSE, SQLSERVER data sources by default.
+
+If you are using MySQL or ORACLE data source, you need to place the corresponding driver package in the lib directory
+
+#### How to do Datasource plugin development?
+
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannel
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannelFactory
+org.apache.dolphinscheduler.plugin.datasource.api.client.CommonDataSourceClient
+
+1. In the first step, the data source plug-in can implement the above interfaces and inherit the general client. For details, refer to the implementation of data source plug-ins such as sqlserver and mysql. The addition methods of all RDBMS plug-ins are the same.
+
+2. Add the driver configuration in the data source plug-in pom.xml
+
+We provide APIs for external access of all data sources in the dolphin scheduler data source API module
+
+#### **Future plan**
+
+Support data sources such as kafka, http, files, sparkSQL, FlinkSQL, etc.
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/backend/spi/registry.md b/docs/2.0.6/docs/en/contribute/backend/spi/registry.md
new file mode 100644
index 000000000..0957ff3cd
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/spi/registry.md
@@ -0,0 +1,27 @@
+### DolphinScheduler Registry SPI Extension
+
+#### how to use?
+
+Make the following configuration (take zookeeper as an example)
+
+* Registry plug-in configuration, take Zookeeper as an example (registry.properties)
+  dolphinscheduler-service/src/main/resources/registry.properties
+  ```registry.properties
+   registry.plugin.name=zookeeper
+   registry.servers=127.0.0.1:2181
+  ```
+
+For specific configuration information, please refer to the parameter information provided by the specific plug-in, for example zk: `org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperConfiguration.java`
+All configuration information prefixes need to be +registry, such as base.sleep.time.ms, which should be configured in the registry as follows: registry.base.sleep.time.ms=100
+
+#### How to expand
+
+`dolphinscheduler-registry-api` defines the standard for implementing plugins. When you need to extend plugins, you only need to implement `org.apache.dolphinscheduler.registry.api.RegistryFactory`.
+
+Under the `dolphinscheduler-registry-plugin` module is the registry plugin we currently provide.
+
+#### FAQ
+
+1: registry connect timeout
+
+You can increase the relevant timeout parameters.
diff --git a/docs/2.0.6/docs/en/contribute/backend/spi/task.md b/docs/2.0.6/docs/en/contribute/backend/spi/task.md
new file mode 100644
index 000000000..70b01d48f
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/backend/spi/task.md
@@ -0,0 +1,15 @@
+## DolphinScheduler Task SPI extension
+
+#### How to develop task plugins?
+
+org.apache.dolphinscheduler.spi.task.TaskChannel
+
+The plug-in can implement the above interface. It mainly includes creating tasks (task initialization, task running, etc.) and task cancellation. If it is a yarn task, you need to implement org.apache.dolphinscheduler.plugin.task.api.AbstractYarnTask.
+
+We provide APIs for external access to all tasks in the dolphinscheduler-task-api module, while the dolphinscheduler-spi module is the spi general code library, which defines all the plug-in modules, such as the alarm module, the registry module, etc., you can read and view in detail .
+
+*NOTICE*
+
+Since the task plug-in involves the front-end page, the front-end SPI has not yet been implemented, so you need to implement the front-end page corresponding to the plug-in separately.
+
+If there is a class conflict in the task plugin, you can use [Shade-Relocating Classes](https://maven.apache.org/plugins/maven-shade-plugin/) to solve this problem.
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/development-environment-setup.md b/docs/2.0.6/docs/en/contribute/development-environment-setup.md
new file mode 100644
index 000000000..0940682d6
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/development-environment-setup.md
@@ -0,0 +1,209 @@
+# DolphinScheduler development
+
+## Software Requirements
+Before setting up the DolphinScheduler development environment, please make sure you have installed the software as below:
+
+* [Git](https://git-scm.com/downloads)
+* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html): v1.8.x (Currently does not support jdk 11)
+* [Maven](http://maven.apache.org/download.cgi): v3.5+
+* [Node](https://nodejs.org/en/download): v16.13+ (dolphinScheduler version is lower than 3.0, please install node v12.20+)
+* [Pnpm](https://pnpm.io/installation): v6.x
+
+### Clone Git Repository
+
+Download the git repository through your git management tool, here we use git-core as an example
+
+```shell
+mkdir dolphinscheduler
+cd dolphinscheduler
+git clone git@github.com:apache/dolphinscheduler.git
+```
+
+### compile source code
+
+Supporting system:
+* MacOS
+* Linux
+
+Run `mvn clean install -Prelease -Dmaven.test.skip=true`
+
+## Docker image build
+
+DolphinScheduler will release new Docker images after it released, you could find them in [Docker Hub](https://hub.docker.com/search?q=DolphinScheduler).
+
+* If you want to modify DolphinScheduler source code, and build Docker images locally, you can run when finished the modification
+```shell
+cd dolphinscheduler
+./mvnw -B clean package \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Ddocker.tag=<TAG> \
+       -Pdocker,release              
+```
+
+When the command is finished you could find them by command `docker images`.
+
+* If you want to modify DolphinScheduler source code, build and push Docker images to your registry <HUB_URL>,you can run when finished the modification
+```shell
+cd dolphinscheduler
+./mvnw -B clean deploy \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Dmaven.deploy.skip \
+       -Ddocker.tag=<TAG> \
+       -Ddocker.hub=<HUB_URL> \
+       -Pdocker,release           
+```
+
+* If you want to modify DolphinScheduler source code, and also want to add customize dependencies of Docker image, you can modify the definition of Dockerfile after modifying the source code. You can run the following command to find all Dockerfile files.
+
+```shell
+cd dolphinscheduler
+find . -iname 'Dockerfile'
+```
+
+Then run the Docker build command above
+
+* You could create custom Docker images base on those images if you want to change image like add some dependencies or upgrade package.
+
+```Dockerfile
+FROM dolphinscheduler-standalone-server
+RUN apt update ; \
+    apt install -y <YOUR-CUSTOM-DEPENDENCE> ; \
+```
+
+> **_Note:_** Docker will build and push linux/amd64,linux/arm64 multi-architecture images by default
+>
+> Have to use version after Docker 19.03, because after 19.03 docker contains buildx
+
+
+## Notice
+
+There are two ways to configure the DolphinScheduler development environment, standalone mode and normal mode
+
+* [Standalone mode](#dolphinscheduler-standalone-quick-start): **Recommended**, more convenient to build the development environment; it can cover most scenarios.
+* [Normal mode](#dolphinscheduler-normal-mode): Separate server master, worker, api, which can cover more test environments than standalone, and it is more like production environment in real life.
+
+## DolphinScheduler Standalone Quick Start
+
+> **_Note:_** Use standalone server only for development and debugging, because it uses H2 Database as default database and Zookeeper Testing Server which may not be stable in production.
+> 
+> Standalone is only supported in DolphinScheduler 1.3.9 and later versions.
+> 
+> Standalone server is able to connect to external databases like mysql and postgresql, see [Standalone Deployment](https://dolphinscheduler.apache.org/en-us/docs/dev/user_doc/guide/installation/standalone.html) for instructions.
+
+### Git Branch Choose
+
+Use different Git branch to develop different codes
+
+* If you want to develop based on a binary package, switch git branch to specific release branch, for example, if you want to develop based on 1.3.9, you should choose branch `1.3.9-release`.
+* If you want to develop the latest code, choose branch `dev`.
+
+### Start backend server
+
+Find the class `org.apache.dolphinscheduler.StandaloneServer` in Intellij IDEA and click run main function to start it up.
+
+### Start frontend server
+
+Install frontend dependencies and run it.
+> Note: You can see more detail about the frontend setting in [frontend development](./frontend-development.md).
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+The browser access address [http://localhost:3000](http://localhost:3000) can login DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**
+
+## DolphinScheduler Normal Mode
+
+### Prepare
+
+#### zookeeper
+
+Download [ZooKeeper](https://www.apache.org/dyn/closer.lua/zookeeper/zookeeper-3.6.3), and extract it.
+
+* Create directory `zkData` and `zkLog`
+* Go to the zookeeper installation directory, copy configure file `zoo_sample.cfg` to `conf/zoo.cfg`, and change value of dataDir in conf/zoo.cfg to dataDir=./tmp/zookeeper
+
+    ```shell
+    # We use path /data/zookeeper/data and /data/zookeeper/datalog here as example
+    dataDir=/data/zookeeper/data
+    dataLogDir=/data/zookeeper/datalog
+    ```
+
+* Run `./bin/zkServer.sh` in terminal by command `./bin/zkServer.sh start`.
+
+#### Database
+
+The DolphinScheduler's metadata is stored in a relational database. Currently MySQL and PostgreSQL are supported. We use MySQL as an example. Start the database and create a new database named dolphinscheduler as the DolphinScheduler metabase
+
+After creating the new database, run the sql file under `dolphinscheduler/dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql` directly in MySQL to complete the database initialization
+
+#### Start Backend Server
+
+Following steps will guide how to start the DolphinScheduler backend service
+
+##### Backend Start Prepare
+
+* Open project: Use IDE open the project, here we use Intellij IDEA as an example, after opening it will take a while for Intellij IDEA to complete the dependent download
+
+* File change
+  * If you use MySQL as your metadata database, you need to modify `dolphinscheduler/pom.xml` and change the `scope` of the `mysql-connector-java` dependency to `compile`. This step is not necessary to use PostgreSQL
+  * Modify database configuration, modify the database configuration in the `dolphinscheduler-master/src/main/resources/application.yaml`
+  * Modify database configuration, modify the database configuration in the `dolphinscheduler-worker/src/main/resources/application.yaml`
+  * Modify database configuration, modify the database configuration in the `dolphinscheduler-api/src/main/resources/application.yaml`
+
+
+We here use MySQL with database, username, password named dolphinscheduler as an example
+  ```application.yaml
+   spring:
+     datasource:
+       driver-class-name: com.mysql.cj.jdbc.Driver
+       url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+       username: dolphinscheduler
+       password: dolphinscheduler
+  ```
+
+* Log level: add a line `<appender-ref ref="STDOUT"/>` to the following configuration to enable the log to be displayed on the command line
+
+  `dolphinscheduler-master/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-worker/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-api/src/main/resources/logback-spring.xml`
+
+  here we add the result after modify as below:
+
+  ```diff
+  <root level="INFO">
+  +  <appender-ref ref="STDOUT"/>
+    <appender-ref ref="APILOGFILE"/>
+    <appender-ref ref="SKYWALKING-LOG"/>
+  </root>
+  ```
+
+> **_Note:_** Only DolphinScheduler 2.0 and later versions need to install a plugin before starting the server. This is not needed before version 2.0.
+
+##### Server start
+
+There are three services that need to be started, including MasterServer, WorkerServer, ApiApplicationServer.
+
+* MasterServer:Execute function `main` in the class `org.apache.dolphinscheduler.server.master.MasterServer` by Intellij IDEA, with the configuration *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* WorkerServer:Execute function `main` in the class `org.apache.dolphinscheduler.server.worker.WorkerServer` by Intellij IDEA, with the configuration *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* ApiApplicationServer:Execute function `main` in the class `org.apache.dolphinscheduler.api.ApiApplicationServer` by Intellij IDEA, with the configuration *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Dspring.profiles.active=api,mysql`. After it started, you could find Open API documentation in http://localhost:12345/dolphinscheduler/doc.html
+
+> The `mysql` in the VM Options `-Dspring.profiles.active=mysql` means specified configuration file
+
+### Start Frontend Server
+
+Install frontend dependencies and run it
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+The browser access address [http://localhost:3000](http://localhost:3000) can login DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**
diff --git a/docs/2.0.6/docs/en/contribute/frontend-development.md b/docs/2.0.6/docs/en/contribute/frontend-development.md
new file mode 100644
index 000000000..801610c6f
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/frontend-development.md
@@ -0,0 +1,639 @@
+# Front-end development documentation
+
+### Technical selection
+```
+Vue mvvm framework
+
+Es6 ECMAScript 6.0
+
+Ans-ui Analysys-ui
+
+D3  Visual Library Chart Library
+
+Jsplumb connection plugin library
+
+Lodash high performance JavaScript utility library
+```
+
+### Development environment
+
+- #### Node installation
+Node package download (note version v12.20.2) `https://nodejs.org/download/release/v12.20.2/` 
+
+- #### Front-end project construction
+Use the command line mode `cd`  enter the `dolphinscheduler-ui` project directory and execute `npm install` to pull the project dependency package.
+
+> If `npm install` is very slow, you can set the taobao mirror
+
+```
+npm config set registry http://registry.npm.taobao.org/
+```
+
+- Modify `API_BASE` in the file `dolphinscheduler-ui/.env` to interact with the backend:
+
+```
+# back end interface address
+API_BASE = http://127.0.0.1:12345
+```
+
+> #####  ! ! ! Special attention here. If the project reports a "node-sass" error while pulling the dependency package, execute the following command again after execution.
+
+```bash
+npm install node-sass --unsafe-perm #Install node-sass dependency separately
+```
+
+- #### Development environment operation
+- `npm start` project development environment (after startup address http://localhost:8888)
+
+#### Front-end project release
+
+- `npm run build` project packaging (after packaging, the root directory will create a folder called dist for publishing Nginx online)
+
+Run the `npm run build` command to generate a package file (dist) package
+
+Copy it to the corresponding directory of the server (front-end service static page storage directory)
+
+Visit address` http://localhost:8888`
+
+#### Start with node and daemon under Linux
+
+Install pm2 `npm install -g pm2`
+
+Execute `pm2 start npm -- run dev` to start the project in the project `dolphinscheduler-ui `root directory
+
+#### command
+
+- Start `pm2 start npm -- run dev`
+
+- Stop `pm2 stop npm`
+
+- delete `pm2 delete npm`
+
+- Status  `pm2 list`
+
+```
+
+[root@localhost dolphinscheduler-ui]# pm2 start npm -- run dev
+[PM2] Applying action restartProcessId on app [npm](ids: 0)
+[PM2] [npm](0) ✓
+[PM2] Process successfully started
+┌──────────┬────┬─────────┬──────┬──────┬────────┬─────────┬────────┬─────┬──────────┬──────┬──────────┐
+│ App name │ id │ version │ mode │ pid  │ status │ restart │ uptime │ cpu │ mem      │ user │ watching │
+├──────────┼────┼─────────┼──────┼──────┼────────┼─────────┼────────┼─────┼──────────┼──────┼──────────┤
+│ npm      │ 0  │ N/A     │ fork │ 6168 │ online │ 31      │ 0s     │ 0%  │ 5.6 MB   │ root │ disabled │
+└──────────┴────┴─────────┴──────┴──────┴────────┴─────────┴────────┴─────┴──────────┴──────┴──────────┘
+ Use `pm2 show <id|name>` to get more details about an app
+
+```
+
+### Project directory structure
+
+`build` some webpack configurations for packaging and development environment projects
+
+`node_modules` development environment node dependency package
+
+`src` project required documents
+
+`src => combo` project third-party resource localization `npm run combo` specific view `build/combo.js`
+
+`src => font` Font icon library can be added by visiting https://www.iconfont.cn Note: The font library uses its own secondary development to reintroduce its own library `src/sass/common/_font.scss`
+
+`src => images` public image storage
+
+`src => js` js/vue
+
+`src => lib` internal components of the company (company component library can be deleted after open source)
+
+`src => sass` sass file One page corresponds to a sass file
+
+`src => view` page file One page corresponds to an html file
+
+```
+> Projects are developed using vue single page application (SPA)
+- All page entry files are in the `src/js/conf/${ corresponding page filename => home} index.js` entry file
+- The corresponding sass file is in `src/sass/conf/${corresponding page filename => home}/index.scss`
+- The corresponding html file is in `src/view/${corresponding page filename => home}/index.html`
+```
+
+Public module and utill `src/js/module`
+
+`components` => internal project common components
+
+`download` => download component
+
+`echarts` => chart component
+
+`filter` => filter and vue pipeline
+
+`i18n` => internationalization
+
+`io` => io request encapsulation based on axios
+
+`mixin` => vue mixin public part for disabled operation
+
+`permissions` => permission operation
+
+`util` => tool
+
+### System function module
+
+Home  => `http://localhost:8888/#/home`
+
+Project Management => `http://localhost:8888/#/projects/list`
+```
+| Project Home
+| Workflow
+  - Workflow definition
+  - Workflow instance
+  - Task instance
+```
+
+Resource Management => `http://localhost:8888/#/resource/file`
+```
+| File Management
+| udf Management
+  - Resource Management
+  - Function management
+```
+
+Data Source Management => `http://localhost:8888/#/datasource/list`
+
+Security Center => `http://localhost:8888/#/security/tenant`
+```
+| Tenant Management
+| User Management
+| Alarm Group Management
+  - master
+  - worker
+```
+
+User Center => `http://localhost:8888/#/user/account`
+
+## Routing and state management
+
+The project `src/js/conf/home` is divided into
+
+`pages` => route to page directory
+```
+ The page file corresponding to the routing address
+```
+
+`router` => route management
+```
+vue router, the entry file index.js in each page will be registered. Specific operations: https://router.vuejs.org/zh/
+```
+
+`store` => status management
+```
+The page corresponding to each route has a state management file divided into:
+
+actions => mapActions => Details:https://vuex.vuejs.org/zh/guide/actions.html
+
+getters => mapGetters => Details:https://vuex.vuejs.org/zh/guide/getters.html
+
+index => entrance
+
+mutations => mapMutations => Details:https://vuex.vuejs.org/zh/guide/mutations.html
+
+state => mapState => Details:https://vuex.vuejs.org/zh/guide/state.html
+
+Specific action:https://vuex.vuejs.org/zh/
+```
+
+## specification
+## Vue specification
+##### 1.Component name
+The component is named multiple words and is connected with a wire (-) to avoid conflicts with HTML tags and a clearer structure.
+```
+// positive example
+export default {
+    name: 'page-article-item'
+}
+```
+
+##### 2.Component files
+The internal common component of the `src/js/module/components` project writes the folder name with the same name as the file name. The subcomponents and util tools that are split inside the common component are placed in the internal `_source` folder of the component.
+```
+└── components
+    ├── header
+        ├── header.vue
+        └── _source
+            └── nav.vue
+            └── util.js
+    ├── conditions
+        ├── conditions.vue
+        └── _source
+            └── search.vue
+            └── util.js
+```
+
+##### 3.Prop
+When you define Prop, you should always name it in camel format (camelCase) and use the connection line (-) when assigning values to the parent component.
+This follows the characteristics of each language, because it is case-insensitive in HTML tags, and the use of links is more friendly; in JavaScript, the more natural is the hump name.
+
+```
+// Vue
+props: {
+    articleStatus: Boolean
+}
+// HTML
+<article-item :article-status="true"></article-item>
+```
+
+The definition of Prop should specify its type, defaults, and validation as much as possible.
+
+Example:
+
+```
+props: {
+    attrM: Number,
+    attrA: {
+        type: String,
+        required: true
+    },
+    attrZ: {
+        type: Object,
+        //  The default value of the array/object should be returned by a factory function
+        default: function () {
+            return {
+                msg: 'achieve you and me'
+            }
+        }
+    },
+    attrE: {
+        type: String,
+        validator: function (v) {
+            return !(['success', 'fail'].indexOf(v) === -1) 
+        }
+    }
+}
+```
+
+##### 4.v-for
+When performing v-for traversal, you should always bring a key value to make rendering more efficient when updating the DOM.
+```
+<ul>
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+v-for should be avoided on the same element as v-if (`for example: <li>`) because v-for has a higher priority than v-if. To avoid invalid calculations and rendering, you should try to put v-if on the container's parent element instead.
+```
+<ul v-if="showList">
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+##### 5.v-if / v-else-if / v-else
+If the elements in the same set of v-if logic control are logically identical, Vue reuses the same part for more efficient element switching, `such as: value`. In order to avoid the unreasonable effect of multiplexing, you should add key to the same element for identification.
+```
+<div v-if="hasData" key="mazey-data">
+    <span>{{ mazeyData }}</span>
+</div>
+<div v-else key="mazey-none">
+    <span>no data</span>
+</div>
+```
+
+##### 6.Instruction abbreviation
+In order to unify the specification, the instruction abbreviation is always used. Using `v-bind`, `v-on` is not bad. Here is only a unified specification.
+```
+<input :value="mazeyUser" @click="verifyUser">
+```
+
+##### 7.Top-level element order of single file components
+Styles are packaged in a file, all the styles defined in a single vue file, the same name in other files will also take effect. All will have a top class name before creating a component.
+Note: The sass plugin has been added to the project, and sass syntax can be written directly in a single vue file.
+For uniformity and ease of reading, they should be placed in the order of  `<template>`、`<script>`、`<style>`.
+
+```
+<template>
+  <div class="test-model">
+    test
+  </div>
+</template>
+<script>
+  export default {
+    name: "test",
+    data() {
+      return {}
+    },
+    props: {},
+    methods: {},
+    watch: {},
+    beforeCreate() {
+    },
+    created() {
+    },
+    beforeMount() {
+    },
+    mounted() {
+    },
+    beforeUpdate() {
+    },
+    updated() {
+    },
+    beforeDestroy() {
+    },
+    destroyed() {
+    },
+    computed: {},
+    components: {},
+  }
+</script>
+
+<style lang="scss" rel="stylesheet/scss">
+  .test-model {
+
+  }
+</style>
+
+```
+
+## JavaScript specification
+
+##### 1.var / let / const
+It is recommended to no longer use var, but use let / const, prefer const. The use of any variable must be declared in advance, except that the function defined by function can be placed anywhere.
+
+##### 2.quotes
+```
+const foo = 'after division'
+const bar = `${foo}, front-end engineer`
+```
+
+##### 3.function
+Anonymous functions use the arrow function uniformly. When multiple parameters/return values are used, the object's structure assignment is used first.
+```
+function getPersonInfo ({name, sex}) {
+    // ...
+    return {name, gender}
+}
+```
+The function name is uniformly named with a camel name. The beginning of the capital letter is a constructor. The lowercase letters start with ordinary functions, and the new operator should not be used to operate ordinary functions.
+
+##### 4.object
+```
+const foo = {a: 0, b: 1}
+const bar = JSON.parse(JSON.stringify(foo))
+
+const foo = {a: 0, b: 1}
+const bar = {...foo, c: 2}
+
+const foo = {a: 3}
+Object.assign(foo, {b: 4})
+
+const myMap = new Map([])
+for (let [key, value] of myMap.entries()) {
+    // ...
+}
+```
+
+##### 5.module
+Unified management of project modules using import / export.
+```
+// lib.js
+export default {}
+
+// app.js
+import app from './lib'
+```
+
+Import is placed at the top of the file.
+
+If the module has only one output value, use `export default`,otherwise no.
+
+## HTML / CSS
+
+##### 1.Label
+
+Do not write the type attribute when referencing external CSS or JavaScript. The HTML5 default type is the text/css and text/javascript properties, so there is no need to specify them.
+```
+<link rel="stylesheet" href="//www.test.com/css/test.css">
+<script src="//www.test.com/js/test.js"></script>
+```
+
+##### 2.Naming
+The naming of Class and ID should be semantic, and you can see what you are doing by looking at the name; multiple words are connected by a link.
+```
+// positive example
+.test-header{
+    font-size: 20px;
+}
+```
+
+##### 3.Attribute abbreviation
+CSS attributes use abbreviations as much as possible to improve the efficiency and ease of understanding of the code.
+
+```
+// counter example
+border-width: 1px;
+border-style: solid;
+border-color: #ccc;
+
+// positive example
+border: 1px solid #ccc;
+```
+
+##### 4.Document type
+The HTML5 standard should always be used.
+
+```
+<!DOCTYPE html>
+```
+
+##### 5.Notes
+A block comment should be written to a module file.
+```
+/**
+* @module mazey/api
+* @author Mazey <ma...@mazey.net>
+* @description test.
+* */
+```
+
+## interface
+
+##### All interfaces are returned as Promise 
+Note that non-zero is wrong for catching catch
+
+```
+const test = () => {
+  return new Promise((resolve, reject) => {
+    resolve({
+      a:1
+    })
+  })
+}
+
+// transfer
+test.then(res => {
+  console.log(res)
+  // {a:1}
+})
+```
+
+Normal return
+```
+{
+  code:0,
+  data:{}
+  msg:'success'
+}
+```
+
+Error return
+```
+{
+  code:10000, 
+  data:{}
+  msg:'failed'
+}
+```
+If the interface is a post request, the Content-Type defaults to application/x-www-form-urlencoded; if the Content-Type is changed to application/json,
+Interface parameter transfer needs to be changed to the following way
+```
+io.post('url', payload, null, null, { emulateJSON: false } res => {
+  resolve(res)
+}).catch(e => {
+  reject(e)
+})
+```
+
+##### Related interface path
+
+dag related interface `src/js/conf/home/store/dag/actions.js`
+
+Data Source Center Related Interfaces  `src/js/conf/home/store/datasource/actions.js`
+
+Project Management Related Interfaces `src/js/conf/home/store/projects/actions.js`
+
+Resource Center Related Interfaces `src/js/conf/home/store/resource/actions.js`
+
+Security Center Related Interfaces `src/js/conf/home/store/security/actions.js`
+
+User Center Related Interfaces `src/js/conf/home/store/user/actions.js`
+
+## Extended development
+
+##### 1.Add node
+
+(1) First place the icon of the node in the `src/js/conf/home/pages/dag` folder, named `toolbar_${the English node type name defined by the backend, for example: SHELL}.png`
+
+(2)  Find the `tasksType` object in `src/js/conf/home/pages/dag/_source/config.js` and add it to it.
+```
+'DEPENDENT': {  //  The background definition node type English name is used as the key value
+  desc: 'DEPENDENT',  // tooltip desc
+  color: '#2FBFD8'  // The color represented is mainly used for tree and gantt
+}
+```
+
+(3)  Add a `${node type (lowercase)}`.vue file in `src/js/conf/home/pages/dag/_source/formModel/tasks`. The contents of the components related to the current node are written here. Every node component must have a function _verification (); after the verification is successful, the relevant data of the current component is emitted to the parent component.
+```
+/**
+ * Verification
+*/
+  _verification () {
+    // datasource subcomponent verification
+    if (!this.$refs.refDs._verifDatasource()) {
+      return false
+    }
+
+    // verification function
+    if (!this.method) {
+      this.$message.warning(`${i18n.$t('Please enter method')}`)
+      return false
+    }
+
+    // localParams subcomponent validation
+    if (!this.$refs.refLocalParams._verifProp()) {
+      return false
+    }
+    // store
+    this.$emit('on-params', {
+      type: this.type,
+      datasource: this.datasource,
+      method: this.method,
+      localParams: this.localParams
+    })
+    return true
+  }
+```
+
+(4) Common components used inside the node component are under` _source`, and `commcon.js` is used to configure public data.
+
+##### 2.Increase the status type
+(1) Find the `tasksState` object in `src/js/conf/home/pages/dag/_source/config.js` and add it to it.
+
+```
+ 'WAITTING_DEPEND': {  // 'WAITTING_DEPEND': {  //Backend defines state type, frontend is used as key value
+  id: 11,  // front-end definition id is used as a sort
+  desc: `${i18n.$t('waiting for dependency')}`,  // tooltip desc
+  color: '#5101be',  // The color represented is mainly used for tree and gantt
+  icoUnicode: '&#xe68c;',  // font icon
+  isSpin: false  // whether to rotate (requires code judgment)
+}
+```
+
+##### 3.Add the action bar tool
+(1)  Find the `toolOper` object in `src/js/conf/home/pages/dag/_source/config.js` and add it to it.
+```
+{
+  code: 'pointer',  // tool identifier
+  icon: '&#xe781;',  // tool icon
+  disable: disable,  // disable
+  desc: `${i18n.$t('Drag node and selected item')}`  // tooltip desc
+}
+```
+
+(2) Tool classes are returned as a constructor  `src/js/conf/home/pages/dag/_source/plugIn`
+
+`downChart.js`  =>  dag image download processing
+
+`dragZoom.js`  =>  mouse zoom effect processing
+
+`jsPlumbHandle.js`  =>  drag and drop line processing
+
+`util.js`  =>   belongs to the `plugIn` tool class
+
+
+The operation is handled in the `src/js/conf/home/pages/dag/_source/dag.js` => `toolbarEvent` event.
+
+
+##### 3.Add a routing page
+
+(1) First add a routing address`src/js/conf/home/router/index.js` in route management
+```
+routing address{
+  path: '/test',  // routing address
+  name: 'test',  // alias
+  component: resolve => require(['../pages/test/index'], resolve),  // route corresponding component entry file
+  meta: {
+    title: `${i18n.$t('test')} - EasyScheduler`  // title display
+  }
+},
+```
+
+(2)Create a `test` folder in `src/js/conf/home/pages` and create an `index.vue `entry file in the folder.
+
+    This will give you direct access to`http://localhost:8888/#/test`
+
+
+##### 4.Increase the preset mailbox
+
+Find the `src/lib/localData/email.js` startup and timed email address input to automatically pull down the match.
+```
+export default ["test@analysys.com.cn","test1@analysys.com.cn","test3@analysys.com.cn"]
+```
+
+##### 5.Authority management and disabled state processing
+
+The permission gives the userType according to the backUser interface `getUserInfo` interface: `"ADMIN_USER/GENERAL_USER" `permission to control whether the page operation button is `disabled`.
+
+specific operation:`src/js/module/permissions/index.js`
+
+disabled processing:`src/js/module/mixin/disabledState.js`
+
diff --git a/docs/2.0.6/docs/en/contribute/have-questions.md b/docs/2.0.6/docs/en/contribute/have-questions.md
new file mode 100644
index 000000000..2d8475998
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/have-questions.md
@@ -0,0 +1,65 @@
+# Have Questions?
+
+## StackOverflow
+
+For usage questions, it is recommended you use the StackOverflow tag [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) as it is an active forum for DolphinScheduler users’ questions and answers.
+
+Some quick tips when using StackOverflow:
+
+- Prior to submitting questions, please:
+  - Search StackOverflow’s [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) tag to see if your question has already been answered
+- Please follow the StackOverflow [code of conduct](https://stackoverflow.com/help/how-to-ask)
+- Always use the apache-dolphinscheduler tag when asking questions
+- Please do not cross-post between [StackOverflow](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) and [GitHub issues](https://github.com/apache/dolphinscheduler/issues/new/choose)
+
+Question template:
+
+> **Describe the question**
+>
+> A clear and concise description of what the question is.
+>
+> **Which version of DolphinScheduler:**
+>
+>  -[1.3.0-preview]
+>
+> **Additional context**
+>
+> Add any other context about the problem here.
+>
+> **Requirement or improvement**
+>
+> \- Please describe about your requirements or improvement suggestions.
+
+For broad or opinion-based questions, requests for external resources, debugging issues, bugs, contributing to the project, and scenario discussions, it is recommended you use [GitHub issues](https://github.com/apache/dolphinscheduler/issues/new/choose) or the dev@dolphinscheduler.apache.org mailing list.
+
+## Mailing Lists
+
+- [dev@dolphinscheduler.apache.org](https://lists.apache.org/list.html?dev@dolphinscheduler.apache.org) is for people who want to contribute code to DolphinScheduler. [(subscribe)](mailto:dev-subscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20subscribe)) [(unsubscribe)](mailto:dev-unsubscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20unsubscribe)) [(archives)](http://lists.apache.org/list.html?dev@dolphinscheduler.apache.org)
+
+Some quick tips when using email:
+
+- Prior to submitting questions, please:
+  - Search StackOverflow at [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) to see if your question has already been answered
+
+- Tagging the subject line of your email will help you get a faster response, e.g. [api-server]: How to get open api interface?
+
+- Tags may help identify a topic by:
+  - Component: MasterServer,ApiServer,WorkerServer,AlertServer, etc
+  - Level: Beginner, Intermediate, Advanced
+  - Scenario: Debug, How-to
+
+- For error logs or long code examples, please use [GitHub gist](https://gist.github.com/) and include only a few lines of the pertinent code / log within the email.
+
+## Chat Rooms
+
+Chat rooms are great for quick questions or discussions on specialized topics. 
+
+The following chat rooms are officially part of Apache DolphinScheduler:
+
+The Slack workspace URL: http://asf-dolphinscheduler.slack.com/.
+
+You can join through the invitation URL: https://s.apache.org/dolphinscheduler-slack.
+
+This chat room is used for questions and discussions related to using DolphinScheduler.
+
+ 
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/join/DS-License.md b/docs/2.0.6/docs/en/contribute/join/DS-License.md
new file mode 100644
index 000000000..c3f13d7bf
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/DS-License.md
@@ -0,0 +1,42 @@
+# License Notice
+
+As we know that DolphinScheduler is an open-source undergoing project at The Apache Software Foundation (ASF), which means that you have to follow the Apache way to become the DolphinScheduler contributor. Furthermore, Apache has extremely strict rules according to the License. This passage will explain the ASF license and how to avoid License risks at the early stage when you participate in DolphinScheduler.
+
+Note: This article only applies to the Apache projects.
+
+### Licenses Could be Accepted to the Apache Project
+
+You have to pay attention to the following open-source software protocols which Apache projects support when you intend to add a new feature to DolphinScheduler (or other Apache projects) that refers to or depends on other open-source software.
+
+[ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html)
+
+If the 3rd party software is not present at the above policy, we are sorry that your code can not pass the audit and we suggest searching for other substitute plans.
+
+In addition, when you need to introduce new dependencies to the project, please email dev@dolphinscheduler.apache.org explaining the reason and the expected impact, so the community can discuss it. You also need at least 3 positive votes from the PPMC to finish the whole step.
+
+### How to Legally Use 3rd Party Open-source Software in the DolphinScheduler
+
+Moreover, when we intend to refer a new software ( not limited to 3rd party jar, text, CSS, js, pics, icons, audios etc and modifications based on 3rd party files) to our project, we need to use them legally in addition to the permission of ASF. Refer to the following article:
+
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+
+
+For example, we should contain the NOTICE file (every open-source project has NOTICE file, generally under root directory) of ZooKeeper in our project when we are using ZooKeeper. As the Apache explains, "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work.
+
+We are not going to dive into every 3rd party open-source license policy, you may look up them if interested.
+
+### DolphinScheduler-License Check Rules
+
+In general, we would have our License-check scripts in our project. DolphinScheduler-License is provided by [kezhenxu94](https://github.com/kezhenxu94) and differs a bit from other open-source projects. In short, we are trying to make sure license issues are avoided in the first place.
+
+We need to follow the following steps when we need to add new jars or external resources:
+
+* Add the name and the version of the jar file in the known-dependencies.txt
+* Add relevant maven repository address under 'dolphinscheduler-dist/release-docs/LICENSE' directory
+* Append relevant NOTICE files under 'dolphinscheduler-dist/release-docs/NOTICE' directory and make sure they are no different to the original repository
+* Add relevant source code protocols under 'dolphinscheduler-dist/release-docs/license/' directory and the file name should be named as license+filename.txt. Eg: license-zk.txt
+
+### References
+
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+* [ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html)
diff --git a/docs/2.0.6/docs/en/contribute/join/become-a-committer.md b/docs/2.0.6/docs/en/contribute/join/become-a-committer.md
new file mode 100644
index 000000000..deac7d863
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/become-a-committer.md
@@ -0,0 +1,11 @@
+# How to Become DolphinScheduler Committer
+
+Anyone can be a contributor to an Apache project. Being a contributor simply means that you take an interest in the project and contribute in some way, ranging from asking sensible questions (which documents the project and provides feedback to developers) through to providing new features as patches.
+
+If you become a valuable contributor to the project you may well be invited to become a committer. Committer is a term used at the ASF to signify someone who is committed to a particular project. It brings with it the privilege of write access to the project repository and resources.
+
+In the DolphinScheduler community, a committer who has earned even more merit can be invited to be a part of the Project Management Committee (PMC).
+
+One thing that is sometimes hard to understand when you are new to the open development process used at the ASF, is that we value the community more than the code. A strong and healthy community will be respectful and be a fun and rewarding place. More importantly, a diverse and healthy community can continue to support the code over the longer term, even as individual companies come and go from the field.
+
+More details could be found [here](https://community.apache.org/contributors/).
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/join/code-conduct.md b/docs/2.0.6/docs/en/contribute/join/code-conduct.md
new file mode 100644
index 000000000..5505e9585
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/code-conduct.md
@@ -0,0 +1,68 @@
+# Code of Conduct
+
+The following Code of Conduct is based on full compliance with the [Apache Software Foundation Code of Conduct](https://www.apache.org/foundation/policies/conduct.html).
+
+## Development philosophy
+ - **Consistent** code style, naming, and usage are consistent.  
+ - **Easy to read** code is obvious, easy to read and understand, when debugging one knows the intent of the code.
+ - **Neat** agree with the concepts of *Refactoring* and *Clean Code* and pursue clean and elegant code.
+ - **Abstract** hierarchy is clear and the concepts are refined and reasonable. Keep methods, classes, packages, and modules at the same level of abstraction.
+ - **Heart** Maintain a sense of responsibility and continue to be carved in the spirit of artisans.
+ 
+## Development specifications
+
+ - Executing `mvn -U clean package -Prelease` can compile and test through all test cases. 
+ - The test coverage tool checks for no less than dev branch coverage.
+ - In the root directory, use Checkstyle to check your code; any violation of the validation rules requires a special reason. The template is located at ds_check_style.xml.
+ - Follow the coding specifications.
+
+## Coding specifications
+
+ - Use linux line breaks.
+ - Indentation (including empty lines) is consistent with the last line.
+ - An empty line is required between the class declaration and the following variable or method.
+ - There should be no meaningless empty lines.
+ - Classes, methods, and variables should be named as the name implies and abbreviations should be avoided.
+ - Return value variables are named after `result`; `each` is used in loops to name loop variables; and `entry` is used in map instead of `each`.
+ - The caught exception is named `e`; if the exception is caught and nothing is done with it, it is named `ignored`.
+ - Configuration Files are named in camelCase, and file names are lowercase with uppercase initial/starting letter.
+ - Code that requires comment interpretation should be as small as possible and interpreted by method name.
+ - `equals` and `==` In a conditional expression, the constant is left, the variable is on the right, and in the expression greater than less than condition, the variable is left and the constant is right.
+ - In addition to the abstract classes used for inheritance, try to design the class as `final`.
+ - Extract nested loops into methods as much as possible.
+ - The order in which member variables are defined and the order in which parameters are passed is consistent across classes and methods.
+ - Priority is given to the use of guard statements.
+ - Classes and methods have minimal access control.
+ - The private method used by the method should follow the method, and if there are multiple private methods, the writing private method should appear in the same order as the private method in the original method.
+ - Method entry and return values are not allowed to be `null`.
+ - The return and assignment statements of if else are preferred with the ternary operator.
+ - Priority is given to `LinkedList` and only use `ArrayList` if you need to get element values in the collection through the index.
+ - Collection types such as `ArrayList`,`HashMap` that may produce expansion must specify the initial size of the collection to avoid expansion.
+ - Logs and notes are always in English.
+ - Comments can only contain `javadoc`, `todo` and `fixme`.
+ - Exposed classes and methods must have javadoc, other classes and methods and methods that override the parent class do not require javadoc.
+
+## Unit test specifications
+
+ - Test code and production code are subject to the same code specifications.
+ - Unit tests are subject to AIR (Automatic, Independent, Repeatable) Design concept.
+   - Automatic: Unit tests should be fully automated, not interactive. Manual checking of output results is prohibited, `System.out`, `log`, etc. are not allowed, and must be verified with assertions. 
+   - Independent: It is prohibited to call each other between unit test cases and to rely on the order of execution. Each unit test can be run independently.
+   - Repeatable: Unit tests cannot be affected by the external environment and can be repeated. 
+ - Unit tests are subject to BCDE(Border, Correct, Design, Error) Design principles.
+   - Border (Boundary value test): The expected results are obtained by entering the boundaries of loop boundaries, special values, data order, etc.
+   - Correct (Correctness test): The expected results are obtained with the correct input.
+   - Design (Rationality Design): Design high-quality unit tests in combination with production code design.
+   - Error (Fault tolerance test): The expected results are obtained through incorrect input such as illegal data, abnormal flow, etc.
+ - If there is no special reason, the test needs to be fully covered.
+ - Each test case needs to be accurately asserted.
+ - Prepare the environment for code separation from the test code.
+ - Only jUnit `Assert`,hamcrest `CoreMatchers`,Mockito Correlation can use static import.
+ - Single-data assertions should use `assertTrue`,`assertFalse`,`assertNull` and `assertNotNull`.
+ - Multi-data assertions should use `assertThat`.
+ - Accurate assertion, try not to use `not`,`containsString` assertion.
+ - The true value of the test case should be named actualXXX, and the expected value should be named expectedXXX.
+ - Classes and Methods with `@Test` labels do not require javadoc.
+
+ - Public specifications.
+   - Each line is no longer than `200` in length, ensuring that each line is semantically complete for easy understanding.
diff --git a/docs/2.0.6/docs/en/contribute/join/commit-message.md b/docs/2.0.6/docs/en/contribute/join/commit-message.md
new file mode 100644
index 000000000..92269a7f6
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/commit-message.md
@@ -0,0 +1,94 @@
+# Commit Message Notice
+
+### Preface
+
+A good commit message can help other developers (or future developers) quickly understand the context of related changes, and can also help project managers determine whether the commit is suitable for inclusion in the release. But when we checked the commit logs of many open source projects, we found an interesting problem. Some developers have very good code quality, but the commit message record is rather confusing. When other contributors or learners are viewing the code, it can’t be [...]
+The purpose of the changes before and after the submission, as Peter Hutterer said:Re-establishing the context of a piece of code is wasteful. We can’t avoid it completely, so our efforts should go to reducing it as much as possible. Commit messages can do exactly that and as a result, a commit message shows whether a developer is a good collaborator. Therefore, DolphinScheduler developed the protocol in conjunction with other communities and official Apache documents.
+
+### Commit Message RIP
+
+#### 1:Clearly modify the content
+
+A commit message should clearly state what issues (bug fixes, function enhancements, etc.) the submission solves, so that other developers can better track the issues and clarify the optimization during the version iteration process.
+
+#### 2:Associate the corresponding Pull Request or Issue
+
+When our changes are large, the commit message should best be associated with the relevant Issue or Pull Request on GitHub, so that our developers can quickly understand the context of the code submission through the associated information when reviewing the code. If the current commit is for an issue, then the issue can be closed in the Footer section.
+
+#### 3:Unified format
+
+The formatted CommitMessage can help provide more historical information for quick browsing, and it can also generate a Change Log directly from commit.
+
+Commit message should include three parts: Header, Body and Footer. Among them, Header is required, Body and Footer can be omitted.
+
+##### Header
+
+The header part has only one line, including three fields: type (required), scope (optional), and subject (required).
+
+[DS-ISSUE number][type] subject
+
+(1) Type is used to indicate the category of commit, and only the following 7 types are allowed.
+
+- feat:New features
+- fix:Bug fixes
+- docs:Documentation
+- style: Format (does not affect changes in code operation)
+- refactor:Refactoring (It is not a new feature or a code change to fix a bug)
+- test:Add test
+- chore:Changes in the build process or auxiliary tools
+
+If the type is feat and fix, the commit will definitely appear in the change log. Other types (docs, chore, style, refactor, test) are not recommended.
+
+(2) Scope
+
+Scope is used to indicate the scope of commit impact, such as server, remote, etc. If there is no suitable scope, you can use \*.
+
+(3) subject
+
+Subject is a short description of the purpose of the commit, no more than 50 characters.
+
+##### Body
+
+The body part is a detailed description of this commit, which can be divided into multiple lines, and the line break will wrap with 72 characters to avoid automatic line wrapping affecting the appearance.
+
+Note the following points in the Body section:
+
+- Use the verb-object structure, note the use of present tense. For example, use change instead of changed or changes
+
+- Don't capitalize the first letter
+
+- The end of the sentence does not need a ‘.’ (period)
+
+##### Footer
+
+Footer only works in two situations
+
+(1) Incompatible changes
+
+If the current code is not compatible with the previous version, the Footer part starts with BREAKING CHANGE, followed by a description of the change, the reason for the change, and the migration method.
+
+(2) Close Issue
+
+If the current commit is for a certain issue, you can close the issue in the Footer section, or close multiple issues at once.
+
+##### For Example
+
+```
+[DS-001][docs-en] add commit message
+
+- commit message RIP
+- build some conventions
+- help the commit messages become clean and tidy
+- help developers and release managers better track issues
+  and clarify the optimization in the version iteration
+
+This closes #001
+```
+
+### Reference documents
+
+[Commit message format](https://cwiki.apache.org/confluence/display/GEODE/Commit+Message+Format)
+
+[On commit messages-Peter Hutterer](http://who-t.blogspot.com/2009/12/on-commit-messages.html)
+
+[RocketMQ Community Operation Conventions](https://mp.weixin.qq.com/s/LKM4IXAY-7dKhTzGu5-oug)
diff --git a/docs/2.0.6/docs/en/contribute/join/contribute.md b/docs/2.0.6/docs/en/contribute/join/contribute.md
new file mode 100644
index 000000000..ea8959604
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/contribute.md
@@ -0,0 +1,40 @@
+# Participate in Contributing
+
+First of all, thank you very much for choosing and using DolphinScheduler, and welcome to join the DolphinScheduler family!
+
+We encourage any form of participation in the community, which may eventually lead you to become a Committer or PPMC member, such as:
+* Report problems you encounter by raising a GitHub [issue](https://github.com/apache/dolphinscheduler/issues).
+* Answer the issue questions that others are asking.
+* Help improve the documentation.
+* Help your project add test cases.
+* Add comments to the code.
+* Submit a PR that fixes the bug or Feature.
+* Publish application case practice, scheduling process analysis, or technical articles related to scheduling.
+* Help promote DolphinScheduler, participate in technical conferences or meetup, sharing and more.
+
+Welcome to the contributing team and join open source starting with submitting your first PR.
+ - For example, add code comments or find "easy to fix" tags or some very simple issue (misspellings, etc.) and so on, first familiarize yourself with the submission process through the first simple PR.
+ 
+Note: Contributions are not limited to PRs only; anything that helps the development of the project counts.
+
+I'm sure you'll benefit from open source by participating in DolphinScheduler!
+
+### 1. Participate in documentation contributions.
+
+Refer to the [Submit Guide-Document Notice](./document.md)
+
+### 2. Participate in code contributions.
+
+Refer to the [Submit Guide-Issue Notice](./issue.md), [Submit Guide-Pull Request Notice](./pull-request.md), [Submit Guide-Commit Message Notice](./commit-message.md)
+
+### 3. How to pick up an Issue and submit a Pull Request.
+
+If you want to implement a Feature or fix a Bug. Please refer to the following:
+
+* All Bugs and the new Features are recommended and managed using the Issues Page.
+* If you want to develop a Feature, first reply to the Issue associated with that feature, indicating that you are currently working on it. And set yourself a "deadline" when to Submit the Feature, and add it in the reply comment.
+* It's a good idea to find a mentor (or an instructor) in the core contributors who gives immediate feedback on design and functional implementation.
+* You should create a new branch to start your work, to get the name of the branch refer to the [Submit Guide-Pull Request Notice](./pull-request.md). For example, if you want to complete the feature and submit Issue 111, your branch name should be feature-111. The feature name can be determined after discussion with the instructor.
+* When you're done, send a Pull Request to dolphinscheduler; please refer to the [Submit Guide-Submit Pull Request Process](./submit-code.md)
+
+If you want to submit a Pull Request to complete a Feature or fix a Bug, it is recommended that you start with the `good first issue`, `easy-to-fix` issues, complete a small function to submit, do not change too many files at a time, changing too many files will also put a lot of pressure on Reviewers, it is recommended to submit them through multiple Pull Requests, not all at once.
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/join/document.md b/docs/2.0.6/docs/en/contribute/join/document.md
new file mode 100644
index 000000000..f2fd83140
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/document.md
@@ -0,0 +1,62 @@
+# Documentation Notice
+
+Good documentation is critical for any type of software. Any contribution that can improve the DolphinScheduler documentation is welcome.
+
+###  Get the document project
+
+Documentation for the DolphinScheduler project is maintained in a separate [git repository](https://github.com/apache/dolphinscheduler-website).
+
+First you need to fork the document project into your own github repository, and then clone the document to your local computer.
+
+```
+git clone https://github.com/<your-github-user-name>/dolphinscheduler-website
+```
+
+### The document environment
+
+The DolphinScheduler website is supported by [docsite](https://github.com/chengshiwen/docsite-ext)
+
+Make sure that your Node.js version is 10.x; docsite does not yet support versions higher than 10.x.
+
+### Document build guide
+
+1. Run `npm install` in the root directory to install the dependencies.
+
+2. Run commands to collect resources 2.1.Run `export PROTOCOL_MODE=ssh` tells Git clone resource via SSH protocol instead of HTTPS protocol. 2.2.Run `./scripts/prepare_docs.sh` prepare all related resources, for more information you could see [how prepare script work](https://github.com/apache/dolphinscheduler-website/blob/master/HOW_PREPARE_WOKR.md).
+
+3. Run `npm run start` in the root directory to start a local server, you will see the website in 'http://localhost:8080'.
+
+4. Run `npm run build` to build source code into dist directory.
+
+5. Verify your change locally: `python -m SimpleHTTPServer 8000`, when your python version is 3 use :`python3 -m http.server 8000` instead.
+
+If the latest version of node is installed locally, consider using `nvm` to allow different versions of `node` to run on your computer.
+
+1. Refer to the [Instructions](http://nvm.sh) to install nvm.
+
+2. Run `nvm install v10.23.1` to install node v10.
+
+3. Run `nvm use v10.23.1` to switch the current working environment to node v10.
+
+Now you can run and build the website in your local environment.
+
+### The document specification
+
+1. **Spaces are required** between Chinese characters and English or numbers, and **spaces are not required** between Chinese punctuation marks and English or numbers, to enhance the aesthetics and readability of the Chinese-English mix.
+
+2. It is recommended that you use "you" in general. Of course, you can use the term when necessary, such as when there is a warning prompt.
+
+### How to submit a document Pull Request
+
+1. Do not use "git add." to commit all changes.
+
+2. Simply push the changed files, for example:
+
+ * `*.md`
+ * `blog.js or docs.js or site.js`
+
+3. Submit the Pull Request to the **master** branch.
+
+### Reference to the documentation
+
+[Apache Flink Translation Specifications](https://cwiki.apache.org/confluence/display/FLINK/Flink+Translation+Specifications)
diff --git a/docs/2.0.6/docs/en/contribute/join/issue.md b/docs/2.0.6/docs/en/contribute/join/issue.md
new file mode 100644
index 000000000..376b06598
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/issue.md
@@ -0,0 +1,136 @@
+# Issue Notice
+
+## Preface
+Issues function is used to track various Features, Bugs, Functions, etc. The project maintainer can organize the tasks to be completed through issues.
+
+Issue is an important step in drawing out a feature or bug,
+and the contents that can be discussed in an issue are not limited to the features, the causes of the existing bugs, the research on preliminary scheme, and the corresponding implementation design and code design.
+
+And only when the Issue is approved, the corresponding Pull Request should be implemented.
+
+If an issue corresponds to a large feature, it is recommended to divide it into multiple small issues according to the functional modules and other dimensions.
+
+## Specification
+
+### Issue title
+
+Title Format: [`Issue Type`][`Module Name`] `Issue Description`
+
+The `Issue Type` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue Type</th>
+            <th style="width: 20%; text-align: center;">Description</th>
+            <th style="width: 20%; text-align: center;">Example</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">Include expected new features and functions</td>
+            <td style="text-align: center;">[Feature][api] Add xxx api in xxx controller</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">Bugs in the program</td>
+            <td style="text-align: center;">[Bug][api] Throw exception when xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">Some improvements of the current program, not limited to code format, program performance, etc</td>
+            <td style="text-align: center;">[Improvement][server] Improve xxx between Master and Worker</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">Specifically for the test case</td>
+            <td style="text-align: center;">[Test][server] Add xxx e2e test</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">Those generally are subtasks of feature class. For large features, they can be divided into many small subtasks to complete one by one</td>
+            <td style="text-align: center;">[Sub-Task][server] Implement xxx in xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+The `Module Name` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Module Name</th>
+            <th style="width: 20%; text-align: center;">Description</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">alert</td>
+            <td style="text-align: center;">Alert module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">api</td>
+            <td style="text-align: center;">Application program interface layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">service</td>
+            <td style="text-align: center;">Application service layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">dao</td>
+            <td style="text-align: center;">Application data access layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">plugin</td>
+            <td style="text-align: center;">Plugin module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">remote</td>
+            <td style="text-align: center;">Communication module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">server</td>
+            <td style="text-align: center;">Server module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">ui</td>
+            <td style="text-align: center;">Front end module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs-zh</td>
+            <td style="text-align: center;">Chinese document module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs</td>
+            <td style="text-align: center;">English document module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">...</td>
+            <td style="text-align: center;">-</td>
+        </tr>
+    </tbody>
+</table>
+
+### Issue content template
+
+https://github.com/apache/dolphinscheduler/tree/dev/.github/ISSUE_TEMPLATE
+
+### Contributor
+
+Except for some special cases, it is recommended to discuss under issue or mailing list to determine the design scheme or provide the design scheme,
+as well as the code implementation design before completing the issue.
+
+If there are many different solutions, it is suggested to make a decision through mailing list or voting under issue.
+The issue can be implemented after final scheme and code implementation design being approved.
+The main purpose of this is to avoid wasting time caused by different opinions on implementation design or reconstruction in the pull request review stage.
+
+### Question
+
+- How to deal with the user who raises an issue does not know the module corresponding to the issue.
+
+    It is true that most users do not know which module an issue belongs to when raising it.
+    In fact, this is very common in many open source communities. In such cases, the committer / contributor usually knows the module affected by the issue.
+    If the issue is really valuable after being approved by committer and contributor, then the committer can modify the issue title according to the specific module involved in the issue,
+    or leave a message to the user who raises the issue to modify it into the corresponding title.
+
diff --git a/docs/2.0.6/docs/en/contribute/join/microbench.md b/docs/2.0.6/docs/en/contribute/join/microbench.md
new file mode 100644
index 000000000..5cf148cb7
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/microbench.md
@@ -0,0 +1,100 @@
+# Micro BenchMark Notice
+
+All optimization must be based on data verification, and blind optimization is rejected. Based on this, we provide the MicroBench module.
+
+The MicroBench module is based on the OpenJDK JMH component (HotSpot's recommended benchmark test program). When you start benchmarking, you don't need additional dependencies.
+
+JMH, the Java MicroBenchmark Harness, is a tool suite dedicated to code microbenchmark testing. What is Micro Benchmark? Simply put, it is based on method-level benchmark testing, with an accuracy of microseconds. When you locate a hot method and want to further optimize the performance of the method, you can use JMH to quantitatively analyze the optimized results.
+
+### Several points to note in Java benchmark testing:
+
+- Prevent useless code from entering the test method.
+
+- Concurrent testing.
+
+- The test results are presented.
+
+### Typical application scenarios of JMH are:
+
+- 1: Quantitatively analyze the optimization effect of a hotspot function
+
+- 2: Want to quantitatively know how long a function needs to be executed, and the correlation between execution time and input variables
+
+- 3: Compare multiple implementations of a function
+
+DolphinScheduler-MicroBench provides AbstractBaseBenchmark, you can inherit from it, write your benchmark code, AbstractMicroBenchmark can guarantee to run in JUnit mode.
+
+### Customized operating parameters
+
+The default AbstractMicrobenchmark configuration is
+
+Warmup times 10 (warmupIterations)
+
+Number of tests 10 (measureIterations)
+
+Fork quantity 2 (forkCount)
+
+You can specify these parameters at startup: -DmeasureIterations, -DperfReportDir (output benchmark test result file directory), -DwarmupIterations, -DforkCount
+
+### DolphinScheduler-MicroBench Introduction
+
+It is generally not recommended to use fewer cycles when running tests. However, a smaller number of tests helps to verify the work during the benchmark test. After the verification is over, run a large number of benchmark tests.
+
+```java
+@Warmup(iterations = 2, time = 1)
+@Measurement(iterations = 4, time = 1)
+@State(Scope.Benchmark)
+public class EnumBenchMark extends AbstractBaseBenchmark {
+
+}
+```
+
+This can run benchmarks at the method level or the class level. Command line parameters will override the parameters on the annotation.
+
+```java
+@Benchmark // Method annotation, indicating that the method is an object that needs to be benchmarked.
+@BenchmarkMode(Mode.AverageTime) // Optional benchmark test mode is obtained through enumeration
+@OutputTimeUnit(TimeUnit.MICROSECONDS) // Output time unit
+public void enumStaticMapTest() {
+    TestTypeEnum.newGetNameByType(testNum);
+}
+```
+
+When your benchmark test is written, you can run it to view the specific test conditions: (The actual results depend on your system configuration)
+
+First, it will warm up our code,
+
+```java
+# Warmup Iteration   1: 0.007 us/op
+# Warmup Iteration   2: 0.008 us/op
+Iteration   1: 0.004 us/op
+Iteration   2: 0.004 us/op
+Iteration   3: 0.004 us/op
+Iteration   4: 0.004 us/op
+```
+
+After warmup, we usually get the following results
+
+```java
+Benchmark                        (testNum)   Mode  Cnt          Score           Error  Units
+EnumBenchMark.simpleTest               101  thrpt    8  428750972.826 ±  66511362.350  ops/s
+EnumBenchMark.simpleTest               108  thrpt    8  299615240.337 ± 290089561.671  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  288423221.721 ± 130542990.747  ops/s
+EnumBenchMark.simpleTest               104  thrpt    8  236811792.152 ± 155355935.479  ops/s
+EnumBenchMark.simpleTest               105  thrpt    8  472247775.246 ±  45769877.951  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  455473025.252 ±  61212956.944  ops/s
+EnumBenchMark.enumStaticMapTest        101   avgt    8          0.006 ±         0.003  us/op
+EnumBenchMark.enumStaticMapTest        108   avgt    8          0.005 ±         0.002  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.006 ±         0.005  us/op
+EnumBenchMark.enumStaticMapTest        104   avgt    8          0.006 ±         0.004  us/op
+EnumBenchMark.enumStaticMapTest        105   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumValuesTest           101   avgt    8          0.011 ±         0.004  us/op
+EnumBenchMark.enumValuesTest           108   avgt    8          0.025 ±         0.016  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.019 ±         0.010  us/op
+EnumBenchMark.enumValuesTest           104   avgt    8          0.018 ±         0.018  us/op
+EnumBenchMark.enumValuesTest           105   avgt    8          0.014 ±         0.012  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.012 ±         0.009  us/op
+```
+
+OpenJDK officially gave a lot of sample codes, interested students can query and learn JMH by themselves:[OpenJDK-JMH-Example](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/)
diff --git a/docs/2.0.6/docs/en/contribute/join/pull-request.md b/docs/2.0.6/docs/en/contribute/join/pull-request.md
new file mode 100644
index 000000000..fece5d74b
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/pull-request.md
@@ -0,0 +1,94 @@
+# Pull Request Notice
+
+## Preface
+Pull Request is a way of software cooperation, which is a process of bringing code involving different functions into the trunk. During this process, the code can be discussed, reviewed, and modified.
+
+In Pull Request, we try not to discuss the implementation of the code. The general implementation of the code and its logic should be determined in Issue. In the Pull Request, we only focus on the code format and code specification, so as to avoid wasting time caused by different opinions on implementation.
+
+## Specification
+
+### Pull Request Title
+
+Title Format: [`Pull Request Type`-`Issue No`][`Module Name`] `Pull Request Description`
+
+The corresponding relationship between `Pull Request Type` and `Issue Type` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue Type</th>
+            <th style="width: 20%; text-align: center;">Pull Request Type</th>
+            <th style="width: 20%; text-align: center;">Example(Suppose Issue No is 3333)</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">Fix</td>
+            <td style="text-align: center;">[Fix-3333][server] Fix xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">[Improvement-3333][alert] Improve the performance of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">[Test-3333][api] Add the e2e test of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">(Parent type corresponding to Sub-Task)</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+`Issue No` refers to the Issue number corresponding to the current Pull Request to be resolved, `Module Name` is the same as the `Module Name` of Issue.
+
+### Pull Request Branch
+
+Branch name format: `Pull Request type`-`Issue number`. e.g. Feature-3333
+
+### Pull Request Content
+
+Please refer to the commit message section.
+
+### Pull Request Code Style
+
+Code style is the thing you have to consider when you submit pull request for DolphinScheduler. We using [Checkstyle](https://checkstyle.sourceforge.io), a development tool to help programmers write Java code that adheres to a coding standard, in CI to keep DolphinScheduler codebase in the same style. Your pull request could not be merged if your code style checker failed. You could format your code by *Checkstyle* in your local environment before you submit your pull request to check co [...]
+
+1. Prepare Checkstyle configuration file: You could download it manually by [click here](https://github.com/apache/dolphinscheduler/blob/3.0.0/style/checkstyle.xml), but find it in DolphinScheduler repository would be a better way. You could find configuration file in the path `style/checkstyle.xml` after you clone repository from Github.
+
+2. Download Checkstyle plugins in Intellij IDEA: Search plugin by keyword **CheckStyle-IDEA** or install in [this page](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea). You could see [install plugin](https://www.jetbrains.com/help/idea/managing-plugins.html#install_plugin_from_repo) if you do not know how to install plugin in Intellij IDEA
+
+3. Configure and activate Checkstyle and Intellij IDEA code-style: After completing the above steps, you could configure and activate it in your environment. You could find Checkstyle plugins in the path `Preferences -> Tool -> Checkstyle`. After that you could activate Checkstyles as screenshot show
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/checkstyle-idea.png" alt="checkstyle idea configuration" />
+</p>
+
+For now your Checkstyle plugins are setup, it would show codes and files which out of style. We highly recommend you configure Intellij IDEA code-style for auto-formatting your code in Intellij IDEA, you could find this setting in `Preferences -> Editor -> Code Style -> Java` and then activate it as screenshot show
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/code-style-idea.png" alt="code style idea configuration" />
+</p>
+
+4. Format your codebase in Intellij IDEA before submit your pull request: After you done above steps, you could using Intellij IDEA shortcut `Command + L`(for Mac) or `Ctrl+L`(for Windows) to format your code. The best time to format your code is before you commit your change to your local git repository.
+
+### Question
+
+- How to deal with one Pull Request to many Issues scenario.
+
+  First of all, there are fewer scenarios for one Pull Request to many Issues.
+  The root cause is that multiple issues need to do the same thing.
+  Usually, there are two solutions to this scenario: the first is to merge multiple issues with into the same issue, and then close the other issues;
+  the second is multiple issues have subtle differences.
+  In this scenario, the responsibilities of each issue can be clearly divided. The type of each issue is marked as Sub-Task, and then these sub task type issues are associated with one issue.
+  And each Pull Request is submitted should be associated with only one issue of a sub task.
\ No newline at end of file
diff --git a/docs/2.0.6/docs/en/contribute/join/review.md b/docs/2.0.6/docs/en/contribute/join/review.md
new file mode 100644
index 000000000..e4dc79941
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/review.md
@@ -0,0 +1,153 @@
+# Community Review
+
+Besides submitting Issues and pull requests to the GitHub repository mentioned in [team](/en-us/community/community.html), another important way to
+contribute to DolphinScheduler is reviewing GitHub Issues or Pull Requests. Not only can you learn the latest news and
+direction of the community, but you can also learn good designs from others while reviewing. At the same time, you can
+increase your exposure in the community and accumulate your honor.
+
+Anyone is encouraged to review Issues and Pull Requests. We also raise a Help Wanted email discussion to solicit contributors
+from the community to review them. You could see detail in [mail][mail-review-wanted], we put the results of mail thread
+in [GitHub Discussion][discussion-result-review-wanted].
+
+> Note: It is only users mentioned in the [GitHub Discussion][discussion-result-review-wanted] can review Issues or Pull
+> Requests, Community advocates **Anyone is encouraged to review Issues and Pull Requests**. Users in 
+> [GitHub Discussion][discussion-result-review-wanted] show their willing to review when we collect in the mail thread.
+> The advantage of this list is when the community has discussion, in addition to the mention Members in [team](/en-us/community/community.html),
+> you can also find some help in [GitHub Discussion][discussion-result-review-wanted] people. If you want to join the
+> [GitHub Discussion][discussion-result-review-wanted], please comment in that discussion and leave a module you are interested
+> in, and the maintainer will add you to the list.
+
+## How to Review
+
+DolphinScheduler receives community contributions through GitHub, and all its Issues and Pull Requests are hosted in GitHub.
+If you want to join community by reviewing, please go to section [review Issues](#issues), if you prefer Pull Requests please
+go to section [review Pull Requests](#pull-requests).
+
+### Issues
+
+Reviewing Issues means discussing [Issues][all-issues] in GitHub and giving suggestions on them. This includes, but is not limited to, the following situations
+
+| Situation | Reason | Label | Action |
+| ------ | ------ | ------ | ------ |
+| wont fix | Has been fixed in dev branch | [wontfix][label-wontfix] | Close Issue, inform creator the fixed version if it already release |
+| duplicate issue | Had the same problem before | [duplicate][label-duplicate] | Close issue, inform creator the link of same issue |
+| Description not clearly | Without detail reproduce step | [need more information][label-need-more-information] | Inform creator add more description |
+
+In addition to giving suggestions, adding labels to issues is also important during review. Labeled issues can be retrieved
+more easily, which is convenient for further processing. An issue can carry more than one label. Common issue categories are:
+
+| Label | Meaning |
+| ------ | ------ |
+| [UI][label-UI] | UI and front-end related |
+| [security][label-security] | Security Issue |
+| [user experience][label-user-experience] | User experience Issue |
+| [development][label-development] | Development Issue |
+| [Python][label-Python] | Python Issue |
+| [plug-in][label-plug-in] | Plug-in Issue |
+| [document][label-document] | Document Issue |
+| [docker][label-docker] | Docker Issue |
+| [need verify][label-need-verify] | Need verify Issue |
+| [e2e][label-e2e] | E2E Issue |
+| [win-os][label-win-os] | windows operating system Issue |
+| [suggestion][label-suggestion] | Give suggestion to us |
+ 
+Besides classification, labels can also set the priority of Issues. The higher the priority, the more attention it receives
+in the community, and the easier it is to be fixed or implemented. The priority labels are as follows
+
+| Label | priority |
+| ------ | ------ |
+| [priority:high][label-priority-high] | High priority |
+| [priority:middle][label-priority-middle] | Middle priority |
+| [priority:low][label-priority-low] | Low priority |
+
+All the labels above in common label. For all labels in this project you could see in [full label list][label-all-list]
+
+Before reading following content, please make sure you have labeled the Issue.
+  
+* Remove label [Waiting for reply][label-waiting-for-reply] after replying: Label [Waiting for reply][label-waiting-for-reply]
+  is added when [creating an Issue][issue-choose]. It makes locating unreplied issues more convenient, and you should remove
+  this label after you have reviewed it. If you do not remove it, others will waste time looking at the same issue.
+* Mark [Waiting for review][label-waiting-for-review] when not sure whether issue is resolved or not: There are two situations
+  when you review issue. One is the problem has been located or resolved, maybe have to [Create PR](./submit-code.md)
+  when necessary. Secondly, you are not sure about this issue, you can labeled [Waiting for review][label-waiting-for-review]
+  and mention others to make a second confirmation.
+
+When an Issue need to create Pull Requests, you could also labeled it from below.
+
+| Label | Mean |
+| ------ | ------ |
+| [Chore][label-Chore] | Chore for project |
+| [Good first issue][label-good-first-issue] | Good first issue for new contributor |
+| [easy to fix][label-easy-to-fix] | Easy to fix, harder than `Good first issue` |
+| [help wanted][label-help-wanted] | Help wanted |
+
+> Note: Only members have permission to add or delete labels. When you need to add or remove labels but are not a member,
+> you can `@` members to do that. But as long as you have a GitHub account, you can comment on issues and give suggestions.
+> We encourage everyone in the community to comment and answer issues
+
+### Pull Requests
+
+<!-- markdown-link-check-disable -->
+Reviewing Pull Requests means discussing [Pull Requests][all-PRs] in GitHub and giving suggestions on them. DolphinScheduler's 
+Pull Request reviewing is the same as [GitHub's reviewing changes in pull requests][gh-review-pr]. You can give your
+suggestions in Pull Requests
+
+* When you think the Pull Request is OK to be merged, you can agree to the Pull Request according to the "Approve" process
+  in [GitHub's reviewing changes in pull requests][gh-review-pr].
+* When you think Pull Request needs to be changed, you can comment it according to the "Comment" process in 
+  [GitHub's reviewing changes in pull requests][gh-review-pr]. And when you think issues that must be fixed before they
+  merged, please follow "Request changes" in [GitHub's reviewing changes in pull requests][gh-review-pr] to ask contributors
+  modify it.
+<!-- markdown-link-check-enable -->
+
+Labeling Pull Requests is an important part. Reasonable classification can save a lot of time for reviewers. The good news
+is that the label names and usage for Pull Requests are the same as in [Issues](#issues), which reduces the memory burden. For
+example, if there is a Pull Request related to docker that blocks deployment, we can label it with [docker][label-docker]
+and [priority:high][label-priority-high].
+
+Pull Requests have some unique labels of their own
+
+| Label | Mean |
+| ------ | ------ |
+| [miss document][label-miss-document] | Pull Request is missing documentation that should be added |
+| [first time contributor][label-first-time-contributor] | Pull Requests submit by first time contributor |
+| [don't merge][label-do-not-merge] | Pull Requests have some problem and should not be merged |
+
+> Note: Only members have permission to add or delete labels. When you need to add or remove labels but are not a member,
+> you can `@` members to do that. But as long as you have a GitHub account, you can comment on Pull Requests and give suggestions.
+> We encourage everyone in the community to review Pull Requests
+
+[mail-review-wanted]: https://lists.apache.org/thread/9flwlzrp69xjn6v8tdkbytq8glqp2k51
+[discussion-result-review-wanted]: https://github.com/apache/dolphinscheduler/discussions/7545
+[label-wontfix]: https://github.com/apache/dolphinscheduler/labels/wontfix
+[label-duplicate]: https://github.com/apache/dolphinscheduler/labels/duplicate
+[label-need-more-information]: https://github.com/apache/dolphinscheduler/labels/need%20more%20information
+[label-win-os]: https://github.com/apache/dolphinscheduler/labels/win-os
+[label-waiting-for-reply]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20reply
+[label-waiting-for-review]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20review
+[label-user-experience]: https://github.com/apache/dolphinscheduler/labels/user%20experience
+[label-development]: https://github.com/apache/dolphinscheduler/labels/development
+[label-UI]: https://github.com/apache/dolphinscheduler/labels/UI
+[label-suggestion]: https://github.com/apache/dolphinscheduler/labels/suggestion
+[label-security]: https://github.com/apache/dolphinscheduler/labels/security
+[label-Python]: https://github.com/apache/dolphinscheduler/labels/Python
+[label-plug-in]: https://github.com/apache/dolphinscheduler/labels/plug-in
+[label-document]: https://github.com/apache/dolphinscheduler/labels/document
+[label-docker]: https://github.com/apache/dolphinscheduler/labels/docker
+[label-all-list]: https://github.com/apache/dolphinscheduler/labels
+[label-Chore]: https://github.com/apache/dolphinscheduler/labels/Chore
+[label-good-first-issue]: https://github.com/apache/dolphinscheduler/labels/good%20first%20issue
+[label-help-wanted]: https://github.com/apache/dolphinscheduler/labels/help%20wanted
+[label-easy-to-fix]: https://github.com/apache/dolphinscheduler/labels/easy%20to%20fix
+[label-priority-high]: https://github.com/apache/dolphinscheduler/labels/priority%3Ahigh
+[label-priority-middle]: https://github.com/apache/dolphinscheduler/labels/priority%3Amiddle
+[label-priority-low]: https://github.com/apache/dolphinscheduler/labels/priority%3Alow
+[label-miss-document]: https://github.com/apache/dolphinscheduler/labels/miss%20document
+[label-first-time-contributor]: https://github.com/apache/dolphinscheduler/labels/first%20time%20contributor
+[label-do-not-merge]: https://github.com/apache/dolphinscheduler/labels/don%27t%20merge
+[label-e2e]: https://github.com/apache/dolphinscheduler/labels/e2e
+[label-need-verify]: https://github.com/apache/dolphinscheduler/labels/need%20to%20verify
+[issue-choose]: https://github.com/apache/dolphinscheduler/issues/new/choose
+[all-issues]: https://github.com/apache/dolphinscheduler/issues
+[all-PRs]: https://github.com/apache/dolphinscheduler/pulls
+[gh-review-pr]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews
diff --git a/docs/2.0.6/docs/en/contribute/join/security.md b/docs/2.0.6/docs/en/contribute/join/security.md
new file mode 100644
index 000000000..28bcda144
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/security.md
@@ -0,0 +1,8 @@
+# Security
+
+The Apache Software Foundation takes a rigorous stance on eliminating security issues in its software projects. Apache DolphinScheduler is also very concerned about security issues related to its features and functionality.
+
+If you have apprehensions regarding DolphinScheduler’s security or you discover vulnerability or potential threat, don’t hesitate to get in touch with the Apache Security Team by dropping a mail at [security@apache.org](mailto:security@apache.org). Please specify the project name as DolphinScheduler in the email and provide a description of the relevant problem or potential threat. You are also urged to recommend the way to reproduce and replicate the issue. The apache security team and  [...]
+
+Please pay attention to report the security issue on the security email before disclosing it on public domain.
+
diff --git a/docs/2.0.6/docs/en/contribute/join/submit-code.md b/docs/2.0.6/docs/en/contribute/join/submit-code.md
new file mode 100644
index 000000000..ac8795032
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/submit-code.md
@@ -0,0 +1,63 @@
+# Submit Code
+
+* First from the remote repository *https://github.com/apache/dolphinscheduler.git* fork a copy of the code into your own repository
+
+* There are currently three branches in the remote repository:
+    * master           normal delivery branch
+        After the stable release, merge the code from the stable branch into the master.
+    
+    * dev              daily development branch
+        Every day dev development branch, newly submitted code can pull request to this branch.
+
+
+* Clone your repository to your local
+    `git clone https://github.com/apache/dolphinscheduler.git`
+
+* Add remote repository address, named upstream
+    `git remote add upstream https://github.com/apache/dolphinscheduler.git`
+
+* View repository
+    `git remote -v`
+
+>At this time, there will be two repositories: origin (your own repository) and upstream (remote repository)
+
+* Get/Update remote repository code
+    `git fetch upstream`
+
+* Synchronize remote repository code to local repository
+
+```
+git checkout origin/dev
+git merge --no-ff upstream/dev
+```
+
+If remote branch has a new branch such as `dev-1.0`, you need to synchronize this branch to the local repository
+      
+```
+git checkout -b dev-1.0 upstream/dev-1.0
+git push --set-upstream origin dev-1.0
+```
+
+* Create new branch
+```
+git checkout -b xxx origin/dev
+```
+
+Make sure that the branch `xxx` is building successfully on the latest code of the official dev branch
+* After modifying the code locally in the new branch, submit it to your own repository:
+  
+`git commit -m 'commit content'`
+    
+`git push origin xxx --set-upstream`
+
+* Submit changes to the remote repository
+
+* On the github page, click "New pull request".
+
+* Select the modified local branch and the branch you want to merge with the past, click "Create pull request".
+
+* Then the community Committers will do CodeReview, and then he will discuss some details (including design, implementation, performance, etc.) with you. When everyone on the team is satisfied with this modification, the commit will be merged into the dev branch
+
+* Finally, congratulations, you have become an official contributor to dolphinscheduler!
+
+
diff --git a/docs/2.0.6/docs/en/contribute/join/subscribe.md b/docs/2.0.6/docs/en/contribute/join/subscribe.md
new file mode 100644
index 000000000..f6e8a7493
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/subscribe.md
@@ -0,0 +1,23 @@
+# Subscribe Mailing Lists
+
+It is highly recommended to subscribe to the development mailing list to keep up-to-date with the community.
+
+In the process of using DolphinScheduler, if you have any questions or ideas, suggestions, you can participate in the DolphinScheduler community building through the Apache mailing list. Sending a subscription email is also very simple, the steps are as follows:
+
+1. Send an email to dev-subscribe@dolphinscheduler.apache.org with your own email address, subject and content are arbitrary.
+
+2. Receive confirmation email and reply. After completing step 1, you will receive a confirmation email from dev-help@dolphinscheduler.apache.org (if not received, please confirm whether the email is automatically classified as spam, promotion email, subscription email, etc.) . Then reply directly to the email, or click on the link in the email to reply quickly, the subject and content are arbitrary.
+
+3. Receive a welcome email. After completing the above steps, you will receive a welcome email with the subject WELCOME to dev@dolphinscheduler.apache.org, and you have successfully subscribed to the Apache DolphinScheduler mailing list.
+
+# Unsubscribe Mailing Lists
+
+If you do not need to know what's going on with DolphinScheduler, you can unsubscribe from the mailing list.
+
+Unsubscribe from the mailing list steps are as follows:
+
+1. Send an email to dev-unsubscribe@dolphinscheduler.apache.org with your subscribed email address, subject and content are arbitrary.
+
+2. Receive confirmation email and reply. After completing step 1, you will receive a confirmation email from dev-help@dolphinscheduler.apache.org (if not received, please confirm whether the email is automatically classified as spam, promotion email, subscription email, etc.) . Then reply directly to the email, or click on the link in the email to reply quickly, the subject and content are arbitrary.
+
+3. Receive a goodbye email. After completing the above steps, you will receive a goodbye email with the subject GOODBYE from dev@dolphinscheduler.apache.org, and you have successfully unsubscribed from the Apache DolphinScheduler mailing list, and you will not receive emails from dev@dolphinscheduler.apache.org.
diff --git a/docs/2.0.6/docs/en/contribute/join/unit-test.md b/docs/2.0.6/docs/en/contribute/join/unit-test.md
new file mode 100644
index 000000000..796cf59e8
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/join/unit-test.md
@@ -0,0 +1,118 @@
+## Unit Test Coverage
+
+### 1. The Benefits of Writing Unit Tests
+
+-    Unit tests help everyone to get into the details of the code and understand how it works.
+-    Through test cases we can find bugs and submit robust code.
+-    The test case is also a demo usage of the code.
+
+### 2. Some design principles for unit test cases
+
+-    The steps, granularity and combination of conditions should be carefully designed.
+-    Pay attention to boundary conditions.
+-    Unit tests should be well designed as well as avoiding useless code.
+-    When you find a `method` is difficult to write unit test, and if you confirm that the `method` is `bad code`, then refactor it with the developer.
+<!-- markdown-link-check-disable -->
+-    DolphinScheduler: [mockito](http://site.mockito.org/). Here are some development guides: [mockito tutorial](http://www.baeldung.com/bdd-mockito), [mockito refcard](https://dzone.com/refcardz/mockito)
+<!-- markdown-link-check-enable -->
+-    TDD(option): When you start writing a new feature, you can try writing test cases first.
+
+### 3. Test coverage setpoint
+
+-    At this stage, the default value for test coverage of Delta change codes is >= 60%, the higher the better.
+-    We can see the test reports on this page:  https://codecov.io/gh/apache/dolphinscheduler
+
+## Fundamental guidelines for unit test
+
+### 1. Isolation and singleness
+
+A test case should be accurate to the method level, and it should be possible to execute the test case alone. At the same time the focus is always on the method (only the method is tested).
+
+If the method is too complex, it should be split up again during the development phase. For test cases, it is best that a case focuses on only one branch (judgment). When changes are applied to it, they only affect the success of a test case. This will greatly facilitate our verification of issues and problem solving during the development phase. At the same time, however, it also poses a great challenge in terms of coverage.
+
+### 2. Automaticity
+
+Unit tests can be automated. Mandatory: all unit tests must be written under src/test. Also the method naming should conform to the specification. Benchmark tests are excluded.
+
+### 3. Reproducibility
+
+Multiple executions (any environment, any time) result in unique and repeatable results.
+
+### 4. Lightweight
+
+That is, any environment can be implemented quickly.
+
+This requires that we don't rely on too many components, such as various spring beans and the like. These are all mocked in unit tests, and adding them would slow down our single-test execution, as well as potentially introduce contamination.
+
+For some databases, other external components, etc. As far as possible, the mock client is not dependent on the external environment (the presence of any external dependencies greatly limits the portability and stability of test cases and the correctness of results), which also makes it easy for developers to test in any environment.
+
+### 5. Measurable
+
+Over the years, mockito has grown to be the NO.1 mock, but it still doesn't support mock static methods, constructors, etc. Even the website keeps saying: "Don't mock everything". So use static methods as little as possible.
+
+It is generally recommended to provide static methods only in some utility classes, in which case you don't need mocks and just use real classes. If the dependent class is not a utility class, static methods can be refactored into instance methods. This is more in line with the object-oriented design concept.
+
+### 6. Completeness
+
+Test coverage, this is a very difficult problem. For the core process, we hope to achieve 90% coverage, non-core process requirements more than 60%.
+
+High enough coverage will reduce the probability of bugs and also reduce the cost of our regression tests. This is a long process, and whenever developers add or modify code, test cases need to be refined at the same time. We hope developers and relevant code reviewer will pay enough attention to this point.
+
+### 7. Refuse invalid assertions
+
+Invalid assertions make the test itself meaningless, it has little to do with whether your code is correct or not. And there is a risk of creating an illusion of success that may last until your code is deployed to production.
+
+There are several types of invalid assertions:
+
+1.   Different types of comparisons.
+
+2.   Determines that an object or variable with a default value is not null.
+
+     This seems meaningless. Therefore, when making the relevant judgements you should pay attention to whether it contains a default value itself.
+
+3.   Assertions should be affirmative rather than negative if possible. Assertions should be within a range of predicted results, or exact values, whenever possible (otherwise you may end up with something that doesn't match your actual expectations but passes the assertion) unless your code only cares about whether it is empty or not.
+
+### 8. Some points to note for unit tests
+
+1: Thread.sleep()
+
+Try not to use Thread.sleep in your test code, it makes the test unstable and may fail unexpectedly due to the environment or load. The following approach is recommended.
+
+`Awaitility.await().atMost(...)`
+
+2: Ignore some test classes
+
+The @Ignore annotation should be linked to the relevant issue address so that subsequent developers can track the history of why the test was ignored.
+
+For example @Ignore("see #1").
+
+3: try-catch Unit test exception
+
+The test will fail when the code in the unit test throws an exception. Therefore, there is no need to use try-catch to catch exceptions.
+
+     ```java
+     @Test
+     public void testMethod() {
+       try {
+                 // Some code
+       } catch (MyException e) {
+         Assert.fail(e.getMessage());  // Noncompliant
+       }
+     }
+     ```
+You should do this instead:
+
+```java
+@Test
+public void testMethod() throws MyException {
+    // Some code
+}
+```
+
+4: Test exceptions
+
+When you need to test for exceptions, you should avoid including multiple method invocations in your test code (especially if there are multiple methods that can raise the same exception), and you should clearly state what you are testing for.
+
+5: Refuse to use MockitoJUnitRunner.Silent.class
+
+When an UnnecessaryStubbingException occurs in a unit test, do not first consider using @RunWith(MockitoJUnitRunner.Silent.class) to resolve it. This just hides the problem, and you should follow the exception hint to resolve the issue in question, which is not a difficult task. When the changes are done, you will find that your code is much cleaner again.
diff --git a/docs/2.0.6/docs/en/contribute/release/release-post.md b/docs/2.0.6/docs/en/contribute/release/release-post.md
new file mode 100644
index 000000000..25a8595db
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/release/release-post.md
@@ -0,0 +1,32 @@
+# Release Post
+
+We still have some publish task to do after we send the announcement mail, currently we have to publish Docker images to
+Docker Hub and also publish pydolphinscheduler to PyPI.
+
+## Publish Docker Image
+
+we already have the exists CI to publish the latest Docker image to GitHub container register with [config](https://github.com/apache/dolphinscheduler/blob/d80cf21456265c9d84e642bdb4db4067c7577fc6/.github/workflows/publish-docker.yaml#L55-L63).
+We could reuse the main command the CI run and publish our Docker images to Docker Hub by single command.
+
+```bash
+# Please change the <VERSION> place hold to the version you release
+./mvnw -B clean deploy \
+    -Dmaven.test.skip \
+    -Dmaven.javadoc.skip \
+    -Dmaven.checkstyle.skip \
+    -Dmaven.deploy.skip \
+    -Ddocker.tag=<VERSION> \
+    -Ddocker.hub=apache \
+    -Pdocker,release
+```
+
+## Publish pydolphinscheduler to PyPI
+
+Python API need to release to PyPI for easier download and use, you can see more detail in [Python API release](https://github.com/apache/dolphinscheduler/blob/dev/dolphinscheduler-python/pydolphinscheduler/RELEASE.md#to-pypi)
+to finish PyPI release.
+
+## Get All Contributors
+
+You might need all contributors in current release when you want to publish the release news or announcement, you could
+use the git command `git log --pretty="%an" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> | sort | uniq` to auto generate
+the git author name.
diff --git a/docs/2.0.6/docs/en/contribute/release/release-prepare.md b/docs/2.0.6/docs/en/contribute/release/release-prepare.md
new file mode 100644
index 000000000..fe51973f1
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/release/release-prepare.md
@@ -0,0 +1,31 @@
+# Release Preparation
+
+## Check release-docs
+
+Compared with the last release, the `release-docs` of the current release needs to be updated to the latest, if there are dependencies and versions changes
+
+ - `dolphinscheduler-dist/release-docs/LICENSE`
+ - `dolphinscheduler-dist/release-docs/NOTICE`
+ - `dolphinscheduler-dist/release-docs/licenses`
+
+## Update Version
+
+For example, to release `x.y.z`, the following updates are required:
+
+- Version in the code:
+  - `sql`:
+    - `dolphinscheduler_mysql.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `dolphinscheduler_postgre.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `dolphinscheduler_h2.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `upgrade`: whether to add`x.y.z_schema`
+    - `soft_version`: need to be updated to x.y.z
+  - `deploy/docker/.env`: `HUB` change to `apache`,`TAG` change to `x.y.z`
+  - `deploy/kubernetes/dolphinscheduler`:
+    - `Chart.yaml`: `appVersion` needs to be updated to x.y.z (`version` is helm chart version,incremented and different from x.y.z)
+    - `values.yaml`: `image.tag` needs to be updated to x.y.z
+  - `dolphinscheduler-python/pydolphinscheduler/setup.py`: change `version` to x.y.z
+- Version in the docs:
+  - Change the placeholder `<version>`(except `pom`)  to the `x.y.z` in directory `docs`
+  - Add new history version
+    - `docs/docs/en/history-versions.md` and `docs/docs/zh/history-versions.md`: Add the new version and link for `x.y.z`
+  - `docs/configs/docsdev.js`: change `/dev/` to `/x.y.z/`
diff --git a/docs/2.0.6/docs/en/contribute/release/release.md b/docs/2.0.6/docs/en/contribute/release/release.md
new file mode 100644
index 000000000..8451e7a4f
--- /dev/null
+++ b/docs/2.0.6/docs/en/contribute/release/release.md
@@ -0,0 +1,540 @@
+# Release Guide
+
+## Check Your Environment
+
+To make sure you could successfully complete the release for DolphinScheduler, you should check your environment and make sure
+all conditions are met, if any of them are missing, you should install them and make sure they work.
+
+```shell
+# JDK 1.8 or above is required
+java -version
+# Maven is required
+mvn -version
+# Python 3.6 or above is required, and you have to make the keyword `python` work in your terminal with a matching version
+python --version
+```
+
+## GPG Settings
+
+### Install GPG
+
+Download installation package on [official GnuPG website](https://www.gnupg.org/download/index.html).
+The command of GnuPG 1.x version can differ a little from that of 2.x version.
+The following instructions take `GnuPG-2.1.23` version for example.
+
+After the installation, execute the following command to check the version number.
+
+```shell
+gpg --version
+```
+
+### Create Key
+
+After the installation, execute the following command to create key.
+
+This command indicates `GnuPG-2.x` can be used:
+
+```shell
+gpg --full-gen-key
+```
+
+This command indicates `GnuPG-1.x` can be used:
+
+```shell
+gpg --gen-key
+```
+
+Finish the key creation according to instructions, **Notice: Please use Apache mails and its password for key creation.**
+
+```shell
+gpg (GnuPG) 2.0.12; Copyright (C) 2009 Free Software Foundation, Inc.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Please select what kind of key you want:
+  (1) RSA and RSA (default)
+  (2) DSA and Elgamal
+  (3) DSA (sign only)
+  (4) RSA (sign only)
+Your selection? 1
+RSA keys may be between 1024 and 4096 bits long.
+What keysize do you want? (2048) 4096
+Requested keysize is 4096 bits
+Please specify how long the key should be valid.
+        0 = key does not expire
+     <n>  = key expires in n days
+     <n>w = key expires in n weeks
+     <n>m = key expires in n months
+     <n>y = key expires in n years
+Key is valid for? (0)
+Key does not expire at all
+Is this correct? (y/N) y
+
+GnuPG needs to construct a user ID to identify your key.
+
+Real name: ${Input username}
+Email address: ${Input email}
+Comment: ${Input comment}
+You selected this USER-ID:
+   "${Inputed username} (${Inputed comment}) <${Inputed email}>"
+
+Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O
+You need a Passphrase to protect your secret key. # Input your Apache mail passwords
+```
+
+### Check Generated Key
+
+```shell
+gpg --list-keys
+```
+
+Execution Result:
+
+```shell
+pub   4096R/85E11560 2019-11-15
+uid                  ${Username} (${Comment}) <{Email}>
+sub   4096R/A63BC462 2019-11-15
+```
+
+Among them, 85E11560 is public key ID.
+
+### Upload the Public Key to Key Server
+
+The command is as follow:
+
+```shell
+gpg --keyserver hkp://pool.sks-keyservers.net --send-key 85E11560
+```
+
+`pool.sks-keyservers.net` is randomly chosen from [public key server](https://sks-keyservers.net/status/).
+Each server will automatically synchronize with one another, so it would be okay to choose any one, a backup keys servers
+is `gpg --keyserver hkp://keyserver.ubuntu.com --send-key <YOUR_KEY_ID>`
+
+## Apache Maven Central Repository Release
+
+### Set `settings-security.xml` and `settings.xml`
+
+In this section, we add Apache server maven configuration to prepare the release, we have to add `settings-security.xml` according
+to [here](http://maven.apache.org/guides/mini/guide-encryption.html) firstly and then change your `~/.m2/settings.xml` like below
+
+```xml
+<settings>
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username> <!-- APACHE LDAP username --> </username>
+      <password> <!-- APACHE LDAP encrypted password --> </password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username> <!-- APACHE LDAP username --> </username>
+      <password> <!-- APACHE LDAP encrypted password --> </password>
+    </server>
+  </servers>
+</settings>
+```
+
+### Set Release in Environment
+
+We will use the release version, your github name and your Apache username below several times, so it is better to store
+it to bash variable for easier use.
+
+```shell
+VERSION=<THE-VERSION-YOU-RELEASE>
+GH_USERNAME=<YOUR-GITHUB-USERNAME>
+A_USERNAME=<YOUR-APACHE-USERNAME>
+```
+
+> Note: After we set the environment, we can use the variables directly in your bash without changing anything. For example, we
+> can use the command `git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git` to clone the release branch,
+> and it will succeed because `"${VERSION}"` is converted to `<THE-VERSION-YOU-RELEASE>`. But you have to change `<VERSION>` manually in
+> some non-bash steps like the [vote mail](#vote-procedure); we use `<VERSION>` instead of `"${VERSION}"` to remind the release
+> manager that they have to change it by hand.
+
+### Create Release Branch
+
+In this section, we download the source code from GitHub and create a new branch to release
+
+```shell
+git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git
+cd ~/dolphinscheduler/
+git pull
+git checkout -b "${VERSION}"-release
+git push origin "${VERSION}"-release
+```
+
+### Pre-Release Check
+
+```shell
+# make gpg command could be run in maven correct
+export GPG_TTY=$(tty)
+
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DdryRun=true -Dusername="${GH_USERNAME}"
+```
+
+* `-Prelease,python`: choose release and python profile, which will pack all the source codes, jar files and executable binary packages, and Python distribute package.
+* `-DautoVersionSubmodules=true`: it can make the version number is inputted only once and not for each sub-module.
+* `-DdryRun=true`: dry run which means not to generate or submit new version number and new tag.
+
+### Prepare for the Release
+
+First, clean local pre-release check information.
+
+```shell
+mvn release:clean
+```
+
+Then, prepare to execute the release.
+
+```shell
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DpushChanges=false -Dusername="${GH_USERNAME}"
+```
+
+It is basically the same as the previous rehearsal command, but deleting `-DdryRun=true` parameter.
+
+* `-DpushChanges=false`: do not submit the edited version number and tag to GitHub automatically.
+
+> Note: You have to configure your git `user.name` and `user.email` by the commands `git config --global user.email "you@example.com"`
+> and `git config --global user.name "Your Name"` if you meet some mistake like **Please tell me who you are.**
+> from git.
+
+After making sure there is no mistake in local files, submit them to GitHub.
+
+```shell
+git push -u origin "${VERSION}"-release
+git push origin --tags
+```
+
+<!-- markdown-link-check-disable -->
+
+> Note1: In this step, you should use github token for password because native password no longer supported, you can see
+> https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token for more
+> detail about how to create token about it.
+
+> Note2: After the command is done, it will auto-create the `release.properties` file and `*.Backup` files; they will be needed
+> in the following commands, so DO NOT DELETE THEM
+
+<!-- markdown-link-check-enable -->
+
+### Deploy the Release
+
+```shell
+mvn release:perform -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -Dusername="${GH_USERNAME}"
+```
+
+After that command is executed, the version to be released will be uploaded to Apache staging repository automatically.
+Go to [apache staging repositories](https://repository.apache.org/#stagingRepositories) and login by Apache LDAP. then you can see the uploaded version, the content of `Repository` column is the `${STAGING.REPOSITORY}`.
+Click `Close` to tell Nexus that the construction is finished, because only in this way, this version can be usable.
+If there is any problem in gpg signature, `Close` will fail, but you can see the failure information through `Activity`.
+
+## Apache SVN Repository Release
+
+### Checkout dolphinscheduler Release Directory
+
+If there is no local work directory, create one at first.
+
+```shell
+mkdir -p ~/ds_svn/dev/
+cd ~/ds_svn/dev/
+```
+
+After the creation, checkout dolphinscheduler release directory from Apache SVN.
+
+```shell
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/dev/dolphinscheduler
+cd ~/ds_svn/dev/dolphinscheduler
+```
+
+### Add gpg Public Key
+
+Only the account in its first deployment needs to add that.
+It is alright for `KEYS` to only include the public key of the deployed account.
+
+```shell
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+```
+
+### Add the Release Content to SVN Directory
+
+Create folder by version number.
+
+```shell
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+cd ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+```
+
+Add source code packages, binary packages and executable binary packages to SVN working directory.
+
+```shell
+# Source and binary tarball for main code
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz.asc ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+
+# Source and binary tarball for Python API
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/python/* ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+```
+
+### Generate sign files
+
+```shell
+shasum -a 512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz >> apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -b -a 512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz >> apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -a 512 apache-dolphinscheduler-python-"${VERSION}".tar.gz >> apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -b -a 512 apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl >> apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### Commit to Apache SVN
+
+```shell
+cd ~/ds_svn/dev/dolphinscheduler
+svn add *
+svn --username="${A_USERNAME}" commit -m "release ${VERSION}"
+```
+## Check Release
+
+### Check sha512 hash
+
+```shell
+shasum -c apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -c apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -c apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -c apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### Check gpg Signature
+
+First, import releaser's public key.
+Import KEYS from SVN repository to local. (The releaser does not need to import again; the checking assistant needs to import it, with the user name filled as the releaser's. )
+
+```shell
+curl https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS >> KEYS
+gpg --import KEYS
+gpg --edit-key "${A_USERNAME}"
+  > trust
+
+Please decide how far you trust this user to correctly verify other users' keys
+(by looking at passports, checking fingerprints from different sources, etc.)
+
+  1 = I don't know or won't say
+  2 = I do NOT trust
+  3 = I trust marginally
+  4 = I trust fully
+  5 = I trust ultimately
+  m = back to the main menu
+
+Your decision? 5
+
+  > save
+```
+
+Then, check the gpg signature.
+
+```shell
+gpg --verify apache-dolphinscheduler-"${VERSION}"-src.tar.gz.asc
+gpg --verify apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.asc
+cd python
+gpg --verify apache-dolphinscheduler-python-"${VERSION}".tar.gz.asc
+gpg --verify apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.asc
+cd ../
+```
+
+> Note: You have to create gpg signature manually when you can not find your `asc` file, the command
+> `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz` and
+> `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz` will create them
+
+### Check Released Files
+
+#### Check source package
+
+Decompress `apache-dolphinscheduler-<VERSION>-src.tar.gz` and `python/apache-dolphinscheduler-python-<VERSION>.tar.gz` then check the following items:
+
+*   Check whether source tarball is oversized for including nonessential files
+*   `LICENSE` and `NOTICE` files exist
+*   Correct year in `NOTICE` file
+*   There are only text files but no binary files
+*   All source files have ASF headers
+*   Codes can be compiled and pass the unit tests (mvn install)
+*   The contents of the release match with what's tagged in version control (diff -r a verify_dir tag_dir)
+*   Check if there is any extra files or folders, empty folders for example
+
+#### Check binary packages
+
+Decompress `apache-dolphinscheduler-<VERSION>-bin.tar.gz` and `python/apache-dolphinscheduler-python-<VERSION>-bin.tar.gz`
+to check the following items:
+
+- `LICENSE` and `NOTICE` files exist
+- Correct year in `NOTICE` file
+- Check the third party dependency license:
+  - The software have a compatible license
+  - All software licenses mentioned in `LICENSE`
+  - All the third party dependency licenses are under `licenses` folder
+  - If it depends on Apache license and has a `NOTICE` file, that `NOTICE` file need to be added to `NOTICE` file of the release
+
+## Call for a Vote
+
+### Update Release Notes
+
+You should create a release note in GitHub by [new release note](https://github.com/apache/dolphinscheduler/releases/new).
+It should be done before vote mail because we need the release note in the mail. You could use command
+`git log --pretty="- %s" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> > changelog.md` to create the changelog (some log entries
+may not be correct, you should filter them by yourself), classify them, and paste them to the GitHub release note page
+
+### Vote procedure
+
+1. DolphinScheduler community vote: send the vote e-mail to `dev@dolphinscheduler.apache.org`.
+PMC needs to check the rightness of the version according to the document before they vote.
+After at least 72 hours and with at least 3 `+1 and no -1 PMC member` votes, it can come to the next stage of the vote.
+
+2. Announce the vote result: send the result vote e-mail to `dev@dolphinscheduler.apache.org`.
+
+### Vote Templates
+
+#### DolphinScheduler Community Vote Template
+
+Title:
+
+```txt
+[VOTE] Release Apache DolphinScheduler <VERSION>
+```
+
+Body:
+
+```txt
+Hello DolphinScheduler Community,
+
+This is a call for vote to release Apache DolphinScheduler version <VERSION>
+
+Release notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+The release candidates: https://dist.apache.org/repos/dist/dev/dolphinscheduler/<VERSION>/
+
+Maven 2 staging repository: https://repository.apache.org/content/repositories/<VERSION>/org/apache/dolphinscheduler/
+
+Git tag for the release: https://github.com/apache/dolphinscheduler/tree/<VERSION>
+
+Release Commit ID: https://github.com/apache/dolphinscheduler/commit/<SHA-VALUE>
+
+Keys to verify the Release Candidate: https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS
+
+Look at here for how to verify this release candidate: https://dolphinscheduler.apache.org/en-us/community/release.html
+
+The vote will be open for at least 72 hours or until necessary number of votes are reached.
+
+Please vote accordingly:
+
+[ ] +1 approve
+[ ] +0 no opinion
+[ ] -1 disapprove with the reason
+
+Checklist for reference:
+
+[ ] Download links are valid.
+[ ] Checksums and PGP signatures are valid.
+[ ] Source code artifacts have correct names matching the current release.
+[ ] LICENSE and NOTICE files are correct for each DolphinScheduler repo.
+[ ] All files have license headers if necessary.
+[ ] No compiled archives bundled in source archive.
+```
+
+2. Announce the vote result:
+
+Body:
+
+```txt
+The vote to release Apache DolphinScheduler <VERSION> has passed. Here is the vote result,
+
+4 PMC member +1 votes:
+
+xxx
+xxx
+xxx
+xxx
+
+1 community +1 vote:
+xxx
+
+Thanks everyone for taking time to check this release and help us.
+```
+
+## Finish the Release
+
+### Move source packages, binary packages from the `dev` directory to `release` directory
+
+```shell
+svn mv https://dist.apache.org/repos/dist/dev/dolphinscheduler/"${VERSION}" https://dist.apache.org/repos/dist/release/dolphinscheduler/
+```
+
+### Export you new gpg KEYS from dev to release(optional)
+
+Only needed the first time you release with this gpg KEY — whether it is your first release or you have changed your KEY
+
+```shell
+mkdir -p ~/ds_svn/release/
+cd ~/ds_svn/release/
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/release/dolphinscheduler
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+svn add *
+svn --username="${A_USERNAME}" commit -m "new key <YOUR-GPG-KEY-ID> add"
+```
+
+### Update Document
+
+The website should be updated before you send the announce mail; this section will tell you how to change the website. For example,
+if the release version is `<VERSION>`, the following updates are required (note they will take effect immediately when the PR is merged):
+
+- Repository **apache/dolphinscheduler-website**:
+  - `download/en-us/download.md` and `download/zh-cn/download.md`: add the download of the `<VERSION>` release package
+  - `scripts/conf.sh`: Add new release version `<VERSION>` key-value pair to variable `DEV_RELEASE_DOCS_VERSIONS`
+- Repository **apache/dolphinscheduler**:
+  - `docs/configs/site.js`:
+    - `docsLatest`: update to `<VERSION>`
+    - `docs0`: The `text` of two places of `en-us/zh-cn` needs to be updated to `latest(<VERSION>)`
+    - `docsxyz`: Add a drop-down menu with `key` as `docsxyz` and `text` as `<VERSION>` in `children` of two places of `en-us/zh-cn`
+  - `docs/configs/index.md.jsx`: Add `<VERSION>: docsxyzConfig`
+  - `docs/docs/en/history-versions.md` and `docs/docs/zh/history-versions.md`: Add new `<VERSION>` release docs.
+  - `.github/ISSUE_TEMPLATE/bug-report.yml`: DolphinScheduler's GitHub [bug-report](https://github.com/apache/dolphinscheduler/blob/dev/.github/ISSUE_TEMPLATE/bug-report.yml)
+    issue template has a **Version** selection button. So after we release DolphinScheduler we should add the new `<VERSION>` to
+    bug-report.yml
+
+### Find DolphinScheduler in [apache staging repositories](https://repository.apache.org/#stagingRepositories) and click `Release`
+
+### Send Announcement E-mail Community
+
+You should send announcement E-mail after release process finished. The E-mail should send to `dev@dolphinscheduler.apache.org`
+and cc to `announce@apache.org`.
+
+Announcement e-mail template as below:
+
+Title:
+
+```txt
+[ANNOUNCE] Release Apache DolphinScheduler <VERSION>
+```
+
+Body:
+
+```txt
+Hi all,
+
+We are glad to announce the release of Apache DolphinScheduler <VERSION>. Once again I would like to express my thanks to your help.
+
+Dolphin Scheduler is a distributed and easy-to-extend visual workflow scheduler system,
+dedicated to solving the complex task dependencies in data processing, making the scheduler system out of the box for data processing.
+
+
+Download Links: https://dolphinscheduler.apache.org/en-us/download/download.html
+
+Release Notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+Website: https://dolphinscheduler.apache.org/
+
+DolphinScheduler Resources:
+- Issue: https://github.com/apache/dolphinscheduler/issues/
+- Mailing list: dev@dolphinscheduler.apache.org
+- Documents: https://dolphinscheduler.apache.org/zh-cn/docs/<VERSION>/user_doc/about/introduction.html
+```
diff --git a/docs/2.0.6/docs/zh/contribute/api-standard.md b/docs/2.0.6/docs/zh/contribute/api-standard.md
new file mode 100644
index 000000000..0d528cea2
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/api-standard.md
@@ -0,0 +1,111 @@
+# API 设计规范
+规范统一的 API 是项目设计的基石。DolphinScheduler 的 API 遵循 REST ful 标准,REST ful 是目前最流行的一种互联网软件架构,它结构清晰,符合标准,易于理解,扩展方便。
+
+本文以 DolphinScheduler 项目的接口为样例,讲解如何构造具有 Restful 风格的 API。
+
+## 1. URI 设计
+REST 即为 Representational State Transfer 的缩写,即“表现层状态转化”。
+
+“表现层”指的就是“资源”。资源对应网络上的一种实体,例如:一段文本,一张图片,一种服务。且每种资源都对应一个特定的 URI。
+
+Restful URI 的设计基于资源:
++ 一类资源:用复数表示,如 `task-instances`、`groups` 等;
++ 单个资源:用单数,或是用 id 值表示某类资源下的一个,如 `group`、`groups/{groupId}`;
++ 子资源:某个资源下的资源:`/instances/{instanceId}/tasks`;
++ 子资源下的单个资源:`/instances/{instanceId}/tasks/{taskId}`;
+
+## 2. Method 设计
+我们需要通过 URI 来定位某种资源,再通过 Method,或者在路径后缀声明动作来体现对资源的操作。
+
+### ① 查询操作 - GET
+通过 URI 来定位要资源,通过 GET 表示查询。
+
++ 当 URI 为一类资源时表示查询一类资源,例如下面样例表示分页查询 `alter-groups`。
+```
+Method: GET
+/dolphinscheduler/alert-groups
+```
+
++ 当 URI 为单个资源时表示查询此资源,例如下面样例表示查询对应的 `alter-group`。
+```
+Method: GET
+/dolphinscheduler/alter-groups/{id}
+```
+
++ 此外,我们还可以根据 URI 来表示查询子资源,如下:
+```
+Method: GET
+/dolphinscheduler/projects/{projectId}/tasks
+```
+
+**上述的关于查询的方式都表示分页查询,如果我们需要查询全部数据的话,则需在 URI 的后面加 `/list` 来区分。分页查询和查询全部不要混用一个 API。**
+```
+Method: GET
+/dolphinscheduler/alert-groups/list
+```
+
+### ② 创建操作 - POST
+通过 URI 来定位要创建的资源类型,通过 POST 表示创建动作,并且将创建后的 `id` 返回给请求者。
+
++ 下面样例表示创建一个 `alter-group`:
+
+```
+Method: POST
+/dolphinscheduler/alter-groups
+```
+
++ 创建子资源也是类似的操作:
+```
+Method: POST
+/dolphinscheduler/alter-groups/{alterGroupId}/tasks
+```
+
+### ③ 修改操作 - PUT
+通过 URI 来定位某一资源,通过 PUT 指定对其修改。
+```
+Method: PUT
+/dolphinscheduler/alter-groups/{alterGroupId}
+```
+
+### ④ 删除操作 -DELETE
+通过 URI 来定位某一资源,通过 DELETE 指定对其删除。
+
++ 下面例子表示删除 `alterGroupId` 对应的资源:
+```
+Method: DELETE
+/dolphinscheduler/alter-groups/{alterGroupId}
+```
+
++ 批量删除:对传入的 id 数组进行批量删除,使用 POST 方法。**(这里不要用 DELETE 方法,因为 DELETE 请求的 body 在语义上没有任何意义,而且有可能一些网关,代理,防火墙在收到 DELETE 请求后会把请求的 body 直接剥离掉。)**
+```
+Method: POST
+/dolphinscheduler/alter-groups/batch-delete
+```
+
+### ⑤ 部分更新操作 -PATCH
+通过 URI 来定位某一资源,通过 PATCH 指定对其部分更新。
+
++ 下面例子表示部分更新 `alterGroupId` 对应的资源:
+```
+Method: PATCH
+/dolphinscheduler/alter-groups/{alterGroupId}
+```
+
+### ⑥ 其他操作
+除增删改查外的操作,我们同样也通过 `url` 定位到对应的资源,然后再在路径后面追加对其进行的操作。例如:
+```
+/dolphinscheduler/alert-groups/verify-name
+/dolphinscheduler/projects/{projectCode}/process-instances/{code}/view-gantt
+```
+
+## 3. 参数设计
+参数分为两种,一种是请求参数(Request Param 或 Request Body),另一种是路径参数(Path Param)。
+
+参数变量必须用小驼峰表示,并且在分页场景中,用户输入的参数小于 1,则前端需要返给后端 1 表示请求第一页;当后端发现用户输入的参数大于总页数时,直接返回最后一页。
+
+## 4. 其他设计
+### 基础路径
+整个项目的 URI 需要以 `/<project_name>` 作为基础路径,从而标识这类 API 都是项目下的,即:
+```
+/dolphinscheduler
+```
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/architecture-design.md b/docs/2.0.6/docs/zh/contribute/architecture-design.md
new file mode 100644
index 000000000..35bee1a1d
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/architecture-design.md
@@ -0,0 +1,301 @@
+## 系统架构设计
+在对调度系统架构说明之前,我们先来认识一下调度系统常用的名词
+
+### 1.名词解释
+**DAG:** 全称Directed Acyclic Graph,简称DAG。工作流中的Task任务以有向无环图的形式组装起来,从入度为零的节点进行拓扑遍历,直到无后继节点为止。举例如下图:
+
+<p align="center">
+  <img src="../../../img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
+  <p align="center">
+        <em>dag示例</em>
+  </p>
+</p>
+
+**流程定义**:通过拖拽任务节点并建立任务节点的关联所形成的可视化**DAG**
+
+**流程实例**:流程实例是流程定义的实例化,可以通过手动启动或定时调度生成,流程定义每运行一次,产生一个流程实例
+
+**任务实例**:任务实例是流程定义中任务节点的实例化,标识着具体的任务执行状态
+
+**任务类型**: 目前支持有SHELL、SQL、SUB_PROCESS(子流程)、PROCEDURE、MR、SPARK、PYTHON、DEPENDENT(依赖),同时计划支持动态插件扩展,注意:其中子 **SUB_PROCESS**  也是一个单独的流程定义,是可以单独启动执行的
+
+**调度方式:** 系统支持基于cron表达式的定时调度和手动调度。命令类型支持:启动工作流、从当前节点开始执行、恢复被容错的工作流、恢复暂停流程、从失败节点开始执行、补数、定时、重跑、暂停、停止、恢复等待线程。其中 **恢复被容错的工作流** 和 **恢复等待线程** 两种命令类型是由调度内部控制使用,外部无法调用
+
+**定时调度**:系统采用 **quartz** 分布式调度器,并同时支持cron表达式可视化的生成
+
+**依赖**:系统不单单支持 **DAG** 简单的前驱和后继节点之间的依赖,同时还提供**任务依赖**节点,支持**流程间的自定义任务依赖**
+
+**优先级** :支持流程实例和任务实例的优先级,如果流程实例和任务实例的优先级不设置,则默认是先进先出
+
+**邮件告警**:支持 **SQL任务** 查询结果邮件发送,流程实例运行结果邮件告警及容错告警通知
+
+**失败策略**:对于并行运行的任务,如果有任务失败,提供两种失败策略处理方式,**继续**是指不管并行运行任务的状态,直到流程失败结束。**结束**是指一旦发现失败任务,则同时Kill掉正在运行的并行任务,流程失败结束
+
+**补数**:补历史数据,支持**区间并行和串行**两种补数方式
+
+### 2.系统架构
+
+#### 2.1 系统架构图
+<p align="center">
+  <img src="../../../img/architecture.jpg" alt="系统架构图"  />
+  <p align="center">
+        <em>系统架构图</em>
+  </p>
+</p>
+
+#### 2.2 架构说明
+
+* **MasterServer** 
+
+    MasterServer采用分布式无中心设计理念,MasterServer主要负责 DAG 任务切分、任务提交监控,并同时监听其它MasterServer和WorkerServer的健康状态。
+    MasterServer服务启动时向Zookeeper注册临时节点,通过监听Zookeeper临时节点变化来进行容错处理。
+
+    ##### 该服务内主要包含:
+
+    - **Distributed Quartz**分布式调度组件,主要负责定时任务的启停操作,当quartz调起任务后,Master内部会有线程池具体负责处理任务的后续操作
+
+    - **MasterSchedulerThread**是一个扫描线程,定时扫描数据库中的 **command** 表,根据不同的**命令类型**进行不同的业务操作
+
+    - **MasterExecThread**主要是负责DAG任务切分、任务提交监控、各种不同命令类型的逻辑处理
+
+    - **MasterTaskExecThread**主要负责任务的持久化
+
+* **WorkerServer** 
+
+     WorkerServer也采用分布式无中心设计理念,WorkerServer主要负责任务的执行和提供日志服务。WorkerServer服务启动时向Zookeeper注册临时节点,并维持心跳。
+     ##### 该服务包含:
+     - **FetchTaskThread**主要负责不断从**Task Queue**中领取任务,并根据不同任务类型调用**TaskScheduleThread**对应执行器。
+
+* **ZooKeeper** 
+
+    ZooKeeper服务,系统中的MasterServer和WorkerServer节点都通过ZooKeeper来进行集群管理和容错。另外系统还基于ZooKeeper进行事件监听和分布式锁。
+    我们也曾经基于Redis实现过队列,不过我们希望DolphinScheduler依赖到的组件尽量地少,所以最后还是去掉了Redis实现。
+
+* **Task Queue** 
+
+    提供任务队列的操作,目前队列也是基于Zookeeper来实现。由于队列中存的信息较少,不必担心队列里数据过多的情况,实际上我们压测过百万级数据存队列,对系统稳定性和性能没影响。
+
+* **Alert** 
+
+    提供告警相关接口,接口主要包括两种类型的告警数据的存储、查询和通知功能。其中通知功能又有**邮件通知**和**SNMP(暂未实现)**两种。
+
+* **API** 
+
+    API接口层,主要负责处理前端UI层的请求。该服务统一提供RESTful api向外部提供请求服务。
+    接口包括工作流的创建、定义、查询、修改、发布、下线、手工启动、停止、暂停、恢复、从该节点开始执行等等。
+
+* **UI** 
+
+    系统的前端页面,提供系统的各种可视化操作界面,详见 [快速开始](https://dolphinscheduler.apache.org/zh-cn/docs/latest/user_doc/about/introduction.html) 部分。
+
+#### 2.3 架构设计思想
+
+##### 一、去中心化vs中心化 
+
+###### 中心化思想
+
+中心化的设计理念比较简单,分布式集群中的节点按照角色分工,大体上分为两种角色:
+<p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave角色"  width="50%" />
+ </p>
+
+- Master的角色主要负责任务分发并监督Slave的健康状态,可以动态的将任务均衡到Slave上,以致Slave节点不至于“忙死”或”闲死”的状态。
+- Worker的角色主要负责任务的执行工作并维护和Master的心跳,以便Master可以分配任务给Slave。
+
+
+
+中心化思想设计存在的问题:
+
+- 一旦Master出现了问题,则群龙无首,整个集群就会崩溃。为了解决这个问题,大多数Master/Slave架构模式都采用了主备Master的设计方案,可以是热备或者冷备,也可以是自动切换或手动切换,而且越来越多的新系统都开始具备自动选举切换Master的能力,以提升系统的可用性。
+- 另外一个问题是如果Scheduler在Master上,虽然可以支持一个DAG中不同的任务运行在不同的机器上,但是会产生Master的过负载。如果Scheduler在Slave上,则一个DAG中所有的任务都只能在某一台机器上进行作业提交,则并行任务比较多的时候,Slave的压力可能会比较大。
+
+
+
+###### 去中心化
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="去中心化"  width="50%" />
+ </p>
+
+- 在去中心化设计里,通常没有Master/Slave的概念,所有的角色都是一样的,地位是平等的,全球互联网就是一个典型的去中心化的分布式系统,联网的任意节点设备down机,都只会影响很小范围的功能。
+- 去中心化设计的核心设计在于整个分布式系统中不存在一个区别于其他节点的”管理者”,因此不存在单点故障问题。但由于不存在” 管理者”节点所以每个节点都需要跟其他节点通信才得到必须要的机器信息,而分布式系统通信的不可靠性,则大大增加了上述功能的实现难度。
+- 实际上,真正去中心化的分布式系统并不多见。反而动态中心化分布式系统正在不断涌出。在这种架构下,集群中的管理者是被动态选择出来的,而不是预置的,并且集群在发生故障的时候,集群的节点会自发的举行"会议"来选举新的"管理者"去主持工作。最典型的案例就是ZooKeeper及Go语言实现的Etcd。
+
+
+
+- DolphinScheduler的去中心化是Master/Worker注册到Zookeeper中,实现Master集群和Worker集群无中心,并使用Zookeeper分布式锁来选举其中的一台Master或Worker为“管理者”来执行任务。
+
+#####  二、分布式锁实践
+
+DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master执行Scheduler,或者只有一台Worker执行任务的提交。
+1. 获取分布式锁的核心流程算法如下
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock.png" alt="获取分布式锁流程"  width="70%" />
+ </p>
+
+2. DolphinScheduler中Scheduler线程分布式锁实现流程图:
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock_procss.png" alt="获取分布式锁流程" />
+ </p>
+
+
+##### 三、线程不足循环等待问题
+
+-  如果一个DAG中没有子流程,则如果Command中的数据条数大于线程池设置的阈值,则直接流程等待或失败。
+-  如果一个大的DAG中嵌套了很多子流程,如下图则会产生“死等”状态:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/lack_thread.png" alt="线程不足循环等待问题"  width="70%" />
+ </p>
+上图中MainFlowThread等待SubFlowThread1结束,SubFlowThread1等待SubFlowThread2结束, SubFlowThread2等待SubFlowThread3结束,而SubFlowThread3等待线程池有新线程,则整个DAG流程不能结束,从而其中的线程也不能释放。这样就形成了子父流程循环等待的状态。此时除非启动新的Master来增加线程来打破这样的”僵局”,否则调度集群将不能再使用。
+
+对于启动新Master来打破僵局,似乎有些不尽如人意,于是我们提出了以下三种方案来降低这种风险:
+
+1. 计算所有Master的线程总和,然后对每一个DAG需要计算其需要的线程数,也就是在DAG流程执行之前做预计算。因为是多Master线程池,所以总线程数不太可能实时获取。 
+2. 对单Master线程池进行判断,如果线程池已经满了,则让线程直接失败。
+3. 增加一种资源不足的Command类型,如果线程池不足,则将主流程挂起。这样线程池就有了新的线程,可以让资源不足挂起的流程重新唤醒执行。
+
+注意:Master Scheduler线程在获取Command的时候是FIFO的方式执行的。
+
+于是我们选择了第三种方式来解决线程不足的问题。
+
+
+##### 四、容错设计
+容错分为服务宕机容错和任务重试,服务宕机容错又分为Master容错和Worker容错两种情况
+
+###### 1. 宕机容错
+
+服务容错设计依赖于ZooKeeper的Watcher机制,实现原理如图:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant.png" alt="DolphinScheduler容错设计"  width="70%" />
+ </p>
+其中Master监控其他Master和Worker的目录,如果监听到remove事件,则会根据具体的业务逻辑进行流程实例容错或者任务实例容错。
+
+
+
+- Master容错流程图:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_master.png" alt="Master容错流程图"  width="70%" />
+ </p>
+ZooKeeper Master容错完成之后则重新由DolphinScheduler中Scheduler线程调度,遍历 DAG 找到”正在运行”和“提交成功”的任务,对”正在运行”的任务监控其任务实例的状态,对”提交成功”的任务需要判断Task Queue中是否已经存在,如果存在则同样监控任务实例的状态,如果不存在则重新提交任务实例。
+
+
+
+- Worker容错流程图:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_worker.png" alt="Worker容错流程图"  width="70%" />
+ </p>
+
+Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则接管任务并进行重新提交。
+
+ 注意:由于” 网络抖动”可能会使得节点短时间内失去和ZooKeeper的心跳,从而发生节点的remove事件。对于这种情况,我们使用最简单的方式,那就是节点一旦和ZooKeeper发生超时连接,则直接将Master或Worker服务停掉。
+
+###### 2.任务失败重试
+
+这里首先要区分任务失败重试、流程失败恢复、流程失败重跑的概念:
+
+- 任务失败重试是任务级别的,是调度系统自动进行的,比如一个Shell任务设置重试次数为3次,那么在Shell任务运行失败后会自己再最多尝试运行3次
+- 流程失败恢复是流程级别的,是手动进行的,恢复只能**从失败的节点开始执行**或**从当前节点开始执行**
+- 流程失败重跑也是流程级别的,是手动进行的,重跑是从开始节点进行
+
+
+
+接下来说正题,我们将工作流中的任务节点分了两种类型。
+
+- 一种是业务节点,这种节点都对应一个实际的脚本或者处理语句,比如Shell节点,MR节点、Spark节点、依赖节点等。
+
+- 还有一种是逻辑节点,这种节点不做实际的脚本或语句处理,只是整个流程流转的逻辑处理,比如子流程节点等。
+
+每一个**业务节点**都可以配置失败重试的次数,当该任务节点失败,会自动重试,直到成功或者超过配置的重试次数。**逻辑节点**不支持失败重试。但是逻辑节点里的任务支持重试。
+
+如果工作流中有任务失败达到最大重试次数,工作流就会失败停止,失败的工作流可以手动进行重跑操作或者流程恢复操作
+
+
+
+##### 五、任务优先级设计
+在早期调度设计中,如果没有优先级设计,采用公平调度设计的话,会遇到先行提交的任务可能会和后继提交的任务同时完成的情况,而不能做到设置流程或者任务的优先级,因此我们对此进行了重新设计,目前我们设计如下:
+
+-  按照**不同流程实例优先级**优先于**同一个流程实例优先级**优先于**同一流程内任务优先级**优先于**同一流程内任务**提交顺序依次从高到低进行任务处理。
+    - 具体实现是根据任务实例的json解析优先级,然后把**流程实例优先级_流程实例id_任务优先级_任务id**信息保存在ZooKeeper任务队列中,当从任务队列获取的时候,通过字符串比较即可得出最需要优先执行的任务
+
+        - 其中流程定义的优先级是考虑到有些流程需要先于其他流程进行处理,这个可以在流程启动或者定时启动时配置,共有5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="流程优先级配置"  width="40%" />
+             </p>
+
+        - 任务的优先级也分为5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="任务优先级配置"  width="35%" />
+             </p>
+
+
+##### 六、Logback和gRPC实现日志访问
+
+-  由于Web(UI)和Worker不一定在同一台机器上,所以查看日志不能像查询本地文件那样。有两种方案:
+  -  将日志放到ES搜索引擎上
+  -  通过gRPC通信获取远程日志信息
+
+-  考虑到尽可能保证 DolphinScheduler 的轻量级性,所以选择了gRPC实现远程访问日志信息。
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="grpc远程访问"  width="60%" />
+ </p>
+
+
+- 我们使用自定义Logback的FileAppender和Filter功能,实现每个任务实例生成一个日志文件。
+- FileAppender主要实现如下:
+
+ ```java
+ /**
+  * task log appender
+  */
+ public class TaskLogAppender extends FileAppender<ILoggingEvent> {
+ 
+     ...
+
+    @Override
+    protected void append(ILoggingEvent event) {
+
+        if (currentlyActiveFile == null){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split("-");
+        // logId = processDefineId_processInstanceId_taskInstanceId
+        String logId = threadNameArr[1];
+        ...
+        super.subAppend(event);
+    }
+}
+ ```
+
+
+以/流程定义id/流程实例id/任务实例id.log的形式生成日志
+
+- 过滤匹配以TaskLogInfo开始的线程名称:
+
+- TaskLogFilter实现如下:
+
+ ```java
+ /**
+ *  task log filter
+ */
+public class TaskLogFilter extends Filter<ILoggingEvent> {
+
+    @Override
+    public FilterReply decide(ILoggingEvent event) {
+        if (event.getThreadName().startsWith("TaskLogInfo-")){
+            return FilterReply.ACCEPT;
+        }
+        return FilterReply.DENY;
+    }
+}
+ ```
+
+### 总结
+本文从调度出发,初步介绍了大数据分布式工作流调度系统--DolphinScheduler的架构原理及实现思路。未完待续
+
diff --git a/docs/2.0.6/docs/zh/contribute/backend/mechanism/global-parameter.md b/docs/2.0.6/docs/zh/contribute/backend/mechanism/global-parameter.md
new file mode 100644
index 000000000..7df22bc22
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/mechanism/global-parameter.md
@@ -0,0 +1,61 @@
+# 全局参数开发文档
+
+用户在定义方向为 OUT 的参数后,会保存在 task 的 localParam 中。
+
+## 参数的使用
+
+从 DAG 中获取当前需要创建的 taskInstance 的直接前置节点 preTasks,获取 preTasks 的 varPool,将该 `varPool(List<Property>)`合并为一个 varPool,在合并过程中,如果发现有相同的变量名的变量,按照以下逻辑处理
+
+* 若所有的值都是 null,则合并后的值为 null
+* 若有且只有一个值为非 null,则合并后的值为该非 null 值
+* 若所有的值都不是 null,则取 varPool 的 taskInstance 的 endtime 最早的一个
+
+在合并过程中将所有的合并过来的 Property 的方向更新为 IN
+
+合并后的结果保存在 taskInstance.varPool 中。
+
+Worker 收到后将 varPool 解析为 Map<String,Property> 的格式,其中 map 的 key 为 property.prop 也就是变量名。
+
+在 processor 处理参数时,会将 varPool 和 localParam 和 globalParam 三个变量池参数合并,合并过程中若有参数名重复的参数,按照以下优先级进行替换,高优先级保留,低优先级被替换:
+
+* `globalParam` :高
+* `varPool` :中
+* `localParam` :低
+
+参数会在节点内容执行之前利用正则表达式匹配到 ${变量名},替换为对应的值。
+
+## 参数的设置
+
+目前仅支持 SQL 和 SHELL 节点的参数获取。
+从 localParam 中获取方向为 OUT 的参数,根据不同节点的类型做以下方式处理。
+
+### SQL 节点
+
+参数返回的结构为 List<Map<String,String>>
+
+其中,List 的元素为每行数据,Map 的 key 为列名,value 为该列对应的值
+
+* 若 SQL 语句返回一行数据,则根据用户在定义 task 时定义的 OUT 参数名匹配列名,若没有匹配到则放弃。
+* 若 SQL 语句返回多行,按照根据用户在定义 task 时定义的类型为 LIST 的 OUT 参数名匹配列名,将对应列的所有行数据转换为 `List<String>`,作为该参数的值。若没有匹配到则放弃。
+
+### SHELL 节点
+
+processor 执行后的结果返回为 `Map<String,String>`
+
+用户在定义 shell 脚本时需要在输出中定义 `${setValue(key=value)}`
+
+在参数处理时去掉 ${setValue()},按照 “=” 进行拆分,第 0 个为 key,第 1 个为 value。
+
+同样匹配用户定义 task 时定义的 OUT 参数名与 key,将 value 作为该参数的值。
+
+返回参数处理
+
+* 获取到的 processor 的结果为 String
+* 判断 processor 是否为空,为空退出
+* 判断 localParam 是否为空,为空退出
+* 获取 localParam 中为 OUT 的参数,为空退出
+* 将String按照上述格式格式化(SQL为List<Map<String,String>>,shell为Map<String,String>)
+* 将匹配好值的参数赋值给 varPool(List<Property>,其中包含原有 IN 的参数)
+
+varPool 格式化为 json,传递给 master。
+Master 接收到 varPool 后,将其中为 OUT 的参数回写到 localParam 中。
diff --git a/docs/2.0.6/docs/zh/contribute/backend/mechanism/overview.md b/docs/2.0.6/docs/zh/contribute/backend/mechanism/overview.md
new file mode 100644
index 000000000..22bed2737
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/mechanism/overview.md
@@ -0,0 +1,6 @@
+# 综述
+
+<!-- TODO 由于 side menu 不支持多个等级,所以新建了一个leading page存放 -->
+
+* [全局参数](global-parameter.md)
+* [switch任务类型](task/switch.md)
diff --git a/docs/2.0.6/docs/zh/contribute/backend/mechanism/task/switch.md b/docs/2.0.6/docs/zh/contribute/backend/mechanism/task/switch.md
new file mode 100644
index 000000000..27ed7f9cf
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/mechanism/task/switch.md
@@ -0,0 +1,8 @@
+# SWITCH 任务类型开发文档
+
+Switch任务类型的工作流程如下
+
+* 用户定义的表达式和分支流转的信息存在了taskdefinition中的taskParams中,当switch被执行到时,会被格式化为SwitchParameters。
+* SwitchTaskExecThread从上到下(用户在页面上定义的表达式顺序)处理switch中定义的表达式,从varPool中获取变量的值,通过js解析表达式,如果表达式返回true,则停止检查,并且记录该表达式的顺序,这里我们记录为resultConditionLocation。SwitchTaskExecThread的任务便结束了。
+* 当switch节点运行结束之后,如果没有发生错误(较为常见的是用户定义的表达式不合规范或参数名有问题),这个时候MasterExecThread.submitPostNode会获取DAG的下游节点继续执行。
+* DagHelper.parsePostNodes中如果发现当前节点(刚刚成功运行完的节点)是switch节点的话,会获取resultConditionLocation,将SwitchParameters中除了resultConditionLocation以外的其他分支全部skip掉。这样留下来的就只有需要执行的分支了。
diff --git a/docs/2.0.6/docs/zh/contribute/backend/spi/alert.md b/docs/2.0.6/docs/zh/contribute/backend/spi/alert.md
new file mode 100644
index 000000000..709802e78
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/spi/alert.md
@@ -0,0 +1,93 @@
+### DolphinScheduler Alert SPI 主要设计
+
+#### DolphinScheduler SPI 设计
+
+DolphinScheduler 正在处于微内核 + 插件化的架构更改之中,所有核心能力如任务、资源存储、注册中心等都将被设计为扩展点,我们希望通过 SPI 来提高 DolphinScheduler 本身的灵活性以及友好性(扩展性)。
+
+告警相关代码可以参考 `dolphinscheduler-alert-api` 模块。该模块定义了告警插件扩展的接口以及一些基础代码,当我们需要实现相关功能的插件化的时候,建议先阅读此块的代码,当然,更建议你阅读文档,这会减少很多时间,不过文档有一定的后滞性,当文档缺失的时候,建议以源码为准(如果有兴趣,我们也欢迎你来提交相关文档),此外,我们几乎不会对扩展接口做变更(不包括新增),除非重大架构调整,出现不兼容升级版本,因此,现有文档一般都能够满足。
+
+我们采用了原生的 JAVA-SPI,当你需要扩展的时候,事实上你只需要关注扩展`org.apache.dolphinscheduler.alert.api.AlertChannelFactory`接口即可,底层相关逻辑如插件加载等内核已经实现,这让我们的开发更加专注且简单。
+
+顺便提一句,我们采用了一款优秀的前端组件 form-create,它支持基于 json 生成前端 ui 组件,如果插件开发牵扯到前端,我们会通过 json 来生成相关前端 UI 组件,org.apache.dolphinscheduler.spi.params 里面对插件的参数做了封装,它会将相关参数全部转化为对应的 json,这意味着你完全可以通过 Java 代码的方式完成前端组件的绘制(这里主要是表单,我们只关心前后端交互的数据)。
+
+本文主要着重讲解 Alert 告警相关设计以及开发。
+
+#### 主要模块
+
+如果你并不关心它的内部设计,只是想单纯的了解如何开发自己的告警插件,可以略过该内容。
+
+* dolphinscheduler-alert-api
+
+  该模块是 ALERT SPI 的核心模块,该模块定义了告警插件扩展的接口以及一些基础代码,扩展插件必须实现此模块所定义的接口:`org.apache.dolphinscheduler.alert.api.AlertChannelFactory`
+
+* dolphinscheduler-alert-plugins
+
+  该模块是目前我们提供的插件,目前我们已经支持数十种插件,如 Email、DingTalk、Script等。
+
+
+#### Alert SPI 主要类信息:
+
+AlertChannelFactory
+告警插件工厂接口,所有告警插件需要实现该接口,该接口用来定义告警插件的名称,需要的参数,create 方法用来创建具体的告警插件实例。
+
+AlertChannel
+告警插件的接口,告警插件需要实现该接口,该接口中只有一个方法 process ,上层告警系统会调用该方法并通过该方法返回的 AlertResult 来获取告警的返回信息。
+
+AlertData
+告警内容信息,包括 id,标题,内容,日志。
+
+AlertInfo
+告警相关信息,上层系统调用告警插件实例时,将该类的实例通过 process 方法传入具体的告警插件。内部包含告警内容 AlertData 和调用的告警插件实例的前端填写的参数信息。
+
+AlertResult
+告警插件发送告警返回信息。
+
+org.apache.dolphinscheduler.spi.params
+该包下是插件化的参数定义,我们前端使用 form-create 这个前端库,该库可以基于插件定义返回的参数列表 json 来动态生成前端的 ui,因此我们在做 SPI 插件开发的时候无需关心前端。
+
+该 package 下我们目前只封装了 RadioParam,TextParam,PasswordParam,分别用来定义 text 类型的参数,radio 参数和 password 类型的参数。
+
+AbsPluginParams 该类是所有参数的基类,RadioParam 这些类都继承了该类。每个 DS 的告警插件都会在 AlertChannelFactory 的实现中返回一个 AbsPluginParams 的 list。
+
+alert_spi 具体设计可见 issue:[Alert Plugin Design](https://github.com/apache/incubator-dolphinscheduler/issues/3049)
+
+#### Alert SPI 内置实现
+
+* Email
+
+  电子邮件告警通知
+
+* DingTalk
+
+  钉钉群聊机器人告警
+
+  相关参数配置可以参考钉钉机器人文档。
+* EnterpriseWeChat
+
+  企业微信告警通知
+
+  相关参数配置可以参考企业微信机器人文档。
+* Script
+
+  我们实现了 Shell 脚本告警,我们会将相关告警参数透传给脚本,你可以在 Shell 中实现你的相关告警逻辑,如果你需要对接内部告警应用,这是一种不错的方法。
+* FeiShu
+
+  飞书告警通知
+* Slack
+
+  Slack告警通知
+* PagerDuty
+
+  PagerDuty告警通知
+* WebexTeams
+
+  WebexTeams告警通知
+  相关参数配置可以参考WebexTeams文档。
+* Telegram
+
+  Telegram告警通知
+  相关参数配置可以参考Telegram文档。
+* Http
+
+  我们实现了Http告警,调用大部分的告警插件最终都是Http请求,如果我们没有支持你常用插件,可以使用Http来实现你的告警需求,同时也欢迎将你常用插件贡献到社区。
+
diff --git a/docs/2.0.6/docs/zh/contribute/backend/spi/datasource.md b/docs/2.0.6/docs/zh/contribute/backend/spi/datasource.md
new file mode 100644
index 000000000..1868c86d9
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/spi/datasource.md
@@ -0,0 +1,23 @@
+## DolphinScheduler Datasource SPI 主要设计
+
+#### 如何使用数据源?
+
+数据源中心默认支持POSTGRESQL、HIVE/IMPALA、SPARK、CLICKHOUSE、SQLSERVER数据源。
+
+如果使用的是MySQL、ORACLE数据源,则需要把对应的驱动包放置到lib目录下
+
+#### 如何进行数据源插件开发?
+
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannel
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannelFactory
+org.apache.dolphinscheduler.plugin.datasource.api.client.CommonDataSourceClient
+
+1. 第一步数据源插件实现以上接口和继承通用client即可,具体可以参考sqlserver、mysql等数据源插件实现,所有RDBMS插件的添加方式都是一样的。
+2. 在数据源插件pom.xml添加驱动配置
+
+我们在 dolphinscheduler-datasource-api 模块提供了所有数据源对外访问的 API
+
+#### **未来计划**
+
+支持kafka、http、文件、sparkSQL、FlinkSQL等数据源
+
diff --git a/docs/2.0.6/docs/zh/contribute/backend/spi/registry.md b/docs/2.0.6/docs/zh/contribute/backend/spi/registry.md
new file mode 100644
index 000000000..36c4d1f00
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/spi/registry.md
@@ -0,0 +1,26 @@
+### DolphinScheduler Registry SPI 扩展
+
+#### 如何使用?
+
+进行以下配置(以 zookeeper 为例)
+
+* 注册中心插件配置, 以Zookeeper 为例 (registry.properties)
+  dolphinscheduler-service/src/main/resources/registry.properties
+  ```registry.properties
+   registry.plugin.name=zookeeper
+   registry.servers=127.0.0.1:2181
+  ```
+
+具体配置信息请参考具体插件提供的参数信息,例如 zk:`org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperConfiguration.java`
+所有配置信息前缀需要 +registry,如 base.sleep.time.ms,在 registry 中应该这样配置:registry.base.sleep.time.ms=100
+
+#### 如何扩展
+
+`dolphinscheduler-registry-api` 定义了实现插件的标准,当你需要扩展插件的时候只需要实现 `org.apache.dolphinscheduler.registry.api.RegistryFactory` 即可。
+
+`dolphinscheduler-registry-plugin` 模块下是我们目前所提供的注册中心插件。
+#### FAQ
+
+1:registry connect timeout
+
+可以增加相关超时参数。
diff --git a/docs/2.0.6/docs/zh/contribute/backend/spi/task.md b/docs/2.0.6/docs/zh/contribute/backend/spi/task.md
new file mode 100644
index 000000000..b2ee5242b
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/backend/spi/task.md
@@ -0,0 +1,15 @@
+## DolphinScheduler Task SPI 扩展
+
+#### 如何进行任务插件开发?
+
+org.apache.dolphinscheduler.spi.task.TaskChannel
+
+插件实现以上接口即可。主要包含创建任务(任务初始化,任务运行等方法)、任务取消,如果是 yarn 任务,则需要实现 org.apache.dolphinscheduler.plugin.task.api.AbstractYarnTask。
+
+我们在 dolphinscheduler-task-api 模块提供了所有任务对外访问的 API,而 dolphinscheduler-spi 模块则是 spi 通用代码库,定义了所有的插件模块,比如告警模块,注册中心模块等,你可以详细阅读查看。
+
+*NOTICE*
+
+由于任务插件涉及到前端页面,目前前端的SPI还没有实现,因此你需要单独实现插件对应的前端页面。
+
+如果任务插件存在类冲突,你可以采用 [Shade-Relocating Classes](https://maven.apache.org/plugins/maven-shade-plugin/) 来解决这种问题。
diff --git a/docs/2.0.6/docs/zh/contribute/development-environment-setup.md b/docs/2.0.6/docs/zh/contribute/development-environment-setup.md
new file mode 100644
index 000000000..5b2dbe059
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/development-environment-setup.md
@@ -0,0 +1,201 @@
+# DolphinScheduler 开发手册
+
+## 软件要求
+在搭建 DolphinScheduler 开发环境之前请确保你已经安装以下软件:
+
+* [Git](https://git-scm.com/downloads)
+* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html): v1.8.x (当前暂不支持 jdk 11)
+* [Maven](http://maven.apache.org/download.cgi): v3.5+
+* [Node](https://nodejs.org/en/download): v16.13+ (dolphinScheduler 版本低于 3.0, 请安装 node v12.20+)
+* [Pnpm](https://pnpm.io/installation): v6.x
+
+### 克隆代码库
+
+通过你 git 管理工具下载 git 代码,下面以 git-core 为例
+
+```shell
+mkdir dolphinscheduler
+cd dolphinscheduler
+git clone git@github.com:apache/dolphinscheduler.git
+```
+
+### 编译源码
+
+支持的系统:
+* MacOS
+* Linux
+
+运行 `mvn clean install -Prelease -Dmaven.test.skip=true`
+
+## Docker镜像构建
+
+DolphinScheduler 每次发版都会同时发布 Docker 镜像,你可以在 [Docker Hub](https://hub.docker.com/search?q=DolphinScheduler) 中找到这些镜像
+
+* 如果你想基于源码进行改造,然后在本地构建Docker镜像,可以在代码改造完成后运行
+```shell
+cd dolphinscheduler
+./mvnw -B clean package \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Ddocker.tag=<TAG> \
+       -Pdocker,release             
+```
+当命令运行完了后你可以通过 `docker images` 命令查看刚刚创建的镜像
+
+* 如果你想基于源码进行改造,然后构建Docker镜像并推送到 <HUB_URL>,可以在代码改造完成后运行
+```shell
+cd dolphinscheduler
+./mvnw -B clean deploy \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Dmaven.deploy.skip \
+       -Ddocker.tag=<TAG> \
+       -Ddocker.hub=<HUB_URL> \
+       -Pdocker,release               
+```
+
+* 如果你不仅需要改造源码,还想要自定义 Docker 镜像打包的依赖,可以在修改源码的同时修改 Dockerfile 的定义。你可以运行以下命令找到所有的 Dockerfile 文件
+
+```shell
+cd dolphinscheduler
+find . -iname 'Dockerfile'
+```
+
+之后再运行上面的构建镜像命令
+
+* 如果你因为个性化需求想要自己打包 Docker 镜像,最佳实践是基于 DolphinScheduler 对应镜像编写 Dockerfile 文件
+
+```Dockerfile
+FROM dolphinscheduler-standalone-server
+RUN apt update ; \
+    apt install -y <YOUR-CUSTOM-DEPENDENCE> ; \
+```
+
+> **_注意:_** Docker默认会构建并推送 linux/amd64,linux/arm64 多架构镜像
+>
+> 必须使用Docker 19.03及以后的版本,因为19.03及以后的版本包含 buildx
+
+## 开发者须知
+
+DolphinScheduler 开发环境配置有两个方式,分别是standalone模式,以及普通模式
+
+* [standalone模式](#dolphinscheduler-standalone快速开发模式):**推荐使用,但仅支持 1.3.9 及以后的版本**,方便快速的开发环境搭建,能解决大部分场景的开发
+* [普通模式](#dolphinscheduler-普通开发模式):master、worker、api等单独启动,能更好的的模拟真实生产环境,可以覆盖的测试环境更多
+
+## DolphinScheduler Standalone快速开发模式
+
+> **_注意:_** 仅供单机开发调试使用,默认使用 H2 Database,Zookeeper Testing Server
+> 
+> Standalone 仅在 DolphinScheduler 1.3.9 及以后的版本支持
+
+### 分支选择
+
+开发不同的代码需要基于不同的分支
+
+* 如果想基于二进制包开发,切换到对应版本的代码,如 1.3.9 则是 `1.3.9-release`
+* 如果想要开发最新代码,切换到 `dev` 分支
+
+### 启动后端
+
+在 Intellij IDEA 找到并启动类 `org.apache.dolphinscheduler.StandaloneServer` 即可完成后端启动
+
+### 启动前端
+
+安装前端依赖并运行前端组件
+> 注意:你可以在[frontend development](./frontend-development.md)里查看更多前端的相关配置
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+截止目前,前后端已成功运行起来,浏览器访问[http://localhost:3000](http://localhost:3000),并使用默认账户密码 **admin/dolphinscheduler123** 即可完成登录
+
+## DolphinScheduler 普通开发模式
+
+### 必要软件安装
+
+#### zookeeper
+
+下载 [ZooKeeper](https://www.apache.org/dyn/closer.lua/zookeeper/zookeeper-3.6.3),解压
+
+* 在 ZooKeeper 的目录下新建 zkData、zkLog文件夹
+* 将 conf 目录下的 `zoo_sample.cfg` 文件,复制一份,重命名为 `zoo.cfg`,修改其中数据和日志的配置,如:
+
+    ```shell
+    dataDir=/data/zookeeper/data ## 此处使用绝对路径
+    dataLogDir=/data/zookeeper/datalog
+    ```
+
+* 运行 `./bin/zkServer.sh`
+
+#### 数据库
+
+DolphinScheduler 的元数据存储在关系型数据库中,目前支持的关系型数据库包括 MySQL 以及 PostgreSQL。下面以MySQL为例,启动数据库并创建新 database 作为 DolphinScheduler 元数据库,这里以数据库名 dolphinscheduler 为例
+
+创建完新数据库后,将 `dolphinscheduler/dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql` 下的 sql 文件直接在 MySQL 中运行,完成数据库初始化
+
+#### 启动后端
+
+下面步骤将引导如何启动 DolphinScheduler 后端服务
+
+##### 必要的准备工作
+
+* 打开项目:使用开发工具打开项目,这里以 Intellij IDEA 为例,打开后需要一段时间,让 Intellij IDEA 完成以依赖的下载
+
+* 必要的修改
+  * 如果使用 MySQL 作为元数据库,需要先修改 `dolphinscheduler/pom.xml`,将 `mysql-connector-java` 依赖的 `scope` 改为 `compile`,使用 PostgreSQL 则不需要
+  * 修改 Master 数据库配置,修改 `dolphinscheduler-master/src/main/resources/application.yaml` 文件中的数据库配置
+  * 修改 Worker 数据库配置,修改 `dolphinscheduler-worker/src/main/resources/application.yaml` 文件中的数据库配置
+  * 修改 Api 数据库配置,修改 `dolphinscheduler-api/src/main/resources/application.yaml` 文件中的数据库配置
+
+  本样例以 MySQL 为例,其中数据库名为 dolphinscheduler,账户名密码均为 dolphinscheduler
+  ```application.yaml
+   spring:
+     datasource:
+       driver-class-name: com.mysql.cj.jdbc.Driver
+       url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+       username: dolphinscheduler
+       password: dolphinscheduler
+  ```
+
+* 修改日志级别:为以下配置增加一行内容 `<appender-ref ref="STDOUT"/>` 使日志能在命令行中显示
+
+  `dolphinscheduler-master/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-worker/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-api/src/main/resources/logback-spring.xml`
+
+  修改后的结果如下:
+
+  ```diff
+  <root level="INFO">
+  +  <appender-ref ref="STDOUT"/>
+    <appender-ref ref="APILOGFILE"/>
+    <appender-ref ref="SKYWALKING-LOG"/>
+  </root>
+  ```
+
+##### 启动服务
+
+我们需要启动三个服务,包括 MasterServer,WorkerServer,ApiApplicationServer
+
+* MasterServer:在 Intellij IDEA 中执行 `org.apache.dolphinscheduler.server.master.MasterServer` 中的 `main` 方法,并配置 *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* WorkerServer:在 Intellij IDEA 中执行 `org.apache.dolphinscheduler.server.worker.WorkerServer` 中的 `main` 方法,并配置 *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* ApiApplicationServer:在 Intellij IDEA 中执行 `org.apache.dolphinscheduler.api.ApiApplicationServer` 中的 `main` 方法,并配置 *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Dspring.profiles.active=api,mysql`。启动完成可以浏览 Open API 文档,地址为 http://localhost:12345/dolphinscheduler/doc.html
+
+> VM Options `-Dspring.profiles.active=mysql` 中 `mysql` 表示指定的配置文件
+
+### 启动前端
+
+安装前端依赖并运行前端组件
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+截止目前,前后端已成功运行起来,浏览器访问[http://localhost:3000](http://localhost:3000),并使用默认账户密码 **admin/dolphinscheduler123** 即可完成登录
diff --git a/docs/2.0.6/docs/zh/contribute/frontend-development.md b/docs/2.0.6/docs/zh/contribute/frontend-development.md
new file mode 100644
index 000000000..df07d6af9
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/frontend-development.md
@@ -0,0 +1,639 @@
+# 前端开发文档
+
+### 技术选型
+```
+Vue mvvm 框架
+
+Es6 ECMAScript 6.0
+
+Ans-ui Analysys-ui
+
+D3 可视化库图表库
+
+Jsplumb 连线插件库
+
+Lodash 高性能的 JavaScript 实用工具库
+```
+
+### 开发环境搭建
+   
+- #### Node安装
+Node包下载 (注意版本 v12.20.2) `https://nodejs.org/download/release/v12.20.2/` 
+
+- #### 前端项目构建
+用命令行模式 `cd`  进入 `dolphinscheduler-ui`项目目录并执行 `npm install` 拉取项目依赖包
+
+> 如果 `npm install` 速度非常慢,你可以设置淘宝镜像
+
+```
+npm config set registry http://registry.npm.taobao.org/
+```
+
+- 修改 `dolphinscheduler-ui/.env` 文件中的 `API_BASE`,用于跟后端交互:
+
+```
+# 代理的接口地址(自行修改)
+API_BASE = http://127.0.0.1:12345
+```
+
+> #####  !!!这里特别注意 项目如果在拉取依赖包的过程中报 " node-sass error " 错误,请在执行完后再次执行以下命令
+
+```bash
+npm install node-sass --unsafe-perm #单独安装node-sass依赖
+```
+
+- #### 开发环境运行
+- `npm start` 项目开发环境 (启动后访问地址 http://localhost:8888)
+
+#### 前端项目发布
+
+- `npm run build` 项目打包 (打包后根目录会创建一个名为dist文件夹,用于发布线上Nginx)
+
+运行 `npm run build` 命令,生成打包文件(dist)包
+
+再拷贝到服务器对应的目录下(前端服务静态页面存放目录)
+
+访问地址 `http://localhost:8888` 
+
+#### Linux下使用node启动并且守护进程
+
+安装pm2 `npm install -g pm2`
+
+在项目`dolphinscheduler-ui`根目录执行 `pm2 start npm -- run dev` 启动项目
+
+#### 命令
+
+- 启用 `pm2 start npm -- run dev`
+
+- 停止 `pm2 stop npm`
+
+- 删除 `pm2 delete npm`
+
+- 状态 `pm2 list`
+
+```
+
+[root@localhost dolphinscheduler-ui]# pm2 start npm -- run dev
+[PM2] Applying action restartProcessId on app [npm](ids: 0)
+[PM2] [npm](0) ✓
+[PM2] Process successfully started
+┌──────────┬────┬─────────┬──────┬──────┬────────┬─────────┬────────┬─────┬──────────┬──────┬──────────┐
+│ App name │ id │ version │ mode │ pid  │ status │ restart │ uptime │ cpu │ mem      │ user │ watching │
+├──────────┼────┼─────────┼──────┼──────┼────────┼─────────┼────────┼─────┼──────────┼──────┼──────────┤
+│ npm      │ 0  │ N/A     │ fork │ 6168 │ online │ 31      │ 0s     │ 0%  │ 5.6 MB   │ root │ disabled │
+└──────────┴────┴─────────┴──────┴──────┴────────┴─────────┴────────┴─────┴──────────┴──────┴──────────┘
+ Use `pm2 show <id|name>` to get more details about an app
+
+```
+
+### 项目目录结构
+
+`build` 打包及开发环境项目的一些webpack配置
+
+`node_modules` 开发环境node依赖包
+
+`src` 项目所需文件
+
+`src => combo` 项目第三方资源本地化 `npm run combo`具体查看`build/combo.js`
+
+`src => font` 字体图标库可访问 `https://www.iconfont.cn` 进行添加 注意:字体库用的自己的 二次开发需要重新引入自己的库 `src/sass/common/_font.scss`
+
+`src => images` 公共图片存放
+
+`src => js` js/vue
+
+`src => lib` 公司内部组件(公司组件库开源后可删掉)
+
+`src => sass` sass文件 一个页面对应一个sass文件
+
+`src => view` 页面文件 一个页面对应一个html文件
+
+```
+> 项目采用vue单页面应用(SPA)开发
+- 所有页面入口文件在 `src/js/conf/${对应页面文件名 => home}` 的 `index.js` 入口文件
+- 对应的sass文件则在 `src/sass/conf/${对应页面文件名 => home}/index.scss`
+- 对应的html文件则在 `src/view/${对应页面文件名 => home}/index.html`
+```
+
+公共模块及util `src/js/module`
+
+`components` => 内部项目公共组件
+
+`download` => 下载组件
+
+`echarts` => 图表组件
+
+`filter` => 过滤器和vue管道
+
+`i18n` => 国际化
+
+`io` => io请求封装 基于axios
+
+`mixin` => vue mixin 公共部分 用于disabled操作
+
+`permissions` => 权限操作
+
+`util` => 工具
+
+### 系统功能模块
+
+首页 => `http://localhost:8888/#/home`
+
+项目管理 => `http://localhost:8888/#/projects/list`
+```
+| 项目首页
+| 工作流
+  - 工作流定义
+  - 工作流实例
+  - 任务实例
+```
+ 
+资源管理 => `http://localhost:8888/#/resource/file`
+```
+| 文件管理
+| UDF管理
+  - 资源管理
+  - 函数管理
+```
+
+数据源管理 => `http://localhost:8888/#/datasource/list`
+
+安全中心 => `http://localhost:8888/#/security/tenant`
+```
+| 租户管理
+| 用户管理
+| 告警组管理
+  - master
+  - worker
+```
+
+用户中心 => `http://localhost:8888/#/user/account`
+
+## 路由和状态管理
+
+项目 `src/js/conf/home` 下分为
+
+`pages` => 路由指向页面目录
+```
+ 路由地址对应的页面文件
+```
+
+`router` => 路由管理
+```
+vue的路由器,在每个页面的入口文件index.js 都会注册进来 具体操作:https://router.vuejs.org/zh/
+```
+
+`store` => 状态管理
+```
+每个路由对应的页面都有一个状态管理的文件 分为:
+
+actions => mapActions => 详情:https://vuex.vuejs.org/zh/guide/actions.html
+
+getters => mapGetters => 详情:https://vuex.vuejs.org/zh/guide/getters.html
+
+index => 入口
+
+mutations => mapMutations => 详情:https://vuex.vuejs.org/zh/guide/mutations.html
+
+state => mapState => 详情:https://vuex.vuejs.org/zh/guide/state.html
+
+具体操作:https://vuex.vuejs.org/zh/
+```
+
+## 规范
+## Vue规范
+##### 1.组件名
+组件名为多个单词,并且用连接线(-)连接,避免与 HTML 标签冲突,并且结构更加清晰。
+```
+// 正例
+export default {
+    name: 'page-article-item'
+}
+```
+
+##### 2.组件文件
+`src/js/module/components`项目内部公共组件书写文件夹名与文件名同名,公共组件内部所拆分的子组件与util工具都放置组件内部 `_source`文件夹里。
+```
+└── components
+    ├── header
+        ├── header.vue
+        └── _source
+            └── nav.vue
+            └── util.js
+    ├── conditions
+        ├── conditions.vue
+        └── _source
+            └── search.vue
+            └── util.js
+```
+
+##### 3.Prop
+定义 Prop 的时候应该始终以驼峰格式(camelCase)命名,在父组件赋值的时候使用连接线(-)。
+这里遵循每个语言的特性,因为在 HTML 标记中对大小写是不敏感的,使用连接线更加友好;而在 JavaScript 中更自然的是驼峰命名。
+
+```
+// Vue
+props: {
+    articleStatus: Boolean
+}
+// HTML
+<article-item :article-status="true"></article-item>
+```
+
+Prop 的定义应该尽量详细的指定其类型、默认值和验证。
+
+示例:
+
+```
+props: {
+    attrM: Number,
+    attrA: {
+        type: String,
+        required: true
+    },
+    attrZ: {
+        type: Object,
+        // 数组/对象的默认值应该由一个工厂函数返回
+        default: function () {
+            return {
+                msg: '成就你我'
+            }
+        }
+    },
+    attrE: {
+        type: String,
+        validator: function (v) {
+            return !(['success', 'fail'].indexOf(v) === -1) 
+        }
+    }
+}
+```
+
+##### 4.v-for
+在执行 v-for 遍历的时候,总是应该带上 key 值使更新 DOM 时渲染效率更高。
+```
+<ul>
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+v-for 应该避免与 v-if 在同一个元素(`例如:<li>`)上使用,因为 v-for 的优先级比 v-if 更高,为了避免无效计算和渲染,应该尽量将 v-if 放到容器的父元素之上。
+```
+<ul v-if="showList">
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+##### 5.v-if / v-else-if / v-else
+若同一组 v-if 逻辑控制中的元素逻辑相同,Vue 为了更高效的元素切换,会复用相同的部分,`例如:value`。为了避免复用带来的不合理效果,应该在同种元素上加上 key 做标识。
+```
+<div v-if="hasData" key="mazey-data">
+    <span>{{ mazeyData }}</span>
+</div>
+<div v-else key="mazey-none">
+    <span>无数据</span>
+</div>
+```
+
+##### 6.指令缩写
+为了统一规范始终使用指令缩写,使用`v-bind`,`v-on`并没有什么不好,这里仅为了统一规范。
+```
+<input :value="mazeyUser" @click="verifyUser">
+```
+
+##### 7.单文件组件的顶级元素顺序
+样式后续都是打包在一个文件里,所以在单个vue文件中定义的样式,在别的文件里同类名的样式也是会生效的,所以在创建一个组件前都会有个顶级类名
+注意:项目内已经增加了sass插件,单个vue文件里可以直接书写sass语法
+为了统一和便于阅读,应该按 `<template>`、`<script>`、`<style>`的顺序放置。
+
+```
+<template>
+  <div class="test-model">
+    test
+  </div>
+</template>
+<script>
+  export default {
+    name: "test",
+    data() {
+      return {}
+    },
+    props: {},
+    methods: {},
+    watch: {},
+    beforeCreate() {
+    },
+    created() {
+    },
+    beforeMount() {
+    },
+    mounted() {
+    },
+    beforeUpdate() {
+    },
+    updated() {
+    },
+    beforeDestroy() {
+    },
+    destroyed() {
+    },
+    computed: {},
+    components: {},
+  }
+</script>
+
+<style lang="scss" rel="stylesheet/scss">
+  .test-model {
+
+  }
+</style>
+
+```
+
+## JavaScript规范
+
+##### 1.var / let / const
+建议不再使用 var,而使用 let / const,优先使用 const。任何一个变量的使用都要提前申明,除了 function 定义的函数可以随便放在任何位置。
+
+##### 2.引号
+```
+const foo = '后除'
+const bar = `${foo},前端工程师`
+```
+
+##### 3.函数
+匿名函数统一使用箭头函数,多个参数/返回值时优先使用对象的结构赋值。
+```
+function getPersonInfo ({name, sex}) {
+    // ...
+    return {name, gender}
+}
+```
+函数名统一使用驼峰命名,以大写字母开头申明的都是构造函数,使用小写字母开头的都是普通函数,也不该使用 new 操作符去操作普通函数。
+
+##### 4.对象
+```
+const foo = {a: 0, b: 1}
+const bar = JSON.parse(JSON.stringify(foo))
+
+const foo = {a: 0, b: 1}
+const bar = {...foo, c: 2}
+
+const foo = {a: 3}
+Object.assign(foo, {b: 4})
+
+const myMap = new Map([])
+for (let [key, value] of myMap.entries()) {
+    // ...
+}
+```
+
+##### 5.模块
+统一使用 import / export 的方式管理项目的模块。
+```
+// lib.js
+export default {}
+
+// app.js
+import app from './lib'
+```
+
+import 统一放在文件顶部。
+
+如果模块只有一个输出值,使用 `export default`,否则不用。
+
+
+## HTML / CSS
+
+###### 1.标签
+在引用外部 CSS 或 JavaScript 时不写 type 属性。HTML5 默认 type 为 `text/css` 和 `text/javascript` 属性,所以没必要指定。
+```
+<link rel="stylesheet" href="//www.test.com/css/test.css">
+<script src="//www.test.com/js/test.js"></script>
+```
+
+##### 2.命名
+Class 和 ID 的命名应该语义化,通过看名字就知道是干嘛的;多个单词用连接线 - 连接。
+```
+// 正例
+.test-header{
+    font-size: 20px;
+}
+```
+
+##### 3.属性缩写
+CSS 属性尽量使用缩写,提高代码的效率和方便理解。
+
+```
+// 反例
+border-width: 1px;
+border-style: solid;
+border-color: #ccc;
+
+// 正例
+border: 1px solid #ccc;
+```
+
+##### 4.文档类型
+应该总是使用 HTML5 标准。
+
+```
+<!DOCTYPE html>
+```
+
+##### 5.注释
+应该给一个模块文件写一个区块注释。
+```
+/**
+* @module mazey/api
+* @author Mazey <ma...@mazey.net>
+* @description test.
+* */
+```
+
+## 接口
+
+##### 所有的接口都以 Promise 形式返回 
+注意:返回的 code 非 0 都视为错误,会走 catch
+
+```
+const test = () => {
+  return new Promise((resolve, reject) => {
+    resolve({
+      a:1
+    })
+  })
+}
+
+// 调用
+test().then(res => {
+  console.log(res)
+  // {a:1}
+})
+```
+
+正常返回
+```
+{
+  code:0,
+  data:{}
+  msg:'成功'
+}
+```
+
+错误返回
+```
+{
+  code:10000, 
+  data:{}
+  msg:'失败'
+}
+```
+接口如果是post请求,Content-Type默认为application/x-www-form-urlencoded;如果Content-Type改成application/json,
+接口传参需要改成下面的方式
+```
+io.post('url', payload, null, null, { emulateJSON: false }, res => {
+  resolve(res)
+}).catch(e => {
+  reject(e)
+})
+```
+
+##### 相关接口路径
+
+dag 相关接口 `src/js/conf/home/store/dag/actions.js`
+
+数据源中心 相关接口 `src/js/conf/home/store/datasource/actions.js`
+
+项目管理 相关接口 `src/js/conf/home/store/projects/actions.js`
+
+资源中心 相关接口 `src/js/conf/home/store/resource/actions.js`
+
+安全中心 相关接口 `src/js/conf/home/store/security/actions.js`
+
+用户中心 相关接口 `src/js/conf/home/store/user/actions.js`
+
+## 扩展开发
+
+##### 1.增加节点
+
+(1) 先将节点的icon小图标放置`src/js/conf/home/pages/dag`文件夹内,注意 `toolbar_${后台定义的节点的英文名称 例如:SHELL}.png`
+
+(2) 找到 `src/js/conf/home/pages/dag/_source/config.js` 里的 `tasksType` 对象,往里增加
+```
+'DEPENDENT': {  // 后台定义节点类型英文名称用作key值
+  desc: 'DEPENDENT',  // tooltip desc
+  color: '#2FBFD8'  // 代表的颜色主要用于 tree和gantt 两张图
+}
+```
+
+(3) 在 `src/js/conf/home/pages/dag/_source/formModel/tasks` 增加一个 `${节点类型(小写)}`.vue 文件,跟当前节点相关的组件内容都在这里写。 属于节点组件内的必须拥有一个函数 `_verification()` 验证成功后将当前组件的相关数据往父组件抛。
+```
+/**
+ * 验证
+*/
+  _verification () {
+    // datasource 子组件验证
+    if (!this.$refs.refDs._verifDatasource()) {
+      return false
+    }
+
+    // 验证函数
+    if (!this.method) {
+      this.$message.warning(`${i18n.$t('请输入方法')}`)
+      return false
+    }
+
+    // localParams 子组件验证
+    if (!this.$refs.refLocalParams._verifProp()) {
+      return false
+    }
+    // 存储
+    this.$emit('on-params', {
+      type: this.type,
+      datasource: this.datasource,
+      method: this.method,
+      localParams: this.localParams
+    })
+    return true
+  }
+``` 
+
+(4) 节点组件内部所用到公共的组件都在`_source`下,`commcon.js`用于配置公共数据
+
+##### 2.增加状态类型
+
+(1) 找到 `src/js/conf/home/pages/dag/_source/config.js` 里的 `tasksState` 对象,往里增加
+```
+'WAITTING_DEPEND': {  //后端定义状态类型 前端用作key值
+  id: 11,  // 前端定义id 后续用作排序
+  desc: `${i18n.$t('等待依赖')}`,  // tooltip desc
+  color: '#5101be',  // 代表的颜色主要用于 tree和gantt 两张图
+  icoUnicode: '&#xe68c;',  // 字体图标 
+  isSpin: false  // 是否旋转(需代码判断)
+}
+```
+
+##### 3.增加操作栏工具
+(1) 找到 `src/js/conf/home/pages/dag/_source/config.js` 里的 `toolOper` 对象,往里增加
+```
+{
+  code: 'pointer',  // 工具标识
+  icon: '&#xe781;',  // 工具图标 
+  disable: disable,  // 是否禁用
+  desc: `${i18n.$t('拖动节点和选中项')}`  // tooltip desc
+}
+```
+
+(2) 工具类都以一个构造函数返回 `src/js/conf/home/pages/dag/_source/plugIn`
+
+`downChart.js`  =>  dag 图片下载处理 
+
+`dragZoom.js`  =>  鼠标缩放效果处理 
+
+`jsPlumbHandle.js`  =>  拖拽线条处理 
+
+`util.js`  =>   属于 `plugIn` 工具类
+
+
+操作则在 `src/js/conf/home/pages/dag/_source/dag.js` => `toolbarEvent` 事件中处理。
+
+
+##### 3.增加一个路由页面
+
+(1) 首先在路由管理增加一个路由地址`src/js/conf/home/router/index.js`
+```
+{
+  path: '/test',  // 路由地址 
+  name: 'test',  // 别名
+  component: resolve => require(['../pages/test/index'], resolve),  // 路由对应组件入口文件
+  meta: {
+    title: `${i18n.$t('test')} - DolphinScheduler`  // title 显示
+  }
+},
+```
+
+(2) 在`src/js/conf/home/pages` 建一个 `test` 文件夹,在文件夹里建一个`index.vue`入口文件。
+
+    这样就可以直接访问 `http://localhost:8888/#/test`
+
+
+##### 4.增加预置邮箱
+
+找到`src/lib/localData/email.js`启动和定时邮箱地址输入可以自动下拉匹配。
+```
+export default ["test@analysys.com.cn","test1@analysys.com.cn","test3@analysys.com.cn"]
+```
+
+##### 5.权限管理及disabled状态处理
+
+权限根据后端接口`getUserInfo`接口给出`userType: "ADMIN_USER/GENERAL_USER"`权限控制页面操作按钮是否`disabled`
+
+具体操作:`src/js/module/permissions/index.js`
+
+disabled处理:`src/js/module/mixin/disabledState.js`
+
diff --git a/docs/2.0.6/docs/zh/contribute/have-questions.md b/docs/2.0.6/docs/zh/contribute/have-questions.md
new file mode 100644
index 000000000..7d9ad9cc9
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/have-questions.md
@@ -0,0 +1,65 @@
+# 当你遇到问题时
+
+## StackOverflow
+
+如果在使用上有疑问,建议你使用StackOverflow标签 [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler),这是一个DolphinScheduler用户问答的活跃论坛。
+
+使用StackOverflow时的快速提示:
+
+- 在提交问题之前:
+  - 在StackOverflow的 [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) 标签下进行搜索,看看你的问题是否已经被回答。
+
+- 请遵守StackOverflow的[行为准则](https://stackoverflow.com/help/how-to-ask)
+
+- 提出问题时,请务必使用apache-dolphinscheduler标签。
+
+- 请不要在 [StackOverflow](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) 和 [GitHub issues](https://github.com/apache/dolphinscheduler/issues/new/choose)之间交叉发帖。
+
+提问模板:
+
+> **Describe the question**
+>
+> 对问题的内容进行清晰、简明的描述。
+>
+> **Which version of DolphinScheduler:**
+>
+>  -[1.3.0-preview]
+>
+> **Additional context**
+>
+> 在此添加关于该问题的其他背景。
+>
+> **Requirement or improvement**
+>
+> 在此描述您的要求或改进建议。
+
+如果你的问题较为宽泛、有意见或建议、期望请求外部资源,或是有项目调试、bug提交等相关问题,或者想要对项目做出贡献、对场景进行讨论,建议你提交[ GitHub issues ](https://github.com/apache/dolphinscheduler/issues/new/choose)或使用dev@dolphinscheduler.apache.org 邮件列表进行讨论。
+
+## 邮件列表
+
+- [dev@dolphinscheduler.apache.org](https://lists.apache.org/list.html?dev@dolphinscheduler.apache.org) 是为那些想为DolphinScheduler贡献代码的人准备的。 [(订阅)](mailto:dev-subscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20subscribe)) [(退订)](mailto:dev-unsubscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20unsubscribe)) [(存档)](http://lists.apache.org/list.html?dev@dolphinscheduler.apache.org)
+
+使用电子邮件时的一些快速提示:
+
+- 在提出问题之前:
+  - 请在StackOverflow的 [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) 标签下进行搜索,看看你的问题是否已经被回答。
+- 在你的邮件的主题栏里加上标签会帮助你得到更快的回应,例如:[ApiServer]:如何获得开放的api接口?
+- 可以通过以下标签定义你的主题。
+  - 组件相关:MasterServer、ApiServer、WorkerServer、AlertServer等等。
+  - 级别:Beginner、Intermediate、Advanced
+  - 场景相关:Debug,、How-to
+- 如果内容包括错误日志或长代码,请使用 [GitHub gist](https://gist.github.com/),并在邮件中只附加相关代码/日志的几行。
+
+## Chat Rooms
+
+聊天室是快速提问或讨论具体话题的好地方。
+
+以下聊天室是Apache DolphinScheduler的正式组成部分:
+
+​	Slack工作区的网址:http://asf-dolphinscheduler.slack.com/
+
+​	你可以通过该邀请链接加入:https://s.apache.org/dolphinscheduler-slack 
+
+此聊天室用于与DolphinScheduler使用相关的问题讨论。
+
+ 
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/DS-License.md b/docs/2.0.6/docs/zh/contribute/join/DS-License.md
new file mode 100644
index 000000000..94aa2cd3d
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/DS-License.md
@@ -0,0 +1,104 @@
+# License 须知
+
+如您所知,DolphinScheduler现属于ASF(Apache基金会)下的开源项目,这意味着当您想要成为DolphinScheduler的贡献者的时候,就必须按照Apache的规则来,而Apache对于License有着极其严苛的规则,为了避免贡献者在License上浪费过多的时间,
+本文将为您讲解ASF—License,以及参与DolphinScheduler贡献时如何尽早规避License风险。
+
+注:本文仅适用于Apache项目。
+
+### Apache项目可接受的License
+
+当您想要为DolphinScheduler(亦或其他Apache项目)增添一个新的功能,这个功能涉及到其他开源软件的引用,那么您必须注意,目前Apache项目支持遵从以下协议的开源软件(如果有遗漏,欢迎补充):
+
+[ASF第三方许可证策略](https://apache.org/legal/resolved.html)
+
+如果您所使用的第三方软件并不在以上协议之中,那么很抱歉,您的代码将无法通过审核,建议您找寻其他替代方案。
+
+另外,当您需要使用新的软件的时候,请将您这样做的原因、最终产出结果发邮件至dev@dolphinscheduler.apache.org讨论,当得到至少3票PPMC认同的时候,您方可以引入。
+
+### 如何在DolphinScheduler合法的使用第三方开源软件
+
+当我们想要引入一个新的第三方软件(包含但不限于第三方的jar、文本、css、js、图片、图标、音视频等及在第三方基础上做的修改)至我们的项目中的时候,除了他们所遵从的协议是Apache允许的,另外一点很重要,就是合法的使用。您可以参考以下文章
+
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+
+
+以Apache为例,当我们使用了ZooKeeper,那么ZooKeeper的NOTICE文件(每个开源项目都会有NOTICE文件,一般位于根目录)则必须在我们的项目中体现,用Apache的话来讲,就是"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work.
+
+关于具体的各个开源协议使用协议,在此不做过多篇幅一一介绍,有兴趣可以自行查询了解。
+
+### DolphinScheduler-License 检测规则
+
+一般来讲,我们都会为自己的项目建立License-check脚本,DolphinScheduler-License是由[kezhenxu94](https://github.com/kezhenxu94)提供,其他开源软件略有不同,但最终结果都是为了确保我们在使用过程中能够第一时间避免License的问题。
+
+当我们需要添加新的Jar或其他外部资源的时候,我们需要按照以下步骤:
+
+* 在known-dependencies.txt中添加你所需要的jar名称+版本。
+* 在dolphinscheduler-dist/release-docs/LICENSE中添加相关的maven仓库地址。
+* 在dolphinscheduler-dist/release-docs/NOTICE中追加相关的NOTICE文件,此文件请务必和原代码仓库地址中的NOTICE文件一致。
+* 在dolphinscheduler-dist/release-docs/license/下添加相关源代码的协议,文件命名为license+文件名.txt。
+#### check dependency license fail
+```
+--- /dev/fd/63	2020-12-03 03:08:57.191579482 +0000
++++ /dev/fd/62	2020-12-03 03:08:57.191579482 +0000
+@@ -1,0 +2 @@
++HikariCP-java6-2.3.13.jar
+@@ -16,0 +18 @@
++c3p0-0.9.5.2.jar
+@@ -149,0 +152 @@
++mchange-commons-java-0.2.11.jar
+Error: Process completed with exit code 1.
+```
+一般来讲,添加一个jar的工作往往不会如此轻易的结束,因为它往往依赖了其它各种各样的jar,这些jar我们同样需要添加相应的license。
+这种情况下,我们会在check里面得到 check dependency license fail的错误信息,如上,我们缺少了HikariCP-java6-2.3.13、c3p0等的license声明,
+按照添加jar的步骤补充即可,提示还是蛮友好的(哈哈)。
+### 附件
+
+<!-- markdown-link-check-disable -->
+附件:新jar的邮件格式 
+```
+[VOTE][New Jar] jetcd-core(registry plugin support etcd3 ) 
+
+
+(说明目的,以及需要添加的 jar 是什么)Hi, the registry SPI will provide the implementation of etcd3. Therefore, we need to introduce a new jar (jetcd-core, jetcd-launcher (test)), which complies with the Apache-2.0 License. I checked his related dependencies to make sure it complies with the license of the Apache project.
+
+new jar : 
+
+jetcd-core             version -x.x.x   license apache2.0
+
+jetcd-launcher (test)  version -x.x.x   license apache2.0
+
+dependent jar(它依赖了哪些jar,最好附带版本,以及相关采用的license协议):
+grpc-core     version -x.x.x  license XXX
+grpc-netty    version -x.x.x  license XXX
+grpc-protobuf version -x.x.x  license XXX
+grpc-stub     version -x.x.x  license XXX
+grpc-grpclb   version -x.x.x  license XXX
+netty-all     version -x.x.x  license XXX
+failsafe      version -x.x.x  license XXX
+
+相关地址:主要有github地址、license文件地址、notice 文件地址、maven中央仓库地址
+
+github address:https://github.com/etcd-io/jetcd
+
+
+license:https://github.com/etcd-io/jetcd/blob/master/LICENSE
+
+
+notice:https://github.com/etcd-io/jetcd/blob/master/NOTICE
+
+
+Maven repository:
+
+
+https://mvnrepository.com/artifact/io.etcd/jetcd-core
+
+
+https://mvnrepository.com/artifact/io.etcd/jetcd-launcher
+```
+<!-- markdown-link-check-enable -->
+
+### 参考文章:
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+* [ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html)
+
diff --git a/docs/2.0.6/docs/zh/contribute/join/become-a-committer.md b/docs/2.0.6/docs/zh/contribute/join/become-a-committer.md
new file mode 100644
index 000000000..b38dd0cec
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/become-a-committer.md
@@ -0,0 +1,12 @@
+# 如何成为 DolphinScheduler Committer
+
+每个人都可以成为Apache项目的贡献者。作为一个贡献者只是意味着你对项目感兴趣并以某种方式做出贡献,从提出合理的问题(这些问题记录了项目并向开发人员提供反馈)到提供新的特性作为补丁。
+
+如果你成为对一个项目有价值的贡献者,你有可能被邀请成为一个committer。committer是ASF(Apache软件基金会)中用来表示提交特定项目的人的术语。它会给你带来对项目仓库和资源的写入权限。
+
+在Dolphinscheduler社区,如果一个committer获得大量的优秀成绩,就可以被邀请加入项目管理委员会(PMC)。
+
+当您不熟悉ASF使用的开源的开发过程时,有时难以理解的一点,就是我们更重视社区而不是代码。一个强大而健康的社区将受到尊重,成为一个有趣和有益的地方。更重要的是,一个多元化和健康的社区
+可以长时间的持续支持代码,即使个别公司在这个领域来来往往,也是如此。
+
+更多详细信息可以在[这里](https://community.apache.org/contributors/)找到
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/code-conduct.md b/docs/2.0.6/docs/zh/contribute/join/code-conduct.md
new file mode 100644
index 000000000..a3668b52f
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/code-conduct.md
@@ -0,0 +1,68 @@
+# 行为准则
+
+以下行为准则以完全遵循[Apache软件基金会行为准则](https://www.apache.org/foundation/policies/conduct.html)为前提。
+
+## 开发理念
+ - **一致** 代码风格、命名以及使用方式保持一致。
+ - **易读** 代码无歧义,易于阅读和理解而非调试手段才知晓代码意图。
+ - **整洁** 认同《重构》和《代码整洁之道》的理念,追求整洁优雅代码。
+ - **抽象** 层次划分清晰,概念提炼合理。保持方法、类、包以及模块处于同一抽象层级。
+ - **用心** 保持责任心,持续以工匠精神雕琢。
+ 
+## 开发规范
+
+ - 执行`mvn -U clean package -Prelease`可以编译和测试通过全部测试用例。
+ - 测试覆盖率工具检查不低于dev分支覆盖率。
+ - 请使用Checkstyle检查代码,违反验证规则的需要有特殊理由。模板位置在根目录下ds_check_style.xml。
+ - 遵守编码规范。
+ 
+## 编码规范
+
+ - 使用linux换行符。
+ - 缩进(包含空行)和上一行保持一致。
+ - 类声明后与下面的变量或方法之间需要空一行。
+ - 不应有无意义的空行。
+ - 类、方法和变量的命名要做到顾名思义,避免使用缩写。
+ - 返回值变量使用`result`命名;循环中使用`each`命名循环变量;map中使用`entry`代替`each`。
+ - 捕获的异常名称命名为`e`;捕获异常且不做任何事情,异常名称命名为`ignored`。
+ - 配置文件使用驼峰命名,文件名首字母小写。
+ - 需要注释解释的代码尽量提成小方法,用方法名称解释。
+ - `equals`和`==`条件表达式中,常量在左,变量在右;大于小于等条件表达式中,变量在左,常量在右。
+ - 除了用于继承的抽象类之外,尽量将类设计为`final`。
+ - 嵌套循环尽量提成方法。
+ - 成员变量定义顺序以及参数传递顺序在各个类和方法中保持一致。
+ - 优先使用卫语句。
+ - 类和方法的访问权限控制为最小。
+ - 方法所用到的私有方法应紧跟该方法;如果有多个私有方法,其书写顺序应与它们在原方法中被调用的先后顺序一致。
+ - 方法入参和返回值不允许为`null`。
+ - 优先使用三目运算符代替if else的返回和赋值语句。
+ - 优先考虑使用`LinkedList`,只有在需要通过下标获取集合中元素值时再使用`ArrayList`。
+ - `ArrayList`,`HashMap`等可能产生扩容的集合类型必须指定集合初始大小,避免扩容。
+ - 日志与注释一律使用英文。
+ - 注释只能包含javadoc,todo和fixme。
+ - 公开的类和方法必须有javadoc,其他类和方法以及覆盖自父类的方法无需javadoc。
+
+## 单元测试规范
+
+ - 测试代码和生产代码需遵守相同代码规范。
+ - 单元测试需遵循AIR(Automatic, Independent, Repeatable)设计理念。
+   - 自动化(Automatic):单元测试应全自动执行,而非交互式。禁止人工检查输出结果,不允许使用`System.out`,`log`等,必须使用断言进行验证。
+   - 独立性(Independent):禁止单元测试用例间的互相调用,禁止依赖执行的先后次序。每个单元测试均可独立运行。
+   - 可重复执行(Repeatable):单元测试不能受到外界环境的影响,可以重复执行。
+ - 单元测试需遵循BCDE(Border, Correct, Design, Error)设计原则。
+   - 边界值测试(Border):通过循环边界、特殊数值、数据顺序等边界的输入,得到预期结果。
+   - 正确性测试(Correct):通过正确的输入,得到预期结果。
+   - 合理性设计(Design):与生产代码设计相结合,设计高质量的单元测试。
+   - 容错性测试(Error):通过非法数据、异常流程等错误的输入,得到预期结果。
+ - 如无特殊理由,测试需全覆盖。
+ - 每个测试用例需精确断言。
+ - 准备环境的代码和测试代码分离。
+ - 只有junit `Assert`,hamcrest `CoreMatchers`,Mockito相关可以使用static import。
+ - 单数据断言,应使用`assertTrue`,`assertFalse`,`assertNull`和`assertNotNull`。
+ - 多数据断言,应使用`assertThat`。
+ - 精确断言,尽量不使用`not`,`containsString`断言。
+ - 测试用例的真实值应命名为actualXXX,期望值应命名为expectedXXX。
+ - 测试类和`@Test`标注的方法无需javadoc。
+
+ - 公共规范
+   - 每行长度不超过`200`个字符,保证每一行语义完整以便于理解。
diff --git a/docs/2.0.6/docs/zh/contribute/join/commit-message.md b/docs/2.0.6/docs/zh/contribute/join/commit-message.md
new file mode 100644
index 000000000..3b1b0e8fc
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/commit-message.md
@@ -0,0 +1,89 @@
+# Commit Message 须知
+
+### 前言
+  一个好的 commit message 是能够帮助其他的开发者(或者未来的开发者)快速理解相关变更的上下文,同时也可以帮助项目管理人员确定该提交是否适合包含在发行版中。但当我们在查看了很多开源项目的 commit log 后,发现一个有趣的问题,一部分开发者,代码质量很不错,但是 commit message 记录却比较混乱,当其他贡献者或者学习者在查看代码的时候,并不能通过 commit log 很直观的了解
+该提交前后变更的目的,正如 Peter Hutterer 所言:Re-establishing the context of a piece of code is wasteful. We can’t avoid it completely, so our efforts should go to reducing it as much as possible. Commit messages can do exactly that and as a result, a commit message shows whether a developer is a good collaborator. 因此,DolphinScheduler 结合其他社区以及 Apache 官方文档制定了该规约。
+
+### Commit Message RIP
+
+#### 1:明确修改内容
+
+commit message 应该明确说明该提交解决了哪些问题(bug 修复、功能增强等),以便于用户开发者更好的跟踪问题,明确版本迭代过程中的优化情况。
+
+#### 2:关联相应的Pull Request 或者Issue
+
+当我们的改动较大的时候,commit message 最好能够关联 GitHub 上的相关 Issue 或者 Pull Request,这样,我们的开发者在查阅代码的时候能够通过关联信息较为迅速的了解改代码提交的上下文情景,如果当前 commit 针对某个 issue,那么可以在 Footer 部分关闭这个 issue。
+
+#### 3:统一的格式
+
+格式化后的 CommitMessage 能够帮助我们提供更多的历史信息,方便快速浏览,同时也可以直接从 commit 生成 Change Log。
+
+Commit message 应该包括三个部分:Header,Body 和 Footer。其中,Header 是必需的,Body 和 Footer 可以省略。
+
+##### header
+Header 部分只有一行,包括三个字段:type(必需)、scope(可选)和 subject(必需)。
+
+[DS-ISSUE编号][type] subject
+
+(1) type 用于说明 commit 的类别,只允许使用下面7个标识。
+
+* feat:新功能(feature)
+* fix:修补bug
+* docs:文档(documentation)
+* style: 格式(不影响代码运行的变动)
+* refactor:重构(即不是新增功能,也不是修改bug的代码变动)
+* test:增加测试
+* chore:构建过程或辅助工具的变动
+
+如果 type 为 feat 和 fix,则该 commit 将肯定出现在 Change log 之中。其他情况(docs、chore、style、refactor、test)建议不放入。
+
+(2)scope
+
+scope 用于说明 commit 影响的范围,比如 server、remote 等,如果没有更合适的范围,你可以用 *。
+
+(3) subject
+
+subject 是 commit 目的的简短描述,不超过50个字符。
+
+##### Body
+
+Body 部分是对本次 commit 的详细描述,可以分成多行,每行建议在72个字符处手动换行,避免自动换行影响美观。
+
+Body 部分需要注意以下几点:
+
+* 使用动宾结构,注意使用现在时,比如使用 change 而非 changed 或 changes
+
+* 首字母不要大写
+
+* 语句最后不需要 ‘.’ (句号) 结尾
+
+
+##### Footer
+
+Footer只适用于两种情况
+
+(1) 不兼容变动
+
+如果当前代码与上一个版本不兼容,则 Footer 部分以 BREAKING CHANGE 开头,后面是对变动的描述、以及变动理由和迁移方法。
+
+(2) 关闭 Issue
+
+如果当前 commit 针对某个issue,那么可以在 Footer 部分关闭这个 issue,也可以一次关闭多个 issue 。
+
+##### 举个例子
+[DS-001][docs-zh] add commit message
+
+* commit message RIP
+* build some conventions 
+* help the commit messages become clean and tidy 
+* help developers and release managers better track issues 
+and clarify the optimization in the version iteration
+
+This closes #001
+
+### 参考文档
+[提交消息格式](https://cwiki.apache.org/confluence/display/GEODE/Commit+Message+Format)
+
+[On commit messages-Peter Hutterer](http://who-t.blogspot.com/2009/12/on-commit-messages.html)
+
+[RocketMQ Community Operation Conventions](https://mp.weixin.qq.com/s/LKM4IXAY-7dKhTzGu5-oug)
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/contribute.md b/docs/2.0.6/docs/zh/contribute/join/contribute.md
new file mode 100644
index 000000000..504962141
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/contribute.md
@@ -0,0 +1,42 @@
+# 参与贡献
+
+首先非常感谢大家选择和使用 DolphinScheduler,非常欢迎大家加入 DolphinScheduler 大家庭,融入开源世界!
+
+我们鼓励任何形式的参与社区,最终成为 Committer 或 PPMC,如: 
+* 将遇到的问题通过 github 上 [issue](https://github.com/apache/dolphinscheduler/issues) 的形式反馈出来
+* 回答别人遇到的 issue 问题
+* 帮助完善文档
+* 帮助项目增加测试用例
+* 为代码添加注释
+* 提交修复 Bug 或者 Feature 的 PR
+* 发表应用案例实践、调度流程分析或者与调度相关的技术文章
+* 帮助推广 DolphinScheduler,参与技术大会或者 meetup 的分享等
+
+欢迎加入贡献的队伍,加入开源从提交第一个 PR 开始
+  - 比如添加代码注释或找到带有 ”easy to fix” 标记或一些非常简单的 issue(拼写错误等) 等等,先通过第一个简单的 PR 熟悉提交流程
+
+注:贡献不仅仅限于 PR 哈,对促进项目发展的都是贡献
+
+相信参与 DolphinScheduler,一定会让您从开源中受益!
+
+### 1. 参与文档贡献
+
+参考[参与贡献-文档需知](./document.md)
+
+### 2. 参与代码贡献
+
+参考[参与贡献 Issue 需知](./issue.md),[参与贡献 Pull Request 需知](./pull-request.md),[参与贡献 CommitMessage 需知](./commit-message.md)
+
+
+### 3. 如何领取 Issue,提交 Pull Request
+
+如果你想实现某个 Feature 或者修复某个 Bug。请参考以下内容:
+
+* 所有的 Bug 与新 Feature 建议使用 Issues Page 进行管理。
+* 如果想要开发实现某个 Feature 功能,请先回复该功能所关联的 Issue,表明你当前正在这个 Issue 上工作。 并在回复的时候为自己设置一个 **deadline**,添加到回复内容中。
+* 最好在核心贡献者找到一个导师(指导者),导师会在设计与功能实现上给予即时的反馈。
+* 你应该新建一个分支来开始你的工作,分支的名字参考[参与贡献 Pull Request 需知](./pull-request.md)。比如,你想完成 feature 功能并提交了 Issue 111,那么你的 branch 名字应为 feature-111。 功能名称可与导师讨论后确定。
+* 完成后,发送一个 Pull Request 到 dolphinscheduler,提交过程具体请参考下面《[提交代码流程](./submit-code.md)》。
+
+如果是想提交 Pull Request 完成某一个 Feature 或者修复某个 Bug,这里都建议大家从小处做起,完成一个小功能就提交一次,每次别改动太多文件,改动文件太多也会给 Reviewer 造成很大的心理压力,建议通过多次 Pull Request 的方式完成。
+
diff --git a/docs/2.0.6/docs/zh/contribute/join/document.md b/docs/2.0.6/docs/zh/contribute/join/document.md
new file mode 100644
index 000000000..6d9fde3e2
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/document.md
@@ -0,0 +1,62 @@
+# 文档须知
+
+良好的使用文档对任何类型的软件都是至关重要的。欢迎任何可以改进 DolphinScheduler 文档的贡献。
+
+### 获取文档项目
+
+DolphinScheduler 项目的文档维护在独立的 [git 仓库](https://github.com/apache/dolphinscheduler-website)中。
+
+首先你需要先将文档项目 fork 到自己的 github 仓库中,然后将 fork 的文档克隆到本地计算机中。
+
+```
+git clone https://github.com/<your-github-user-name>/dolphinscheduler-website
+```
+
+### 文档环境
+
+DolphinScheduler 网站由 [docsite](https://github.com/chengshiwen/docsite-ext) 提供支持。
+
+请确保你的 node 版本是 10+,docsite 尚不支持高于 10.x 的版本。
+
+### 文档构建指南
+
+1. 在根目录中运行 `npm install` 以安装依赖项。
+
+2. 运行命令收集资源:2.1.运行 `export PROTOCOL_MODE=ssh` 告诉Git克隆资源,通过SSH协议而不是HTTPS协议。 2.2.运行 `./scripts/prepare_docs.sh` 准备所有相关资源,有关更多信息,您可以查看[how prepare script work](https://github.com/apache/dolphinscheduler-website/blob/master/HOW_PREPARE_WOKR.md)。
+
+3. 在根目录下运行 `npm run start` 启动本地服务器,其将运行在 http://localhost:8080。
+
+4. 运行 `npm run build` 可以生成文档网站源代码。
+
+5. 在本地验证你的更改:`python -m SimpleHTTPServer 8000`,当 python 版本为 3 时,请使用:`python3 -m http.server 8000`。
+
+如果本地安装了更高版本的 node,可以考虑使用 `nvm` 来允许不同版本的 `node` 在你的计算机上运行。
+
+1. 参考[说明](http://nvm.sh)安装 nvm
+
+2. 运行 `nvm install v10.23.1` 安装 node v10
+
+3. 运行 `nvm use v10.23.1` 将当前工作环境切换到 node v10
+
+然后你就可以在本地环境运行和建立网站了。
+
+### 文档规范
+
+1. 汉字与英文、数字之间**需空格**,中文标点符号与英文、数字之间**不需空格**,以增强中英文混排的美观性和可读性。
+
+2. 建议在一般情况下使用 “你” 即可。当然必要的时候可以使用 “您” 来称呼,比如有 warning 提示的时候。
+
+### 怎样提交文档 Pull Request
+
+1. 不要使用 “git add .” 提交所有更改。
+
+2. 只需推送更改的文件,例如:
+
+ * `*.md`
+ * `blog.js or docs.js or site.js`
+
+3. 向 **master** 分支提交 Pull Request。
+
+### 参考文档
+
+[Apache Flink 中文文档规范](https://cwiki.apache.org/confluence/display/FLINK/Flink+Translation+Specifications)
diff --git a/docs/2.0.6/docs/zh/contribute/join/issue.md b/docs/2.0.6/docs/zh/contribute/join/issue.md
new file mode 100644
index 000000000..b81cbd822
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/issue.md
@@ -0,0 +1,217 @@
+# Issue 须知
+
+## 前言
+Issues 功能被用来追踪各种特性,Bug,功能等。项目维护者可以通过 Issues 来组织需要完成的任务。
+
+Issue 是引出一个 Feature 或 Bug 等的重要步骤,在单个
+Issue 中可以讨论的内容包括但不限于 Feature 的包含的功能,存在的 Bug 产生原因,前期方案的调研,以及其对应的实现设计和代码思路。
+
+并且只有当 Issue 被 approve 之后才需要有对应的 Pull Request 去实现。
+
+如果是一个 Issue 对应的是一个大 Feature,建议先将其按照功能模块等维度分成多个小的 Issue。
+
+## 规范
+
+### Issue 标题
+
+标题格式:[`Issue 类型`][`模块名`] `Issue 描述`
+
+其中`Issue 类型`如下:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue 类型</th>
+            <th style="width: 20%; text-align: center;">描述</th>
+            <th style="width: 20%; text-align: center;">样例</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">包含所期望的新功能和新特性</td>
+            <td style="text-align: center;">[Feature][api] Add xxx api in xxx controller</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">程序中存在的 Bug</td>
+            <td style="text-align: center;">[Bug][api] Throw exception when xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">针对目前程序的一些改进,不限于代码格式,程序性能等</td>
+            <td style="text-align: center;">[Improvement][server] Improve xxx between Master and Worker</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">专门针对测试用例部分</td>
+            <td style="text-align: center;">[Test][server] Add xxx e2e test</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">一般都是属于 Feature 类的子任务,针对大 Feature,可以将其分成很多个小的子任务来一一完成</td>
+            <td style="text-align: center;">[Sub-Task][server] Implement xxx in xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+其中`模块名`如下:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">模块名</th>
+            <th style="width: 20%; text-align: center;">描述</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">alert</td>
+            <td style="text-align: center;">报警模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">api</td>
+            <td style="text-align: center;">应用程序接口层模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">service</td>
+            <td style="text-align: center;">应用程序服务层模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">dao</td>
+            <td style="text-align: center;">应用程序数据访问层模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">plugin</td>
+            <td style="text-align: center;">插件模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">remote</td>
+            <td style="text-align: center;">通信模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">server</td>
+            <td style="text-align: center;">服务器模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">ui</td>
+            <td style="text-align: center;">前端界面模块</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs-zh</td>
+            <td style="text-align: center;">中文文档</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs</td>
+            <td style="text-align: center;">英文文档</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">待补充...</td>
+            <td style="text-align: center;">-</td>
+        </tr>
+    </tbody>
+</table>
+
+### Issue 内容模板
+
+https://github.com/apache/dolphinscheduler/tree/dev/.github/ISSUE_TEMPLATE
+
+### Bug 类 Issue
+
+当您发现一个 Bug 时,请提交一个 Issue 类的 Bug,提交前:
+* 请先在 issue 列表里查找一下是否该 Bug 已经提交,如果已经有此 Bug,请在此 Bug 下接着回复。
+* 如果该 Bug 是可以复现的。请尽量提供完整的重现步骤。
+
+请在 issues 页面中提交 Bug。
+
+一个高质量的 Bug 通常有以下特征:
+
+* 使用一个清晰并有描述性的标题来定义 Bug。
+* 详细的描述复现 Bug 的步骤。包括您的配置情况,预计产生的结果,实际产生的结果。并附加详细的 TRACE 日志。
+* 如果程序抛出异常,请附加完整的堆栈日志。
+* 如有可能,请附上屏幕截图或动态的 GIF 图,这些图片能帮助演示整个问题的产生过程。
+* 哪个版本。
+* 需要修复的优先级(危急、重大、次要、细微)。
+
+下面是 **Bug 的 Markdown 内容模板**,请按照该模板填写 issue。
+
+```shell
+**标题** 
+标题格式: [BUG][Priority] bug标题
+Priority分为四级: Critical、Major、Minor、Trivial
+
+**问题描述**
+[清晰准确描述遇到的问题]
+
+**问题复现步骤:**
+1. [第一步]
+2. [第二步]
+3. [...]
+
+**期望的表现:**
+[在这里描述期望的表现]
+
+**观察到的表现:**
+[在这里描述观察到的表现]
+
+**屏幕截图和动态GIF图**
+![复现步骤的屏幕截图和动态GIF图](图片的url)
+
+**DolphinScheduler版本:(以1.1.0为例)** 
+ -[1.1.0]
+ 
+**补充的内容:**
+[请描述补充的内容,比如]
+
+**需求或者建议**
+[请描述你的需求或者建议]
+```
+
+### Feature 类 Issue
+
+提交前:
+* 请确定这不是一个重复的功能增强建议。 查看 Issue Page 列表,搜索您要提交的功能增强建议是否已经被提交过。
+
+请在 issues 页面中提交 Feature。
+
+一个高质量的 Feature 通常有以下特征:
+* 一个清晰的标题来定义 Feature
+* 详细描述 Feature 的行为模式
+* 说明为什么该 Feature 对大多数用户是有用的。新功能应该具有广泛的适用性。
+* 尽量列出其他调度已经具备的类似功能。商用与开源软件均可。
+
+以下是 **Feature 的 Markdown 内容模板**,请按照该模板填写 issue 内容。
+```shell
+**标题** 
+标题格式: [Feature][Priority] feature标题
+Priority分为四级: Critical、Major、Minor、Trivial
+
+**Feature的描述**
+[描述新Feature应实现的功能]
+
+**为什么这个新功能是对大多数用户有用的**
+[解释这个功能为什么对大多数用户是有用的]
+
+**补充的内容**
+[列出其他的调度是否包含该功能,是如何实现的]
+
+```
+
+
+### Contributor
+
+除一些特殊情况之外,在开始完成
+Issue 之前,建议先在 Issue 下或者邮件列表中和大家讨论确定设计方案或者提供设计方案,以及代码实现思路。
+
+如果存在多种不同的方案,建议通过邮件列表或者在
+Issue 下进行投票决定,最终方案和代码实现思路被
+approve 之后,再去实现,这样做的主要目的是避免在
+Pull Request review 阶段针对实现思路的意见不同或需要重构而导致 waste time。
+
+### 相关问题
+
+- 当出现提出 Issue 的用户不清楚该 Issue 对应的模块时的处理方式。
+
+    确实存在大多数提出 Issue 用户不清楚这个 Issue 是属于哪个模块的,其实这在很多开源社区都是很常见的。在这种情况下,其实
+    committer/contributor 是知道这个 Issue 影响的模块的,如果之后这个 Issue 被 committer 和 contributor approve
+    确实有价值,那么 committer 就可以按照 Issue 涉及到的具体的模块去修改 Issue 标题,或者留言给提出 Issue 的用户去修改成对应的标题。
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/microbench.md b/docs/2.0.6/docs/zh/contribute/join/microbench.md
new file mode 100644
index 000000000..97f37ae3e
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/microbench.md
@@ -0,0 +1,98 @@
+# 微基准测试须知
+
+所有的优化必须建立在数据印证的基础上,拒绝盲目优化。基于此,我们提供了MicroBench模块。
+
+MicroBench模块是基于OpenJDK JMH构建的(HotSpot的推荐基准测试方案)。当你开始基准测试时,你不需要额外的依赖。
+
+JMH,即Java MicroBenchmark Harness,是专门用于代码微基准测试的工具套件。何谓Micro Benchmark呢?简单的来说就是基于方法层面的基准测试,精度可以达到微秒级。当你定位到热点方法,希望进一步优化方法性能的时候,就可以使用JMH对优化的结果进行量化的分析。
+
+### Java基准测试需要注意的几个点:
+
+* 防止无用代码进入测试方法中。
+
+* 并发测试。
+
+* 测试结果呈现。
+
+### JMH比较典型的应用场景有:
+
+* 1:定量分析某个热点函数的优化效果
+
+* 2:想定量地知道某个函数需要执行多长时间,以及执行时间和输入变量的相关性
+
+* 3:对比一个函数的多种实现方式
+
+
+DolphinScheduler-MicroBench提供了AbstractBaseBenchmark,你可以在其基础上继承,编写你的基准测试代码,AbstractMicroBenchmark能保证以JUnit的方式运行。
+
+### 定制运行参数
+ 
+ 默认的AbstractMicrobenchmark配置是
+ 
+ Warmup次数 10(warmupIterations)
+ 
+ 测试次数 10(measureIterations)
+ 
+ Fork数量 2 (forkCount)
+ 
+ 你可以在启动的时候指定这些参数,-DmeasureIterations、-DperfReportDir(输出基准测试结果文件目录)、-DwarmupIterations、-DforkCount
+ 
+### DolphinScheduler-MicroBench 介绍
+
+
+ 通常并不建议在跑测试时使用较少的循环次数,但较少的次数有助于确认基准测试是正常工作的;在确认结束后,再运行大量的基准测试。
+ ```java
+@Warmup(iterations = 2, time = 1)
+@Measurement(iterations = 4, time = 1)
+@State(Scope.Benchmark)
+public class EnumBenchMark extends AbstractBaseBenchmark {
+
+}
+```
+ 这可以以方法级别或者类级别来运行基准测试,命令行的参数会覆盖annotation上的参数。
+ 
+```java
+    @Benchmark //方法注解,表示该方法是需要进行 benchmark 的对象。
+    @BenchmarkMode(Mode.AverageTime) //可选基准测试模式通过枚举Mode得到
+    @OutputTimeUnit(TimeUnit.MICROSECONDS) // 输出的时间单位
+    public void enumStaticMapTest() {
+        TestTypeEnum.newGetNameByType(testNum);
+    }
+```
+
+当你的基准测试编写完成后,你可以运行它查看具体的测试情况:(实际结果取决于你的系统配置情况)
+
+首先它会对我们的代码进行预热,
+
+```
+# Warmup Iteration   1: 0.007 us/op
+# Warmup Iteration   2: 0.008 us/op
+Iteration   1: 0.004 us/op
+Iteration   2: 0.004 us/op
+Iteration   3: 0.004 us/op
+Iteration   4: 0.004 us/op
+```
+在经过预热后,我们通常会得到如下结果
+```java
+Benchmark                        (testNum)   Mode  Cnt          Score           Error  Units
+EnumBenchMark.simpleTest               101  thrpt    8  428750972.826 ±  66511362.350  ops/s
+EnumBenchMark.simpleTest               108  thrpt    8  299615240.337 ± 290089561.671  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  288423221.721 ± 130542990.747  ops/s
+EnumBenchMark.simpleTest               104  thrpt    8  236811792.152 ± 155355935.479  ops/s
+EnumBenchMark.simpleTest               105  thrpt    8  472247775.246 ±  45769877.951  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  455473025.252 ±  61212956.944  ops/s
+EnumBenchMark.enumStaticMapTest        101   avgt    8          0.006 ±         0.003  us/op
+EnumBenchMark.enumStaticMapTest        108   avgt    8          0.005 ±         0.002  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.006 ±         0.005  us/op
+EnumBenchMark.enumStaticMapTest        104   avgt    8          0.006 ±         0.004  us/op
+EnumBenchMark.enumStaticMapTest        105   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumValuesTest           101   avgt    8          0.011 ±         0.004  us/op
+EnumBenchMark.enumValuesTest           108   avgt    8          0.025 ±         0.016  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.019 ±         0.010  us/op
+EnumBenchMark.enumValuesTest           104   avgt    8          0.018 ±         0.018  us/op
+EnumBenchMark.enumValuesTest           105   avgt    8          0.014 ±         0.012  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.012 ±         0.009  us/op
+```
+
+OpenJDK官方给了很多样例代码,有兴趣的同学可以自己查询并学习JMH:[OpenJDK-JMH-Example](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/)
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/pull-request.md b/docs/2.0.6/docs/zh/contribute/join/pull-request.md
new file mode 100644
index 000000000..26ce54c41
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/pull-request.md
@@ -0,0 +1,95 @@
+# Pull Request 须知
+
+## 前言
+Pull Request 本质上是一种软件的合作方式,是将涉及不同功能的代码,纳入主干的一种流程。这个过程中,可以进行讨论、审核和修改代码。
+
+在 Pull Request 中尽量不讨论代码的实现方案,代码及其逻辑的大体实现方案应该尽量在
+Issue 或者邮件列表中被讨论确定,在 Pull Request 中我们尽量只关注代码的格式以及代码规范等信息,从而避免实现方式的意见不同而导致
+waste time。
+
+## 规范
+
+### Pull Request 标题
+
+标题格式:[`Pull Request 类型`-`Issue 号`][`模块名`] `Pull Request 描述`
+
+其中`Pull Request 类型`和`Issue 类型`的对应关系如下:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue 类型</th>
+            <th style="width: 20%; text-align: center;">Pull Request 类型</th>
+            <th style="width: 20%; text-align: center;">样例(假设 Issue 号为 3333)</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">Fix</td>
+            <td style="text-align: center;">[Fix-3333][server] Fix xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">[Improvement-3333][alert] Improve the performance of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">[Test-3333][api] Add the e2e test of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">Sub-Task 对应的父类型</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+其中 `Issue 号`是指当前 Pull Request 对应要解决的 Issue 号,`模块名`同 Issue 的模块名。
+
+### Pull Request 分支名
+
+分支名格式:`Pull Request 类型`-`Issue 号`,举例:Feature-3333。
+
+### Pull Request 内容
+
+请参阅 commit message 篇。
+
+### Pull Request Code Style
+
+当你向 DolphinScheduler 提交 pull request 的时候 code-style 是你不得不考虑的问题。我们在 CI 中使用 Checkstyle [参考](https://checkstyle.sourceforge.io/)来保持代码风格的统一,它是一种帮助开发者编写遵循编码规范的 Java 代码开发工具。如果你的 pull request 没有通过 Checkstyle 的检测,那它将不会被合并到主库中。你可以在提交 pull request 前使用 Checkstyle 来检测或者格式化你的代码。如下的步骤将引领你配置并激活 Checkstyle
+
+1. 准备 Checkstyle 配置文件:你可以点击[这里](https://github.com/apache/dolphinscheduler/blob/3.0.0/style/checkstyle.xml)手动下载,但是我们更加推荐在 DolphinScheduler 代码库中找到它。当你将代码库克隆下来后,你可以在路径 `style/checkstyle.xml` 下找到配置文件
+2. 下载 Intellij IDEA Checkstyle 插件:通过关键字**CheckStyle-IDEA**或者通过[这个页面](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea)安装均可。如果你不清楚如何安装Intellij IDEA插件,可以参考[这个连接](https://www.jetbrains.com/help/idea/managing-plugins.html#install_plugin_from_repo)
+3. 配置并激活 Checkstyle 以及 Intellij IDEA 代码风格:当完成上面几步后,你就可以配置并激活他们了。你可以在路径`Preferences -> Tools -> Checkstyle`中找到 Checkstyle,请参照下图完成其配置
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/checkstyle-idea.png" alt="checkstyle idea configuration" />
+</p>
+
+截止目前,Checkstyle 插件已经配置完成了,当有代码或者文件不符合风格时就会显示在 Checkstyle 中。但强烈建议同时配置 Intellij IDEA 的代码风格,完成配置后可以使用 Intellij IDEA 自动格式化功能。你可以在路径`Preferences -> Editor -> Code Style -> Java`找到配置,请参照下图完成其配置
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/code-style-idea.png" alt="code style idea configuration" />
+</p>
+
+4. 在提交 pull request 前格式化你的代码:完成上面全部后,你可以使用快捷键`Command + L`(Mac用户) or `Ctrl+L`(Windows用户)在 Intellij IDEA 完成自动格式化。格式化代码的最佳时间是将你的修改提交到本地 git 版本库之前
+
+### 相关问题
+
+- 怎样处理一个 Pull Request 对应多个 Issue 的场景。
+
+    首先 Pull Request 和 Issue 一对多的场景是比较少的。Pull Request 和 Issue 一对多的根本原因就是出现了多个
+    Issue 需要做大体相同的一件事情的场景,通常针对这种场景有两种解决方法:第一种就是把多个功能相同的 Issue 合并到同一个 Issue 上,然后把其他的
+    Issue 进行关闭;第二种就是多个 Issue 大体上是在做一个功能,但是存在一些细微的差别,这类场景下可以把每个 Issue 的职责划分清楚,每一个
+    Issue 的类型都标记为 Sub-Task,然后将这些 Sub-Task 类型的 Issue 关联到一个总 Issue 上,在提交
+    Pull Request 时,每个 Pull Request 都只关联一个 Sub-Task 的 Issue。
+    
+    尽量把一个 Pull Request 作为最小粒度。如果一个 Pull Request 只做一件事,Contributor 容易完成,Pull Request 影响的范围也会更加清晰,对 reviewer 的压力也会小。
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/join/review.md b/docs/2.0.6/docs/zh/contribute/join/review.md
new file mode 100644
index 000000000..00ead4920
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/review.md
@@ -0,0 +1,141 @@
+# 参与社区 review
+
+贡献 DolphinScheduler 的方式,除了向 [团队](/en-us/community/community.html) 中提到的 GitHub 仓库提交 Issues 和 pull requests 外,另一非常重要的方式是
+review 社区的 Issues 或者 Pull Requests。通过别人 Issues 和 Pull Requests,你不仅能知道社区的最新进展和发展方向,还能了解别人代码的设
+计思想,同时可以增加自己在社区的曝光、积累自己在社区的荣誉值。
+
+任何人都被鼓励去 review 社区的  Issues 和 Pull Requests。我们还曾经发起过一个 Help Wanted 的邮件讨论,向社区征求贡献者协助 review Issues
+以及 Pull Requests,详见 [邮件][mail-review-wanted],并将其结果放到了 [GitHub Discussion][discussion-result-review-wanted] 中。
+
+> 注意: 这里并不是说只有 [GitHub Discussion][discussion-result-review-wanted] 中提及的用户才可以协助 review Issue 或者 Pull Requests,
+> 请记住社区的主张是 **任何人都被鼓励去 review 社区的  Issues 和 Pull Requests**。只是那部分用户在邮件列表意见征集的时候,表达了愿意付
+> 出更多的时间,参与社区的 review。另一个好处是,当社区有不确定的问题的时,除了可以找 [团队](/en-us/community/community.html) 中对应的 Members 外,还可以找
+> [GitHub Discussion][discussion-result-review-wanted] 中提及的人解答对应的问题。如果你要想要加入到 [GitHub Discussion][discussion-result-review-wanted]
+> 中,请在该 discussion 中评论并留下你感兴趣的模块,维护者会将你加入到对应的名单中。
+
+## 怎么参与社区 review
+
+DolphinScheduler 主要通过 GitHub 接收社区的贡献,其所有的 Issues 和 Pull Requests 都托管在 GitHub 中,如果你想参与 Issues 的 review
+具体请查看 [review Issues](#issues) 章节,如果你是想要参与 Pull Requests 的 review 具体请查看 [review Pull Requests](#pull-requests)
+章节。
+
+### Issues
+
+Review Issues 是指在 GitHub 中参与 [Issues][all-issues] 的讨论,并在对应的 Issues 给出建议。给出的建议包括但不限于如下的情况
+
+| 情况 | 原因 | 需增加标签 | 需要的动作 |
+| ------ | ------ | ------ | ------ |
+| 不需要修改 | 问题在 dev 分支最新代码中已经修复了 | [wontfix][label-wontfix] | 关闭 Issue,告知提出者将在那个版本发布,如已发布告知版本 |
+| 重复的问题 | 之前已经存在相同的问题 | [duplicate][label-duplicate] | 关闭 Issue,告知提出者相同问题的连接 |
+| 问题描述不清晰 | 没有明确说明问题如何复现 | [need more information][label-need-more-information] | 提醒用户需要增加缺失的描述 |
+
+除了给 issue 建议之外,给 Issue 分类也是非常重要的一个工作。分类后的 Issue 可以更好的被检索,为以后进一步处理提供便利。一个 Issue 可以被打上多个标签,常见的 Issue 分类有
+
+| 标签 | 标签代表的情况 |
+| ------ | ------ |
+| [UI][label-UI] | UI以及前端相关的 Issue |
+| [security][label-security] | 安全相关的 Issue |
+| [user experience][label-user-experience] | 用户体验相关的 Issue |
+| [development][label-development] | 开发者相关的 Issue |
+| [Python][label-Python] | Python相关的 Issue |
+| [plug-in][label-plug-in] | 插件相关的 Issue |
+| [document][label-document] | 文档相关的 Issue |
+| [docker][label-docker] | docker相关的 Issue |
+| [need verify][label-need-verify] | Issue 需要被验证 |
+| [e2e][label-e2e] | e2e相关的 Issue |
+| [win-os][label-win-os] | windows 操作系统相关的 Issue |
+| [suggestion][label-suggestion] | Issue 为项目提出了建议 |
+
+标签除了分类之外,还能区分 Issue 的优先级,优先级越高的标签越重要,越容易被重视,并会尽快被修复或者实现,优先级的标签如下
+
+| 标签 | 优先级 |
+| ------ | ------ |
+| [priority:high][label-priority-high] | 高优先级 |
+| [priority:middle][label-priority-middle] | 中优先级 |
+| [priority:low][label-priority-low] | 低优先级 |
+
+以上是常见的几个标签,更多的标签请查阅项目[全部的标签列表][label-all-list]
+
+在阅读以下内容时,请确保你已经为 Issue 打了标签。
+
+* 回复后及时去掉标签[Waiting for reply][label-waiting-for-reply]:在 [创建 Issue 的时候][issue-choose],我们会为 Issue 打上特定的标签
+  [Waiting for reply][label-waiting-for-reply],方便定位还没有被回复的 Issue,所以当你 review 了 Issue 之后,就需要将标签
+  [Waiting for reply][label-waiting-for-reply] 及时的从 Issue 中删除。
+* 打上 [Waiting for review][label-waiting-for-review] 标签,当你不确定这个 Issue 是否被解决:当你查阅了 Issue 后,会有两个情况出现。一是
+  问题已经被定位或解决,如果创建 Pull Requests 的话,则参考 [创建PR](./submit-code.md)。二是你也不确定这个问题是否真的是
+  被解决,这时你可以为 Issue 打上 [Waiting for review][label-waiting-for-review] 标签,并在 Issue 中 `@` 对应的人进行二次确认
+
+当 Issue 需要被创建 Pull Requests 解决,也可以视情况打上部分标签
+
+| 标签 | 标签代表的PR |
+| ------ | ------ |
+| [Chore][label-Chore] | 日常维护工作 |
+| [Good first issue][label-good-first-issue] | 适合首次贡献者解决的 Issue |
+| [easy to fix][label-easy-to-fix] | 比较容易解决 |
+| [help wanted][label-help-wanted] | 向社区寻求帮忙 |
+
+> 注意: 上面关于增加和删除标签的操作,目前只有成员可以操作,当你遇到需要增减标签、但你不是成员的时候,可以 `@` 对应的成员让其帮忙增减。
+> 但只要你有 GitHub 账号就能评论 Issue,并给出建议。我们鼓励社区每人都去评论并为 Issue 给出解答
+
+### Pull Requests
+
+<!-- markdown-link-check-disable -->
+Review Pull 是指在 GitHub 中参与 [Pull Requests][all-PRs] 的讨论,并在对应的 Pull Requests 给出建议。DolphinScheduler review
+Pull Requests 与 [GitHub 的 reviewing changes in pull requests][gh-review-pr] 一样。你可以为 Pull Requests 提出自己的看法,
+
+* 当你认为这个 Pull Requests 没有问题,可以被合并的时候,可以根据 [GitHub 的 reviewing changes in pull requests][gh-review-pr] 的
+  approve 流程同意这个 Pull Requests。
+* 当你觉得这个 Pull Requests 需要被修改时,可以根据 [GitHub 的 reviewing changes in pull requests][gh-review-pr] 的 comment
+  流程评论这个 Pull Requests。当你认为存在一定要先修复才能合并的问题,请参照 [GitHub 的 reviewing changes in pull requests][gh-review-pr]
+  的 Request changes 流程要求贡献者修改 Pull Requests 的内容。
+<!-- markdown-link-check-enable -->
+
+为 Pull Requests 打上标签也是非常重要的一个环节,合理的分类能为后来的 reviewer 节省大量的时间。值得高兴的是,Pull Requests 的标签和 [Issues](#issues)
+中提及的标签和用法是一致的,这能减少 reviewer 对标签的记忆。例如这个 Pull Requests 是和 docker 相关并且直接影响到用户部署的,我们可以为他
+打上 [docker][label-docker] 和 [priority:high][label-priority-high] 的标签。
+
+除了和 Issue 类似的标签外,Pull Requests 还有许多自己特有的标签
+
+| 标签 | 含义 |
+| ------ | ------ |
+| [miss document][label-miss-document] | 该 Pull Requests 缺少文档 需要增加 |
+| [first time contributor][label-first-time-contributor] | 该 Pull Requests 贡献者是第一次贡献项目 |
+| [don't merge][label-do-not-merge] | 该 Pull Requests 有问题 暂时先不要合并 |
+
+> 注意: 上面关于增加和删除标签的操作,目前只有成员可以操作,当你遇到需要增减标签的时候,可以 `@` 对应的成员让其帮忙增减。但只要你有 GitHub
+> 账号就能评论 Pull Requests,并给出建议。我们鼓励社区每人都去评论并为 Pull Requests 给出建议
+
+[mail-review-wanted]: https://lists.apache.org/thread/9flwlzrp69xjn6v8tdkbytq8glqp2k51
+[discussion-result-review-wanted]: https://github.com/apache/dolphinscheduler/discussions/7545
+[label-wontfix]: https://github.com/apache/dolphinscheduler/labels/wontfix
+[label-duplicate]: https://github.com/apache/dolphinscheduler/labels/duplicate
+[label-need-more-information]: https://github.com/apache/dolphinscheduler/labels/need%20more%20information
+[label-win-os]: https://github.com/apache/dolphinscheduler/labels/win-os
+[label-waiting-for-reply]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20reply
+[label-waiting-for-review]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20review
+[label-user-experience]: https://github.com/apache/dolphinscheduler/labels/user%20experience
+[label-development]: https://github.com/apache/dolphinscheduler/labels/development
+[label-UI]: https://github.com/apache/dolphinscheduler/labels/UI
+[label-suggestion]: https://github.com/apache/dolphinscheduler/labels/suggestion
+[label-security]: https://github.com/apache/dolphinscheduler/labels/security
+[label-Python]: https://github.com/apache/dolphinscheduler/labels/Python
+[label-plug-in]: https://github.com/apache/dolphinscheduler/labels/plug-in
+[label-document]: https://github.com/apache/dolphinscheduler/labels/document
+[label-docker]: https://github.com/apache/dolphinscheduler/labels/docker
+[label-all-list]: https://github.com/apache/dolphinscheduler/labels
+[label-Chore]: https://github.com/apache/dolphinscheduler/labels/Chore
+[label-good-first-issue]: https://github.com/apache/dolphinscheduler/labels/good%20first%20issue
+[label-help-wanted]: https://github.com/apache/dolphinscheduler/labels/help%20wanted
+[label-easy-to-fix]: https://github.com/apache/dolphinscheduler/labels/easy%20to%20fix
+[label-priority-high]: https://github.com/apache/dolphinscheduler/labels/priority%3Ahigh
+[label-priority-middle]: https://github.com/apache/dolphinscheduler/labels/priority%3Amiddle
+[label-priority-low]: https://github.com/apache/dolphinscheduler/labels/priority%3Alow
+[label-miss-document]: https://github.com/apache/dolphinscheduler/labels/miss%20document
+[label-first-time-contributor]: https://github.com/apache/dolphinscheduler/labels/first%20time%20contributor
+[label-do-not-merge]: https://github.com/apache/dolphinscheduler/labels/don%27t%20merge
+[label-e2e]: https://github.com/apache/dolphinscheduler/labels/e2e
+[label-need-verify]: https://github.com/apache/dolphinscheduler/labels/need%20to%20verify
+[issue-choose]: https://github.com/apache/dolphinscheduler/issues/new/choose
+[all-issues]: https://github.com/apache/dolphinscheduler/issues
+[all-PRs]: https://github.com/apache/dolphinscheduler/pulls
+[gh-review-pr]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews
diff --git a/docs/2.0.6/docs/zh/contribute/join/security.md b/docs/2.0.6/docs/zh/contribute/join/security.md
new file mode 100644
index 000000000..6fef3cc1d
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/security.md
@@ -0,0 +1,8 @@
+# 安全
+
+Apache Software Foundation在消除其软件项目中的安全性问题方面采取严格的立场。 Apache DolphinScheduler也非常关注与其功能有关的安全性问题。
+
+如果您对DolphinScheduler的安全性有疑虑,或者发现了漏洞或潜在威胁,请发送邮件至[security@apache.org](mailto:security@apache.org),与Apache安全团队联系。 请在电子邮件中将项目名称指定为DolphinScheduler,并提供相关问题或潜在威胁的描述。 同时也建议您提供重现该问题的方法。 在评估和分析调查结果之后,apache安全团队和DolphinScheduler社区将与您联系。
+
+请注意,在该安全问题被公开披露之前,请仅通过上述安全邮件地址报告,不要在公开渠道讨论。
+
diff --git a/docs/2.0.6/docs/zh/contribute/join/submit-code.md b/docs/2.0.6/docs/zh/contribute/join/submit-code.md
new file mode 100644
index 000000000..e473112ee
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/submit-code.md
@@ -0,0 +1,71 @@
+# 提交代码
+
+* 首先从远端仓库*https://github.com/apache/dolphinscheduler.git* fork一份代码到自己的仓库中
+
+* 远端仓库中目前有三个分支:
+    * master 正常交付分支
+	   发布稳定版本以后,将稳定版本分支的代码合并到master上。
+    
+	* dev    日常开发分支
+	   日常dev开发分支,新提交的代码都可以pull request到这个分支上。
+	   
+    * branch-1.0.0 发布版本分支
+	   发布版本分支,后续会有2.0...等版本分支。
+
+* 把自己仓库clone到本地
+  
+    ` git clone https://github.com/<YOUR-GITHUB-USERNAME>/dolphinscheduler.git`
+
+*  添加远端仓库地址,命名为upstream
+
+    ` git remote add upstream https://github.com/apache/dolphinscheduler.git `
+
+*  查看仓库:
+
+    ` git remote -v`
+
+> 此时会有两个仓库:origin(自己的仓库)和upstream(远端仓库)
+
+*  获取/更新远端仓库代码(已经是最新代码,就跳过)
+  
+    ` git fetch upstream `
+
+
+* 同步远端仓库代码到本地仓库
+
+```
+ git checkout origin/dev
+ git merge --no-ff upstream/dev
+```
+
+如果远端分支有新加的分支比如`dev-1.0`,需要同步这个分支到本地仓库
+
+```
+git checkout -b dev-1.0 upstream/dev-1.0
+git push --set-upstream origin dev-1.0
+```
+
+* 新建分支
+
+```
+git checkout -b xxx origin/dev
+```
+
+确保分支`xxx`是基于官方dev分支的最新代码
+
+
+* 在新建的分支上本地修改代码以后,提交到自己仓库:
+  
+    `git commit -m 'commit content'`
+    
+    `git push origin xxx --set-upstream`
+
+* 将修改提交到远端仓库
+
+	* 在github的PullRequest页面,点击"New pull request".
+	 
+	* 选择修改完的本地分支和要合并的目的分支,点击"Create pull request".
+	
+* 接着社区Committer们会做CodeReview,然后他会与您讨论一些细节(包括设计,实现,性能等)。当团队中所有人员对本次修改满意后,会将提交合并到dev分支
+
+* 最后,恭喜您已经成为了dolphinscheduler的官方贡献者!
diff --git a/docs/2.0.6/docs/zh/contribute/join/subscribe.md b/docs/2.0.6/docs/zh/contribute/join/subscribe.md
new file mode 100644
index 000000000..1d79fdb1b
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/subscribe.md
@@ -0,0 +1,25 @@
+# 订阅邮件列表
+
+强烈推荐订阅开发邮件列表,与社区保持最新信息同步,这一点非常重要。
+
+在使用DolphinScheduler的过程中,如果您有任何问题或者想法、建议,都可以通过Apache邮件列表参与到DolphinScheduler的社区建设中来。
+
+发送订阅邮件也非常简单,步骤如下:
+
+1. 用自己的邮箱向[dev-subscribe@dolphinscheduler.apache.org](mailto:dev-subscribe@dolphinscheduler.apache.org)发送一封邮件,主题和内容任意。
+
+2. 接收确认邮件并回复。 完成步骤1后,您将收到一封来自dev-help@dolphinscheduler.apache.org的确认邮件(如未收到,请确认邮件是否被自动归入垃圾邮件、推广邮件、订阅邮件等文件夹)。然后直接回复该邮件,或点击邮件里的链接快捷回复即可,主题和内容任意。
+
+3. 接收欢迎邮件。 完成以上步骤后,您会收到一封主题为WELCOME to dev@dolphinscheduler.apache.org的欢迎邮件,至此您已成功订阅Apache DolphinScheduler的邮件列表。
+
+# 取消订阅邮件列表
+
+如果您不再需要了解DolphinScheduler的动态,可以取消订阅邮件列表。
+
+取消订阅邮件列表步骤如下:
+
+1. 用已经订阅的邮箱向[dev-unsubscribe@dolphinscheduler.apache.org](mailto:dev-unsubscribe@dolphinscheduler.apache.org)发送一封邮件,主题和内容任意。
+
+2. 接收确认邮件并回复。 完成步骤1后,您将收到一封来自dev-help@dolphinscheduler.apache.org的确认邮件(如未收到,请确认邮件是否被自动归入垃圾邮件、推广邮件、订阅邮件等文件夹)。然后直接回复该邮件,或点击邮件里的链接快捷回复即可,主题和内容任意。
+
+3. 接收告别邮件。 完成以上步骤后,您会收到一封主题为GOODBYE from dev@dolphinscheduler.apache.org的告别邮件,至此您已成功取消订阅Apache DolphinScheduler的邮件列表,以后将不会再接收来自dev@dolphinscheduler.apache.org的邮件通知。
diff --git a/docs/2.0.6/docs/zh/contribute/join/unit-test.md b/docs/2.0.6/docs/zh/contribute/join/unit-test.md
new file mode 100644
index 000000000..6670aec7a
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/join/unit-test.md
@@ -0,0 +1,110 @@
+## Unit Test 覆盖率
+Unit Test 
+### 1.写单元测试的收益
+* 单元测试能帮助每个人深入代码细节,了解代码的功能。
+* 通过测试用例我们能发现 bug,并提升代码的健壮性。
+* 测试用例同时也是代码的 demo 用法。
+### 2.单元测试用例的一些设计原则
+* 应该精心设计好步骤,颗粒度和组合条件。
+* 注意边界条件。
+* 单元测试也应该好好设计,不要写无用的代码。
+* 当你发现一个`方法`很难写单元测试时,如果可以确认这个`方法`是`臭代码`,那么就和开发者一起重构它。
+<!-- markdown-link-check-disable -->
+* DolphinScheduler: [mockito](http://site.mockito.org/). 下面是一些开发向导: [mockito tutorial](https://www.baeldung.com/bdd-mockito), [mockito refcard](https://dzone.com/refcardz/mockito)
+<!-- markdown-link-check-enable -->
+* TDD(可选):当你开始写一个新的功能时,你可以试着先写测试用例。
+### 3.测试覆盖率设定值
+* 在现阶段,Delta 更改代码的测试覆盖设定值为:>=60%,越高越好。
+* 我们可以在这个页面中看到测试报告: https://codecov.io/gh/apache/dolphinscheduler
+
+## 单元测试基本准则
+### 1: 隔离性与单一性
+
+一个测试用例应该精确到方法级别,并应该能够单独执行该测试用例。同时关注点也始终在该方法上(只测试该方法)。
+
+如果方法过于复杂,开发阶段就应该将其再次进行拆分,对于测试用例来讲,最佳做法是一个用例只关注一个分支(判断)。当对其进行修改后,也仅仅影响一个测试用例的成功与否。这会极大方便我们在开发阶段验证问题和解决问题,但与此同时,也对我们覆盖率提出了极大的挑战。
+
+### 2:自动性
+
+单元测试能够自动化进行。强制要求:所有的单元测试必须写在 src/test 下,同时方法命名应该符合规范。基准测试除外。
+
+### 3:可重复性
+
+多次执行(任何环境任何时间)结果唯一,且可以重复执行。
+
+### 4:轻量型
+
+即任何环境都可快速执行。
+
+这要求我们尽可能不要依赖太多组件,如各种 spring bean 之类的。在单元测试中,这些都是可被 mock 的,增加这些,会加大我们单测的执行速度,同时也可能会传递污染。
+
+对于一些数据库、其他外部组件等。尽可能也采用模拟客户端的形式,即不依赖于外部环境,(任何外部依赖的存在都会极大的限制测试用例的可迁移性和稳定性以及结果正确性),这同时也方便开发者在任何环境都能够进行测试。
+
+### 5: 可测性
+
+这么多年过去了,你所看到的 mockito 已经成长为 mock 界的 NO.1 了,但他依然不支持 mock 静态方法、构造方法等。甚至官网上一直写着: "Don’t mock everything" 。因此尽量少用静态方法。
+
+一般建议只在一些工具类提供静态方法,这种情况下也不需要 mock,直接使用真实类即可。如果被依赖类不是工具类,可以将静态方法重构为实例方法。这样更加符合面向对象的设计理念。
+
+### 6: 完备性
+
+测试覆盖率,这是个非常费劲的问题,对于核心流程,我们是希望能够达到 90% 的覆盖率,非核心流程要求 60% 以上。
+
+覆盖率足够高的情况下会减少足够多的 bug 出现的概率,同时也减少了我们回归测试的成本。这是一个长久的工作,每当开发者新增或者修改代码的时候,相关测试用例与此同时也需要完善。这一点,希望开发者以及相关代码 reviewer 都能足够重视。
+
+### 7:拒绝无效断言
+
+无效断言让测试本身变得毫无意义,它和你的代码正确与否几乎没什么关系,且有可能会给你造成一种成功的假象,这种假象有可能持续到你的代码部署到生产环境。
+
+关于无效的断言这么几种类型
+
+1:不同类型的比较。
+
+2:判断一个具有默认值的对象或者变量不为空。
+
+这本身显得毫无意义,因此,在进行相关判断的时候应该关注一下其本身是否含有默认值。
+
+3:断言尽可能采用肯定断言而非否定断言,断言尽可能在一个预知结果范围内,或者是准确的数值,(否则有可能会导致一些不符合你的实际预期但是通过了断言)除非你的代码只关心他是否为空。
+
+### 8:一些单测的注意点
+1:Thread.sleep()
+
+测试代码中尽量不要使用 Thread.sleep,这让测试变得不稳定,可能会因为环境或者负载而意外导致失败。建议采用以下方式:
+
+Awaitility.await().atMost(…)
+
+2:忽略某些测试类
+
+@Ignore 注解应该附上相关 issue 地址,方便后续开发者追踪了解该测试被忽略的历史原因。
+
+如 @Ignore("see #1")
+
+3: try-catch 单元测试异常
+
+当单元测试中的代码引发异常的时候,测试将失败,因此,不需要使用 try-catch 捕获异常。
+
+```
+@Test
+public void testMethod() {
+  try {
+            // Some code
+  } catch (MyException e) {
+    Assert.fail(e.getMessage());  // Noncompliant
+  }
+}
+```
+你应该这样做:
+```
+@Test
+public void testMethod() throws MyException {
+    // Some code
+}
+```
+4:测试异常情况
+
+当你需要进行异常情况测试时,应该避免在测试代码中包含多个方法的调用(尤其是有多个可以引发相同异常的方法),同时应该明确说明你要测试什么。
+
+5:拒绝使用 MockitoJUnitRunner.Silent.class
+
+当单测出现 UnnecessaryStubbingException 时,请不要第一时间考虑使用 @RunWith(MockitoJUnitRunner.Silent.class) 来解决它,这只是隐藏了问题,
+你应该根据异常提示解决相关问题,这并不是一个困难的工作。当完成更改时,你会发现,你的代码又简洁了许多。
diff --git a/docs/2.0.6/docs/zh/contribute/release/release-post.md b/docs/2.0.6/docs/zh/contribute/release/release-post.md
new file mode 100644
index 000000000..07050594a
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/release/release-post.md
@@ -0,0 +1,30 @@
+# 发版后续
+
+发送公告邮件后,我们还有一些发布任务要做,目前我们必须将 Docker 镜像发布到 Docker Hub 和 并且需要将 pydolphinscheduler 发布到 PyPI。
+
+## 发布 Docker 镜像
+
+我们已经有 CI 发布最新的 Docker 镜像到 GitHub container register [点击查看详情](https://github.com/apache/dolphinscheduler/blob/d80cf21456265c9d84e642bdb4db4067c7577fc6/.github/workflows/publish-docker.yaml#L55-L63)。
+我们可以稍微修改 CI 的主要命令实现单个命令发布 Docker 镜像发布到 Docker Hub。
+
+```bash
+# 请将 <VERSION> 修改成你要发版的版本
+./mvnw -B clean deploy \
+    -Dmaven.test.skip \
+    -Dmaven.javadoc.skip \
+    -Dmaven.checkstyle.skip \
+    -Dmaven.deploy.skip \
+    -Ddocker.tag=<VERSION> \
+    -Ddocker.hub=apache \
+    -Pdocker,release
+```
+
+## 发布 pydolphinscheduler 到 PyPI
+
+需要将 Python API 发布到 PyPI,请参考 [Python API release](https://github.com/apache/dolphinscheduler/blob/dev/dolphinscheduler-python/pydolphinscheduler/RELEASE.md#to-pypi)
+完成 PyPI 的发版
+
+## 获取全部的贡献者
+
+当您想要发布新版本的新闻或公告时,您可能需要当前版本的所有贡献者,您可以使用 git 命令 `git log --pretty="%an" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> | sort | uniq`
+(将对应的版本改成两个版本的 tag 值)自动生成 git 作者姓名。
\ No newline at end of file
diff --git a/docs/2.0.6/docs/zh/contribute/release/release-prepare.md b/docs/2.0.6/docs/zh/contribute/release/release-prepare.md
new file mode 100644
index 000000000..9dedd36dd
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/release/release-prepare.md
@@ -0,0 +1,32 @@
+# 发版准备
+
+## 检查 release-docs
+
+和上一个版本比较,如果有依赖及版本发生了变化,当前版本的 `release-docs` 需要被更新到最新
+
+ - `dolphinscheduler-dist/release-docs/LICENSE`
+ - `dolphinscheduler-dist/release-docs/NOTICE`
+ - `dolphinscheduler-dist/release-docs/licenses`
+
+## 更新版本
+
+例如要发版 `x.y.z`,需要先进行以下修改:
+
+- 修改代码中的版本号:
+  - `sql`:
+    - `dolphinscheduler_mysql.sql`: `t_ds_version` 版本更新为 x.y.z
+    - `dolphinscheduler_postgre.sql`: `t_ds_version` 版本更新为 x.y.z
+    - `dolphinscheduler_h2.sql`: `t_ds_version` 版本更新为 x.y.z
+    - `upgrade`: 是否新增 `x.y.z_schema`
+    - `soft_version`: 版本更新为 x.y.z
+  - `deploy/docker/.env`: `HUB` 改为 `apache`,`TAG` 改为 `x.y.z`
+  - `deploy/kubernetes/dolphinscheduler`:
+    - `Chart.yaml`: `appVersion` 版本更新为 x.y.z (`version` 为 helm chart 版本, 增量更新但不要设置为 x.y.z)
+    - `values.yaml`: `image.tag` 版本更新为 x.y.z
+  - `dolphinscheduler-python/pydolphinscheduler/setup.py`: 修改其中的 `version` 为 x.y.z
+- 修改文档(docs模块)中的版本号:
+  - 将 `docs` 文件夹下文件的占位符 `<version>` (除了 pom.xml 相关的) 修改成 `x.y.z`
+  - 新增历史版本
+     - `docs/docs/en/history-versions.md` 和 `docs/docs/zh/history-versions.md`: 增加新的历史版本为 `x.y.z`
+  - 修改文档 sidebar
+    - `docs/configs/docsdev.js`: 将里面的 `/dev/` 修改成 `/x.y.z/`
diff --git a/docs/2.0.6/docs/zh/contribute/release/release.md b/docs/2.0.6/docs/zh/contribute/release/release.md
new file mode 100644
index 000000000..71fb7cf1c
--- /dev/null
+++ b/docs/2.0.6/docs/zh/contribute/release/release.md
@@ -0,0 +1,534 @@
+# 发版指南
+
+## 检查环境
+
+为确保您可以成功完成 DolphinScheduler 的发布,您应该检查您的环境并确保满足所有条件,如果缺少任何条件,您应该安装它们并确保它们正常工作。
+
+```shell
+# 需要 JDK 1.8 及以上的版本
+java -version
+# 需要 Maven 
+mvn -version
+# 需要 Python 3.6 及以上的版本,并且需要 `python` 关键字能在命令行中运行,且版本符合条件。
+python --version
+```
+
+## GPG设置
+
+### 安装GPG
+
+在[GnuPG官网](https://www.gnupg.org/download/index.html)下载安装包。
+GnuPG的1.x版本和2.x版本的命令有细微差别,下列说明以`GnuPG-2.1.23`版本为例。
+
+安装完成后,执行以下命令查看版本号。
+
+```shell
+gpg --version
+```
+
+### 创建key
+
+安装完成后,执行以下命令创建key。
+
+`GnuPG-2.x`可使用:
+
+```shell
+gpg --full-gen-key
+```
+
+`GnuPG-1.x`可使用:
+
+```shell
+gpg --gen-key
+```
+
+根据提示完成key,**注意:请使用Apache mail 和 对应的密码生成GPG的Key。**
+
+```shell
+gpg (GnuPG) 2.0.12; Copyright (C) 2009 Free Software Foundation, Inc.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Please select what kind of key you want:
+  (1) RSA and RSA (default)
+  (2) DSA and Elgamal
+  (3) DSA (sign only)
+  (4) RSA (sign only)
+Your selection? 1
+RSA keys may be between 1024 and 4096 bits long.
+What keysize do you want? (2048) 4096
+Requested keysize is 4096 bits
+Please specify how long the key should be valid.
+        0 = key does not expire
+     <n>  = key expires in n days
+     <n>w = key expires in n weeks
+     <n>m = key expires in n months
+     <n>y = key expires in n years
+Key is valid for? (0)
+Key does not expire at all
+Is this correct? (y/N) y
+
+GnuPG needs to construct a user ID to identify your key.
+
+Real name: ${输入用户名}
+Email address: ${输入邮件地址}
+Comment: ${输入注释}
+You selected this USER-ID:
+   "${输入的用户名} (${输入的注释}) <${输入的邮件地址}>"
+
+Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O
+You need a Passphrase to protect your secret key. # 输入apache登录密码
+```
+注意:如果遇到以下错误:
+```
+gpg: cancelled by user
+gpg: Key generation canceled.
+```
+需要使用自己的用户登录服务器,而不是root切到自己的账户
+
+### 查看生成的key
+
+```shell
+gpg --list-keys
+```
+
+执行结果:
+
+```shell
+pub   4096R/85E11560 2019-11-15
+uid                  ${用户名} (${注释}) <{邮件地址}>
+sub   4096R/A63BC462 2019-11-15
+```
+
+其中85E11560为公钥ID。
+
+### 将公钥同步到服务器
+
+命令如下:
+
+```shell
+gpg --keyserver hkp://pool.sks-keyservers.net --send-key 85E11560
+```
+
+`pool.sks-keyservers.net`为随意挑选的[公钥服务器](https://sks-keyservers.net/status/),每个服务器之间是自动同步的,选任意一个即可。
+
+注意:如果同步到公钥服务器,可以在服务器上查到新建的公钥
+http://keyserver.ubuntu.com:11371/pks/lookup?search=${用户名}&fingerprint=on&op=index
+备用公钥服务器 gpg --keyserver hkp://keyserver.ubuntu.com --send-key ${公钥ID}
+
+
+## 发布Apache Maven中央仓库
+
+### 设置 `settings-security.xml` 和 `settings.xml` 文件
+
+在本节中,我们添加 Apache 服务器 maven 配置以准备发布,请参考[这里](http://maven.apache.org/guides/mini/guide-encryption.html) 添加
+`settings-security.xml` 文件,并且像下面这样更改你的 `~/.m2/settings.xml`
+
+```xml
+<settings>
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username> <!-- APACHE LDAP 用户名 --> </username>
+      <password> <!-- APACHE LDAP 加密后的密码 --> </password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username> <!-- APACHE LDAP 用户名 --> </username>
+      <password> <!-- APACHE LDAP 加密后的密码 --> </password>
+    </server>
+  </servers>
+</settings>
+```
+
+### 配置环境变量
+
+我们将多次使用发布版本 `VERSION`,github名称 `GH_USERNAME`,以及 Apache 用户名 `<YOUR-APACHE-USERNAME>`,因此最好将其存储到bash变量中以便于使用。
+
+```shell
+VERSION=<THE-VERSION-YOU-RELEASE>
+GH_USERNAME=<YOUR-GITHUB-USERNAME>
+A_USERNAME=<YOUR-APACHE-USERNAME>
+```
+
+> 注意:设置环境变量后,我们可以直接在你的 bash 中使用该变量,而无需更改任何内容。例如,我们可以直接使用命令 `git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git`
+> 来克隆发布分支,他会自动将其中的 `"${VERSION}"` 转化成你设置的值 `<THE-VERSION-YOU-RELEASE>`。 但是您必须在一些非 bash 步骤中手动更改 
+> `<VERSION>` 为对应的版本号,例如发起投票中的内容。我们使用 `<VERSION>` 而不是 `"${VERSION}"` 来提示 release manager 他们必须手动更改这部分内容
+
+### 创建发布分支
+
+在本节中,我们从 github 下载源代码并创建新分支以发布
+
+```shell
+git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git
+cd ~/dolphinscheduler/
+git pull
+git checkout -b "${VERSION}"-release
+git push origin "${VERSION}"-release
+```
+
+### 发布预校验
+
+```shell
+# 保证 python profile 的 gpg 可以正常运行
+export GPG_TTY=$(tty)
+
+# 运行发版校验
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DdryRun=true -Dusername="${GH_USERNAME}"
+```
+
+* `-Prelease,python`: 选择release和python的profile,这个profile会打包所有源码、jar文件以及可执行二进制包,以及Python的二进制包。
+* `-DautoVersionSubmodules=true`: 作用是发布过程中版本号只需要输入一次,不必为每个子模块都输入一次。
+* `-DdryRun=true`: 演练,即不产生版本号提交,不生成新的tag。
+
+### 准备发布
+
+首先清理发布预校验本地信息。
+
+```shell
+mvn release:clean
+```
+
+然后准备执行发布。
+
+```shell
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DpushChanges=false -Dusername="${GH_USERNAME}"
+```
+
+和上一步演练的命令基本相同,去掉了 `-DdryRun=true` 参数。
+
+* `-DpushChanges=false`:不要将修改后的版本号和tag自动提交至GitHub。
+
+> 注意:如果你遇到来自 git 的类似 **Please tell me who you are.** 错误信息。您可以通过命令 `git config --global user.email "you@example.com"`
+> 和 `git config --global user.name "Your Name"` 来配置你的用户名和邮箱如果你遇到一些错误。
+
+将本地文件检查无误后,提交至github。
+
+```shell
+git push -u origin "${VERSION}"-release
+git push origin --tags
+```
+
+<!-- markdown-link-check-disable -->
+
+> 注意1:因为 Github 不再支持在 HTTPS 协议中使用原生密码,所以在这一步你应该使用 github token 作为密码。你可以通过 https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token
+> 了解更多如何创建 token 的信息。
+
+> 注意2:命令完成后,会自动创建 `release.properties` 文件和 `*.Backup` 文件,它们在下面的命令中是需要的,不要删除它们
+
+<!-- markdown-link-check-enable -->
+
+### 部署发布
+
+```shell
+mvn release:perform -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -Dusername="${GH_USERNAME}"
+```
+
+执行完该命令后,待发布版本会自动上传到Apache的临时筹备仓库(staging repository)。你可以通过访问 [apache staging repositories](https://repository.apache.org/#stagingRepositories)
+, 然后使用Apache的LDAP账户登录后,就会看到上传的版本,`Repository` 列的内容即为 `${STAGING.REPOSITORY}`。
+点击 `Close` 来告诉Nexus这个构建已经完成,只有这样该版本才是可用的。如果电子签名等出现问题,`Close` 会失败,可以通过 `Activity` 查看失败信息。
+
+## 发布Apache SVN仓库
+
+### 检出dolphinscheduler发布目录
+
+如无本地工作目录,则先创建本地工作目录。
+
+```shell
+mkdir -p ~/ds_svn/dev/
+cd ~/ds_svn/dev/
+```
+
+创建完毕后,从Apache SVN检出dolphinscheduler发布目录。
+
+```shell
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/dev/dolphinscheduler
+cd ~/ds_svn/dev/dolphinscheduler
+```
+
+### 添加gpg公钥
+
+仅第一次部署的账号需要添加,只要`KEYS`中包含已经部署过的账户的公钥即可。
+
+```shell
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+```
+
+### 将待发布的内容添加至SVN目录
+
+创建版本号目录。
+
+```shell
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+cd ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+```
+
+将源码包和二进制包添加至SVN工作目录。
+
+```shell
+# 主程序源码包和二进制包
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz.asc ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+
+# Python API 源码和二进制包
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/python/* ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+```
+
+### 生成文件签名
+
+```shell
+shasum -a 512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz >> apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -b -a 512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz >> apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -a 512 apache-dolphinscheduler-python-"${VERSION}".tar.gz >> apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -b -a 512 apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl >> apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### 提交Apache SVN
+
+```shell
+cd ~/ds_svn/dev/dolphinscheduler
+svn add *
+svn --username="${A_USERNAME}" commit -m "release ${VERSION}"
+```
+## 检查发布结果
+
+### 检查sha512哈希
+
+```shell
+shasum -c apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -c apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -c apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -c apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### 检查gpg签名
+
+首先导入发布人公钥。从svn仓库导入KEYS到本地环境。(发布版本的人不需要再导入,帮助做验证的人需要导入,用户名填发版人的即可)
+
+```shell
+curl https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS >> KEYS
+gpg --import KEYS
+gpg --edit-key "${A_USERNAME}"
+  > trust
+
+Please decide how far you trust this user to correctly verify other users' keys
+(by looking at passports, checking fingerprints from different sources, etc.)
+
+  1 = I don't know or won't say
+  2 = I do NOT trust
+  3 = I trust marginally
+  4 = I trust fully
+  5 = I trust ultimately
+  m = back to the main menu
+
+Your decision? 5
+
+  > save
+```
+
+然后进行gpg签名检查。
+
+```shell
+gpg --verify apache-dolphinscheduler-"${VERSION}"-src.tar.gz.asc
+gpg --verify apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.asc
+cd python
+gpg --verify apache-dolphinscheduler-python-"${VERSION}".tar.gz.asc
+gpg --verify apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.asc
+cd ../
+```
+
+> 注意:当你找不到你的 `asc` 文件时,你必须手动创建 gpg 签名,命令 `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz`
+> 和 `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz` 将创建它们
+
+### 检查发布文件内容
+
+#### 检查源码包的文件内容
+
+解压缩`apache-dolphinscheduler-<VERSION>-src.tar.gz`以及Python文件夹下的`apache-dolphinscheduler-python-<VERSION>.tar.gz`,进行如下检查:
+
+- 检查源码包是否包含由于包含不必要文件,致使tarball过于庞大
+- 存在`LICENSE`和`NOTICE`文件
+- 只存在文本文件,不存在二进制文件
+- 所有文件的开头都有ASF许可证
+- 能够正确编译,单元测试可以通过 (mvn install)
+- 版本内容与GitHub上tag的内容相符 (diff -r a verify_dir tag_dir)
+- 检查是否有多余文件或文件夹,例如空文件夹等
+
+#### 检查二进制包的文件内容
+
+解压缩`apache-dolphinscheduler-<VERSION>-src.tar.gz`和`apache-dolphinscheduler-python-<VERSION>-bin.tar.gz`
+进行如下检查:
+
+- 存在`LICENSE`和`NOTICE`文件
+- 所有文本文件开头都有ASF许可证
+- 检查第三方依赖许可证:
+  - 第三方依赖的许可证兼容
+  - 所有第三方依赖的许可证都在`LICENSE`文件中声明
+  - 依赖许可证的完整版全部在`license`目录
+  - 如果依赖的是Apache许可证并且存在`NOTICE`文件,那么这些`NOTICE`文件也需要加入到版本的`NOTICE`文件中
+
+## 发起投票
+
+### 更新版本说明
+
+在 GitHub 中通过 [创建新的 release note](https://github.com/apache/dolphinscheduler/releases/new) 创建一个 release note。 这要在
+投票邮件开始之前完成,因为我们需要在邮件中使用 release note。你可以通过命令 `git log --pretty="- %s" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> > changelog.md`
+自动生成 changelog(部分可以不太准确,需要人为过滤一遍),然后将他们分类并粘贴到 GitHub 的 release note 中
+
+### 投票阶段
+
+1. DolphinScheduler社区投票,发起投票邮件到`dev@dolphinscheduler.apache.org`。PMC需要先按照文档检查版本的正确性,然后再进行投票。
+经过至少72小时并统计到至少3个 PMC member 的`+1`票且没有`-1`票后,即可进入下一阶段。
+
+2. 宣布投票结果,发起投票结果邮件到`dev@dolphinscheduler.apache.org`。
+
+### 投票模板
+
+ 1. DolphinScheduler社区投票模板
+
+标题:
+
+```txt
+[VOTE] Release Apache DolphinScheduler <VERSION>
+```
+
+正文:
+
+```txt
+Hello DolphinScheduler Community,
+
+This is a call for vote to release Apache DolphinScheduler version <VERSION>
+
+Release notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+The release candidates: https://dist.apache.org/repos/dist/dev/dolphinscheduler/<VERSION>/
+
+Maven 2 staging repository: https://repository.apache.org/content/repositories/<VERSION>/org/apache/dolphinscheduler/
+
+Git tag for the release: https://github.com/apache/dolphinscheduler/tree/<VERSION>
+
+Release Commit ID: https://github.com/apache/dolphinscheduler/commit/<SHA-VALUE>
+
+Keys to verify the Release Candidate: https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS
+
+Look at here for how to verify this release candidate: https://dolphinscheduler.apache.org/en-us/community/release.html
+
+The vote will be open for at least 72 hours or until necessary number of votes are reached.
+
+Please vote accordingly:
+
+[ ] +1 approve
+[ ] +0 no opinion
+[ ] -1 disapprove with the reason
+
+Checklist for reference:
+
+[ ] Download links are valid.
+[ ] Checksums and PGP signatures are valid.
+[ ] Source code artifacts have correct names matching the current release.
+[ ] LICENSE and NOTICE files are correct for each DolphinScheduler repo.
+[ ] All files have license headers if necessary.
+[ ] No compiled archives bundled in source archive.
+```
+
+2. 宣布投票结果模板
+
+正文:
+
+```txt
+The vote to release Apache DolphinScheduler <VERSION> has passed. Here is the vote result,
+
+4 PMC member +1 votes:
+
+xxx
+xxx
+xxx
+xxx
+
+1 community +1 vote:
+xxx
+
+Thanks everyone for taking time to check this release and help us.
+```
+
+
+## 完成发布
+
+### 将源码和二进制包从svn的dev目录移动到release目录
+
+```shell
+svn mv https://dist.apache.org/repos/dist/dev/dolphinscheduler/"${VERSION}" https://dist.apache.org/repos/dist/release/dolphinscheduler/
+```
+
+### 将 gpg KEYS 从svn的dev目录移动到release目录
+
+只有你第一次使用该 KEY 发版时才需要,如果之前已经发过版且 KEY 没有变化则不需要
+
+```shell
+mkdir -p ~/ds_svn/release/
+cd ~/ds_svn/release/
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/release/dolphinscheduler
+cd dolphinscheduler
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+svn add *
+svn --username="${A_USERNAME}" commit -m "new key <YOUR-GPG-KEY-ID> add"
+
+### 更新文档
+
+官网应该在您发送通知邮件之前完成更新,本节将告诉您如何更改网站。假设发版的版本是 `<VERSION>`,需要进行以下更新(注意,当修改pull requests 被 merge 后就会生效):
+
+- **apache/dolphinscheduler-website** 仓库:
+  - `download/en-us/download.md` 和 `download/zh-cn/download.md`: 增加 `<VERSION>` 版本发布包的下载
+  - `scripts/conf.sh`: 在变量 `DEV_RELEASE_DOCS_VERSIONS` 中增加版本为 `<VERSION>` 的新键值对
+- **apache/dolphinscheduler** 仓库:
+  - `docs/configs/site.js`:
+    - `docsLatest`: 更新为 `<VERSION>`
+    - `docs0`: 两处 `en-us/zh-cn` 的 `text` 更新为 `latest(<VERSION>)`
+    - `docsxyz`: 两处 `en-us/zh-cn` 的 `children` 增加 `key` 为 `docsxyz`, `text` 为 `<VERSION>` 的下拉菜单
+  - `docs/configs/index.md.jsx`: 增加 `'<VERSION>': docsxyzConfig,`
+  - `docs/docs/en/history-versions.md` 和 `docs/docs/zh/history-versions.md`: 增加新的发版版本 `<VERSION>` 的链接
+  - `.github/ISSUE_TEMPLATE/bug-report.yml`: DolphinScheduler 在 GitHub issue 中有版本选择的部分,当有新版本发版后,需要更新这部分的内容。目前与版本关联的是
+    [bug-report](https://github.com/apache/dolphinscheduler/blob/dev/.github/ISSUE_TEMPLATE/bug-report.yml),发版的时候需要
+    向其中的 **Version** 部分增加内容。
+
+### 在 [apache staging repositories](https://repository.apache.org/#stagingRepositories) 仓库找到 DolphinScheduler 并点击`Release`
+
+### 发送公告邮件通知社区
+
+当完成了上述的发版流程后,需要发送一封公告邮件给社区。你需要将邮件发送到 `dev@dolphinscheduler.apache.org` 并抄送到 `announce@apache.org`。
+
+通知邮件模板如下:
+
+标题:
+
+```txt
+[ANNOUNCE] Release Apache DolphinScheduler <VERSION>
+```
+
+正文:
+
+```txt
+Hi all,
+
+We are glad to announce the release of Apache DolphinScheduler <VERSION>. Once again I would like to express my thanks to your help.
+
+Dolphin Scheduler is a distributed and easy-to-extend visual workflow scheduler system,
+dedicated to solving the complex task dependencies in data processing, making the scheduler system out of the box for data processing.
+
+
+Download Links: https://dolphinscheduler.apache.org/en-us/download/download.html
+
+Release Notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+Website: https://dolphinscheduler.apache.org/
+
+DolphinScheduler Resources:
+- Issue: https://github.com/apache/dolphinscheduler/issues/
+- Mailing list: dev@dolphinscheduler.apache.org
+- Documents: https://dolphinscheduler.apache.org/zh-cn/docs/<VERSION>/user_doc/about/introduction.html
+```
diff --git a/img/architecture-design/dag_examples.png b/img/architecture-design/dag_examples.png
new file mode 100644
index 000000000..15848da71
Binary files /dev/null and b/img/architecture-design/dag_examples.png differ
diff --git a/img/architecture-design/distributed_lock.png b/img/architecture-design/distributed_lock.png
new file mode 100644
index 000000000..5c34fc429
Binary files /dev/null and b/img/architecture-design/distributed_lock.png differ
diff --git a/img/architecture-design/distributed_lock_procss.png b/img/architecture-design/distributed_lock_procss.png
new file mode 100644
index 000000000..469128bc1
Binary files /dev/null and b/img/architecture-design/distributed_lock_procss.png differ
diff --git a/img/architecture-design/fault-tolerant.png b/img/architecture-design/fault-tolerant.png
new file mode 100644
index 000000000..45dadf76e
Binary files /dev/null and b/img/architecture-design/fault-tolerant.png differ
diff --git a/img/architecture-design/fault-tolerant_master.png b/img/architecture-design/fault-tolerant_master.png
new file mode 100644
index 000000000..a9901ce73
Binary files /dev/null and b/img/architecture-design/fault-tolerant_master.png differ
diff --git a/img/architecture-design/fault-tolerant_worker.png b/img/architecture-design/fault-tolerant_worker.png
new file mode 100644
index 000000000..e7f379d1f
Binary files /dev/null and b/img/architecture-design/fault-tolerant_worker.png differ
diff --git a/img/architecture-design/grpc.png b/img/architecture-design/grpc.png
new file mode 100644
index 000000000..633b83756
Binary files /dev/null and b/img/architecture-design/grpc.png differ
diff --git a/img/architecture-design/lack_thread.png b/img/architecture-design/lack_thread.png
new file mode 100644
index 000000000..0dc5a7b13
Binary files /dev/null and b/img/architecture-design/lack_thread.png differ
diff --git a/img/architecture-design/process_priority.png b/img/architecture-design/process_priority.png
new file mode 100644
index 000000000..c6cd6001a
Binary files /dev/null and b/img/architecture-design/process_priority.png differ
diff --git a/img/architecture-design/task_priority.png b/img/architecture-design/task_priority.png
new file mode 100644
index 000000000..347026071
Binary files /dev/null and b/img/architecture-design/task_priority.png differ
diff --git a/img/contribute/join/pull-request/checkstyle-idea.png b/img/contribute/join/pull-request/checkstyle-idea.png
new file mode 100644
index 000000000..023fc3a54
Binary files /dev/null and b/img/contribute/join/pull-request/checkstyle-idea.png differ
diff --git a/img/contribute/join/pull-request/code-style-idea.png b/img/contribute/join/pull-request/code-style-idea.png
new file mode 100644
index 000000000..6c2ecfaa1
Binary files /dev/null and b/img/contribute/join/pull-request/code-style-idea.png differ