Posted to commits@dolphinscheduler.apache.org by zh...@apache.org on 2022/06/02 15:29:00 UTC

[dolphinscheduler] branch dev updated: [doc] Use related path for img (#10325)

This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git


The following commit(s) were added to refs/heads/dev by this push:
     new 12fdd9bbd9 [doc] Use related path for img (#10325)
12fdd9bbd9 is described below

commit 12fdd9bbd9d15dfe2a993731a2b5054ea5eb3956
Author: Jiajie Zhong <zh...@hotmail.com>
AuthorDate: Thu Jun 2 23:28:53 2022 +0800

    [doc] Use related path for img (#10325)
    
    Use relative paths for images in our docs. Previously we
    could not use them because the website needs absolute paths
    from the root directory; after merging
    apache/dolphinscheduler-website#789 we have a convert
    function to handle that.
    
    close: #9426
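
For context, the conversion the commit message refers to works roughly as
follows. This is a minimal sketch in Python, not the actual docs/img_utils.py
or the website's convert function; the helper name is an illustrative
assumption. It rewrites an absolute "/img/..." reference into a relative one
whose "../" depth matches the doc file's location under docs/docs/:

    # Minimal sketch; NOT the actual docs/img_utils.py implementation.
    from pathlib import PurePosixPath

    def to_relative_img(doc_path: str, content: str) -> str:
        # Depth of the doc's directory below docs/, e.g.
        # docs/docs/en/about/glossary.md -> docs/en/about -> 3 levels up.
        depth = len(PurePosixPath(doc_path).parent.relative_to("docs").parts)
        prefix = "../" * depth
        # Rewrite both Markdown images "](/img/..." and HTML src="/img/...".
        content = content.replace("](/img/", "](" + prefix + "img/")
        content = content.replace('src="/img/', 'src="' + prefix + "img/")
        return content

Every image reference then resolves to the shared docs/img/ directory
regardless of language or nesting level.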
---
 .dlc.json                                          |   4 -
 .github/workflows/docs.yml                         |   1 +
 docs/docs/en/about/glossary.md                     |   2 +-
 docs/docs/en/architecture/cache.md                 |   2 +-
 docs/docs/en/architecture/design.md                |  10 +-
 docs/docs/en/architecture/metadata.md              |   6 +-
 docs/docs/en/development/architecture-design.md    |  22 ++--
 docs/docs/en/development/e2e-test.md               |  10 +-
 docs/docs/en/faq.md                                |   2 +-
 .../docs/en/guide/alert/alert_plugin_user_guide.md |  10 +-
 docs/docs/en/guide/alert/dingtalk.md               |   2 +-
 docs/docs/en/guide/alert/enterprise-webexteams.md  |  14 +--
 docs/docs/en/guide/alert/enterprise-wechat.md      |  20 ++--
 docs/docs/en/guide/alert/http.md                   |   4 +-
 docs/docs/en/guide/alert/script.md                 |   2 +-
 docs/docs/en/guide/alert/telegram.md               |   2 +-
 docs/docs/en/guide/data-quality.md                 |  26 ++---
 docs/docs/en/guide/datasource/hive.md              |   4 +-
 docs/docs/en/guide/datasource/mysql.md             |   2 +-
 docs/docs/en/guide/datasource/postgresql.md        |   2 +-
 docs/docs/en/guide/datasource/spark.md             |   2 +-
 docs/docs/en/guide/homepage.md                     |   2 +-
 docs/docs/en/guide/monitor.md                      |  10 +-
 docs/docs/en/guide/open-api.md                     |  18 +--
 docs/docs/en/guide/parameter/context.md            |  14 +--
 docs/docs/en/guide/parameter/global.md             |   6 +-
 docs/docs/en/guide/parameter/local.md              |  10 +-
 docs/docs/en/guide/parameter/priority.md           |   8 +-
 docs/docs/en/guide/project/project-list.md         |   4 +-
 docs/docs/en/guide/project/task-definition.md      |   2 +-
 docs/docs/en/guide/project/task-instance.md        |   4 +-
 docs/docs/en/guide/project/workflow-definition.md  |  36 +++---
 docs/docs/en/guide/project/workflow-instance.md    |  20 ++--
 docs/docs/en/guide/resource/file-manage.md         |  18 +--
 docs/docs/en/guide/resource/task-group.md          |  12 +-
 docs/docs/en/guide/resource/udf-manage.md          |   8 +-
 docs/docs/en/guide/security.md                     |  20 ++--
 docs/docs/en/guide/start/docker.md                 |   2 +-
 docs/docs/en/guide/start/quick-start.md            |  26 ++---
 docs/docs/en/guide/task/conditions.md              |  10 +-
 docs/docs/en/guide/task/datax.md                   | 126 ++++++++++-----------
 docs/docs/en/guide/task/dependent.md               |   8 +-
 docs/docs/en/guide/task/flink.md                   |  10 +-
 docs/docs/en/guide/task/http.md                    |   4 +-
 docs/docs/en/guide/task/jupyter.md                 |   4 +-
 docs/docs/en/guide/task/kubernetes.md              |   4 +-
 docs/docs/en/guide/task/map-reduce.md              |   8 +-
 docs/docs/en/guide/task/mlflow.md                  |  20 ++--
 docs/docs/en/guide/task/openmldb.md                |   6 +-
 docs/docs/en/guide/task/pigeon.md                  |   2 +-
 docs/docs/en/guide/task/python.md                  |   6 +-
 docs/docs/en/guide/task/shell.md                   |   6 +-
 docs/docs/en/guide/task/spark.md                   |  10 +-
 docs/docs/en/guide/task/sql.md                     |   6 +-
 docs/docs/en/guide/task/stored-procedure.md        |   2 +-
 docs/docs/en/guide/task/sub-process.md             |   8 +-
 docs/docs/en/guide/task/switch.md                  |   4 +-
 docs/docs/en/guide/task/zeppelin.md                |   6 +-
 docs/docs/zh/about/glossary.md                     |   2 +-
 docs/docs/zh/architecture/cache.md                 |   2 +-
 docs/docs/zh/architecture/design.md                |  10 +-
 docs/docs/zh/architecture/metadata.md              |   6 +-
 docs/docs/zh/development/architecture-design.md    |  16 +--
 docs/docs/zh/development/e2e-test.md               |  10 +-
 docs/docs/zh/faq.md                                |   2 +-
 .../docs/zh/guide/alert/alert_plugin_user_guide.md |  10 +-
 docs/docs/zh/guide/alert/dingtalk.md               |   2 +-
 docs/docs/zh/guide/alert/enterprise-webexteams.md  |  14 +--
 docs/docs/zh/guide/alert/enterprise-wechat.md      |  20 ++--
 docs/docs/zh/guide/alert/http.md                   |   4 +-
 docs/docs/zh/guide/alert/script.md                 |   2 +-
 docs/docs/zh/guide/alert/telegram.md               |   2 +-
 docs/docs/zh/guide/data-quality.md                 |  26 ++---
 docs/docs/zh/guide/datasource/hive.md              |   4 +-
 docs/docs/zh/guide/datasource/mysql.md             |   2 +-
 docs/docs/zh/guide/datasource/postgresql.md        |   2 +-
 docs/docs/zh/guide/datasource/spark.md             |   4 +-
 docs/docs/zh/guide/homepage.md                     |   2 +-
 docs/docs/zh/guide/monitor.md                      |  10 +-
 docs/docs/zh/guide/open-api.md                     |  18 +--
 docs/docs/zh/guide/parameter/context.md            |  14 +--
 docs/docs/zh/guide/parameter/global.md             |   6 +-
 docs/docs/zh/guide/parameter/local.md              |  10 +-
 docs/docs/zh/guide/parameter/priority.md           |   8 +-
 docs/docs/zh/guide/project/project-list.md         |   4 +-
 docs/docs/zh/guide/project/task-definition.md      |   2 +-
 docs/docs/zh/guide/project/task-instance.md        |   4 +-
 docs/docs/zh/guide/project/workflow-definition.md  |  36 +++---
 docs/docs/zh/guide/project/workflow-instance.md    |  20 ++--
 docs/docs/zh/guide/resource/file-manage.md         |  18 +--
 docs/docs/zh/guide/resource/task-group.md          |  12 +-
 docs/docs/zh/guide/resource/udf-manage.md          |   8 +-
 docs/docs/zh/guide/security.md                     |  20 ++--
 docs/docs/zh/guide/start/docker.md                 |   2 +-
 docs/docs/zh/guide/start/quick-start.md            |  24 ++--
 docs/docs/zh/guide/task/conditions.md              |  10 +-
 docs/docs/zh/guide/task/datax.md                   | 124 ++++++++++----------
 docs/docs/zh/guide/task/dependent.md               |   8 +-
 docs/docs/zh/guide/task/flink.md                   |  10 +-
 docs/docs/zh/guide/task/http.md                    |   4 +-
 docs/docs/zh/guide/task/jupyter.md                 |   4 +-
 docs/docs/zh/guide/task/kubernetes.md              |   4 +-
 docs/docs/zh/guide/task/map-reduce.md              |   8 +-
 docs/docs/zh/guide/task/mlflow.md                  |  20 ++--
 docs/docs/zh/guide/task/openmldb.md                |   6 +-
 docs/docs/zh/guide/task/pigeon.md                  |   2 +-
 docs/docs/zh/guide/task/python.md                  |   6 +-
 docs/docs/zh/guide/task/shell.md                   |   6 +-
 docs/docs/zh/guide/task/spark.md                   |  10 +-
 docs/docs/zh/guide/task/sql.md                     |   6 +-
 docs/docs/zh/guide/task/stored-procedure.md        |   2 +-
 docs/docs/zh/guide/task/sub-process.md             |   8 +-
 docs/docs/zh/guide/task/switch.md                  |   4 +-
 docs/docs/zh/guide/task/zeppelin.md                |   6 +-
 docs/img_utils.py                                  |  11 +-
 115 files changed, 618 insertions(+), 618 deletions(-)
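
The number of "../" segments in each rewritten link below simply mirrors how
deep the doc file sits under docs/docs/. A hypothetical check (not part of
this commit) to confirm each link still resolves to docs/img/:

    import posixpath

    def resolves_to_docs_img(doc_path: str, rel_img: str) -> bool:
        # Join the relative link against the doc's directory and normalize;
        # every rewritten reference should land under docs/img/.
        joined = posixpath.join(posixpath.dirname(doc_path), rel_img)
        return posixpath.normpath(joined).startswith("docs/img/")

    # docs/docs/en/about/glossary.md is 3 levels below docs/ ...
    assert resolves_to_docs_img("docs/docs/en/about/glossary.md",
                                "../../../img/new_ui/dev/about/glossary.png")
    # ... while docs/docs/en/guide/alert/dingtalk.md is 4 levels down.
    assert resolves_to_docs_img("docs/docs/en/guide/alert/dingtalk.md",
                                "../../../../img/new_ui/dev/alert/alert_dingtalk.png")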

diff --git a/.dlc.json b/.dlc.json
index 7c0ff97038..de75d319c2 100644
--- a/.dlc.json
+++ b/.dlc.json
@@ -21,10 +21,6 @@
     {
       "pattern": "^/zh-cn/download/download.html$",
       "replacement": "https://dolphinscheduler.apache.org/zh-cn/download/download.html"
-    },
-    {
-      "pattern": "^/img",
-      "replacement": "{{BASEURL}}/docs/img"
     }
   ],
   "timeout": "10s",
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 1ef6c75e31..7f053d8949 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -22,6 +22,7 @@ on:
       - '.github/workflows/docs.yml'
       - '**/*.md'
       - 'docs/**'
+      - '.dlc.json'
   schedule:
     - cron: '0 18 * * *'  # TimeZone: UTC 0
 
diff --git a/docs/docs/en/about/glossary.md b/docs/docs/en/about/glossary.md
index 0dd2ed0caf..492fdee963 100644
--- a/docs/docs/en/about/glossary.md
+++ b/docs/docs/en/about/glossary.md
@@ -9,7 +9,7 @@ scheduling system
 form of a directed acyclic graph, and topological traversal is performed from nodes with zero degrees of entry until
 there are no subsequent nodes. Examples are as follows:
 
-![about-glossary](/img/new_ui/dev/about/glossary.png)
+![about-glossary](../../../img/new_ui/dev/about/glossary.png)
 
 **Process definition**: Visualization formed by dragging task nodes and establishing task node associations**DAG**
 
diff --git a/docs/docs/en/architecture/cache.md b/docs/docs/en/architecture/cache.md
index a0251e314f..3885dddd24 100644
--- a/docs/docs/en/architecture/cache.md
+++ b/docs/docs/en/architecture/cache.md
@@ -39,4 +39,4 @@ Note: the final strategy for cache update comes from the expiration strategy con
 
 The sequence diagram shows below:
 
-<img src="/img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
+<img src="../../../img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
diff --git a/docs/docs/en/architecture/design.md b/docs/docs/en/architecture/design.md
index d2b8be211c..9086f54813 100644
--- a/docs/docs/en/architecture/design.md
+++ b/docs/docs/en/architecture/design.md
@@ -5,7 +5,7 @@
 ### System Architecture Diagram
 
 <p align="center">
-  <img src="/img/architecture-1.3.0.jpg" alt="System architecture diagram"  width="70%" />
+  <img src="../../../img/architecture-1.3.0.jpg" alt="System architecture diagram"  width="70%" />
   <p align="center">
         <em>System architecture diagram</em>
   </p>
@@ -14,7 +14,7 @@
 ### Start Process Activity Diagram
 
 <p align="center">
-  <img src="/img/process-start-flow-1.3.0.png" alt="Start process activity diagram"  width="70%" />
+  <img src="../../../img/process-start-flow-1.3.0.png" alt="Start process activity diagram"  width="70%" />
   <p align="center">
         <em>Start process activity diagram</em>
   </p>
@@ -112,7 +112,7 @@ DolphinScheduler uses ZooKeeper distributed lock to implement only one Master ex
 
 2. Flow diagram of implementation of Scheduler thread distributed lock in DolphinScheduler:
  <p align="center">
-   <img src="/img/distributed_lock_procss.png" alt="Obtain distributed lock process"  width="50%" />
+   <img src="../../../img/distributed_lock_procss.png" alt="Obtain distributed lock process"  width="50%" />
  </p>
 
 
@@ -155,7 +155,7 @@ Among them, the Master monitors the directories of other Masters and Workers. If
 - Master fault tolerance:
 
 <p align="center">
-   <img src="/img/failover-master.jpg" alt="failover-master"  width="50%" />
+   <img src="../../../img/failover-master.jpg" alt="failover-master"  width="50%" />
  </p>
 
 Fault tolerance range: From the perspective of host, the fault tolerance range of Master includes: own host and node host that does not exist in the registry, and the entire process of fault tolerance will be locked;
@@ -167,7 +167,7 @@ Fault-tolerant post-processing: After the fault tolerance of ZooKeeper Master co
 - Worker fault tolerance:
 
 <p align="center">
-   <img src="/img/failover-worker.jpg" alt="failover-worker"  width="50%" />
+   <img src="../../../img/failover-worker.jpg" alt="failover-worker"  width="50%" />
  </p>
 
 Fault tolerance range: From the perspective of process instance, each Master is only responsible for fault tolerance of its own process instance; it will lock only when `handleDeadServer`;
diff --git a/docs/docs/en/architecture/metadata.md b/docs/docs/en/architecture/metadata.md
index 54ebc56fbf..7c1b012956 100644
--- a/docs/docs/en/architecture/metadata.md
+++ b/docs/docs/en/architecture/metadata.md
@@ -36,7 +36,7 @@
 
 ### User Queue DataSource
 
-![image.png](/img/metadata-erd/user-queue-datasource.png)
+![image.png](../../../img/metadata-erd/user-queue-datasource.png)
 
 - One tenant can own Multiple users.
 - The queue field in the t_ds_user table stores the queue_name information in the t_ds_queue table, t_ds_tenant stores queue information using queue_id column. During the execution of the process definition, the user queue has the highest priority. If the user queue is null, use the tenant queue.
@@ -44,7 +44,7 @@
   
 ### Project Resource Alert
 
-![image.png](/img/metadata-erd/project-resource-alert.png)
+![image.png](../../../img/metadata-erd/project-resource-alert.png)
 
 - User can have multiple projects, user project authorization completes the relationship binding using project_id and user_id in t_ds_relation_project_user table.
 - The user_id in the t_ds_projcet table represents the user who create the project, and the user_id in the t_ds_relation_project_user table represents users who have permission to the project.
@@ -53,7 +53,7 @@
   
 ### Command Process Task
 
-![image.png](/img/metadata-erd/command.png)<br />![image.png](/img/metadata-erd/process-task.png)
+![image.png](../../../img/metadata-erd/command.png)<br />![image.png](../../../img/metadata-erd/process-task.png)
 
 - A project has multiple process definitions, a process definition can generate multiple process instances, and a process instance can generate multiple task instances.
 - The t_ds_schedulers table stores the specified time schedule information for process definition.
diff --git a/docs/docs/en/development/architecture-design.md b/docs/docs/en/development/architecture-design.md
index e48d53e9e1..a46bfb2859 100644
--- a/docs/docs/en/development/architecture-design.md
+++ b/docs/docs/en/development/architecture-design.md
@@ -6,7 +6,7 @@ Before explaining the architecture of the schedule system, let us first understa
 **DAG:** Full name Directed Acyclic Graph,referred to as DAG。Tasks in the workflow are assembled in the form of directed acyclic graphs, which are topologically traversed from nodes with zero indegrees of ingress until there are no successor nodes. For example, the following picture:
 
 <p align="center">
-  <img src="/img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
+  <img src="../../../img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
   <p align="center">
         <em>dag example</em>
   </p>
@@ -40,7 +40,7 @@ Before explaining the architecture of the schedule system, let us first understa
 
 #### 2.1 System Architecture Diagram
 <p align="center">
-  <img src="/img/architecture.jpg" alt="System Architecture Diagram"  />
+  <img src="../../../img/architecture.jpg" alt="System Architecture Diagram"  />
   <p align="center">
         <em>System Architecture Diagram</em>
   </p>
@@ -140,13 +140,13 @@ DolphinScheduler uses ZooKeeper distributed locks to implement only one Master t
 1. The core process algorithm for obtaining distributed locks is as follows
 
  <p align="center">
-   <img src="/img/architecture-design/distributed_lock.png" alt="Get Distributed Lock Process" width="70%" />
+   <img src="../../../img/architecture-design/distributed_lock.png" alt="Get Distributed Lock Process" width="70%" />
  </p>
 
 2. Scheduler thread distributed lock implementation flow chart in DolphinScheduler:
 
  <p align="center">
-   <img src="/img/architecture-design/distributed_lock_procss.png" alt="Get Distributed Lock Process" />
+   <img src="../../../img/architecture-design/distributed_lock_procss.png" alt="Get Distributed Lock Process" />
  </p>
 
 ##### Third, the thread is insufficient loop waiting problem
@@ -155,7 +155,7 @@ DolphinScheduler uses ZooKeeper distributed locks to implement only one Master t
 - If a large number of sub-processes are nested in a large DAG, the following figure will result in a "dead" state:
 
  <p align="center">
-   <img src="/img/architecture-design/lack_thread.png" alt="Thread is not enough to wait for loop" width="70%" />
+   <img src="../../../img/architecture-design/lack_thread.png" alt="Thread is not enough to wait for loop" width="70%" />
  </p>
 
 In the above figure, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread in the thread pool, then the entire DAG process cannot end, and thus the thread cannot be released. This forms the state of the child parent process loop waiting. At this point, the scheduling cluster will no longer be available unless a new Master is started to add threads to break s [...]
@@ -179,7 +179,7 @@ Fault tolerance is divided into service fault tolerance and task retry. Service
 Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant.png" alt="DolphinScheduler Fault Tolerant Design" width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant.png" alt="DolphinScheduler Fault Tolerant Design" width="70%" />
  </p>
 
 The Master monitors the directories of other Masters and Workers. If the remove event is detected, the process instance is fault-tolerant or the task instance is fault-tolerant according to the specific business logic.
@@ -189,7 +189,7 @@ The Master monitors the directories of other Masters and Workers. If the remove
 - Master fault tolerance flow chart:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="70%" />
  </p>
 
 After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler thread in DolphinScheduler. It traverses the DAG to find the "Running" and "Submit Successful" tasks, and monitors the status of its task instance for the "Running" task. You need to determine whether the Task Queue already exists. If it exists, monitor the status of the task instance. If it does not exist, resubmit the task instance.
@@ -199,7 +199,7 @@ After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler
 - Worker fault tolerance flow chart:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="70%" />
  </p>
 
 Once the Master Scheduler thread finds the task instance as "need to be fault tolerant", it takes over the task and resubmits.
@@ -238,13 +238,13 @@ In the early scheduling design, if there is no priority design and fair scheduli
     - The priority of the process definition is that some processes need to be processed before other processes. This can be configured at the start of the process or at the time of scheduled start. There are 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
 
       <p align="center">
-         <img src="/img/architecture-design/process_priority.png" alt="Process Priority Configuration" width="40%" />
+         <img src="../../../img/architecture-design/process_priority.png" alt="Process Priority Configuration" width="40%" />
        </p>
 
     - The priority of the task is also divided into 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
 
       <p align="center">`
-         <img src="/img/architecture-design/task_priority.png" alt="task priority configuration" width="35%" />
+         <img src="../../../img/architecture-design/task_priority.png" alt="task priority configuration" width="35%" />
        </p>
 
 ##### VI. Logback and gRPC implement log access
@@ -255,7 +255,7 @@ In the early scheduling design, if there is no priority design and fair scheduli
 - Considering the lightweightness of DolphinScheduler as much as possible, gRPC was chosen to implement remote access log information.
 
  <p align="center">
-   <img src="/img/architecture-design/grpc.png" alt="grpc remote access" width="50%" />
+   <img src="../../../img/architecture-design/grpc.png" alt="grpc remote access" width="50%" />
  </p>
 
 - We use a custom Logback FileAppender and Filter function to generate a log file for each task instance.
diff --git a/docs/docs/en/development/e2e-test.md b/docs/docs/en/development/e2e-test.md
index 3f5e26af69..1db70f46a3 100644
--- a/docs/docs/en/development/e2e-test.md
+++ b/docs/docs/en/development/e2e-test.md
@@ -104,7 +104,7 @@ The goToTab method is provided in SecurityPage to test the corresponding sidebar
     }
 ```
 
-![SecurityPage](/img/e2e-test/SecurityPage.png)
+![SecurityPage](../../../img/e2e-test/SecurityPage.png)
 
 For navigation bar options jumping, the goToNav method is provided in `org/apache/dolphinscheduler/e2e/pages/common/NavBarPage.java`. The currently supported pages are: ProjectPage, SecurityPage and ResourcePage.
 
@@ -139,7 +139,7 @@ For navigation bar options jumping, the goToNav method is provided in `org/apach
 
 Current E2E test cases supported include: File Management, Project Management, Queue Management, Tenant Management, User Management, Worker Group Management and Workflow Test.
 
-![E2E_Cases](/img/e2e-test/E2E_Cases.png)
+![E2E_Cases](../../../img/e2e-test/E2E_Cases.png)
 
 The following is an example of a tenant management test. As explained earlier, we use docker-compose for deployment, so for each test case, we need to import the corresponding file in the form of an annotation.
 
@@ -186,12 +186,12 @@ When running E2E tests locally, the `-Dlocal=true` parameter can be configured t
 When running E2E tests with `M1` chip, you can use `-Dm1_chip=true` parameter to configure containers supported by 
 `ARM64`.
 
-![Dlocal](/img/e2e-test/Dlocal.png)
+![Dlocal](../../../img/e2e-test/Dlocal.png)
 
 If a connection timeout occurs during a local run, increase the load time to a recommended 30 and above.
 
-![timeout](/img/e2e-test/timeout.png)
+![timeout](../../../img/e2e-test/timeout.png)
 
 The test run will be available as an MP4 file.
 
-![MP4](/img/e2e-test/MP4.png)
+![MP4](../../../img/e2e-test/MP4.png)
diff --git a/docs/docs/en/faq.md b/docs/docs/en/faq.md
index 31b7ef47a9..e99660558a 100644
--- a/docs/docs/en/faq.md
+++ b/docs/docs/en/faq.md
@@ -558,7 +558,7 @@ When the master service and worker service are registered with zookeeper, releva
 If the ip address is obtained incorrectly, please check the network information. For example, in the Linux system, use the `ifconfig` command to view the network information. The following figure is an example:
 
 <p align="center">
-  <img src="/img/network/network_config.png" width="60%" />
+  <img src="../../img/network/network_config.png" width="60%" />
 </p>
 
 You can use the three strategies provided by dolphinscheduler to get the available ip:
diff --git a/docs/docs/en/guide/alert/alert_plugin_user_guide.md b/docs/docs/en/guide/alert/alert_plugin_user_guide.md
index f04b0a6d9b..6c41023c48 100644
--- a/docs/docs/en/guide/alert/alert_plugin_user_guide.md
+++ b/docs/docs/en/guide/alert/alert_plugin_user_guide.md
@@ -4,7 +4,7 @@
 
 In version 2.0.0, users need to create alert instances, and needs to choose an alarm policy when defining an alarm instance, there are three options: send if the task succeeds, send on failure, and send on both success and failure. when the workflow or task is executed, if an alarm is triggered, calling the alarm instance send method needs a logical judgment, which matches the alarm instance with the task status, executes the alarm instance sending logic if it matches, and filters if it  [...]
 The alarm module supports the following scenarios:
-<img src="/img/alert/alert_scenarios_en.png">
+<img src="../../../../img/alert/alert_scenarios_en.png">
 
 The steps to use are as follows:
 
@@ -12,7 +12,7 @@ First, go to the Security Center page. Select Alarm Group Management, click Alar
 
 Then select Alarm Group Management, create an alarm group, and choose the corresponding alarm instance.
 
-![alert-instance01](/img/new_ui/dev/alert/alert_instance01.png)
-![alert-instance02](/img/new_ui/dev/alert/alert_instance02.png)
-![alert-instance03](/img/new_ui/dev/alert/alert_instance03.png)
-![alert-instance04](/img/new_ui/dev/alert/alert_instance04.png)
+![alert-instance01](../../../../img/new_ui/dev/alert/alert_instance01.png)
+![alert-instance02](../../../../img/new_ui/dev/alert/alert_instance02.png)
+![alert-instance03](../../../../img/new_ui/dev/alert/alert_instance03.png)
+![alert-instance04](../../../../img/new_ui/dev/alert/alert_instance04.png)
diff --git a/docs/docs/en/guide/alert/dingtalk.md b/docs/docs/en/guide/alert/dingtalk.md
index 2c0708e275..950a75ae85 100644
--- a/docs/docs/en/guide/alert/dingtalk.md
+++ b/docs/docs/en/guide/alert/dingtalk.md
@@ -3,7 +3,7 @@
 If you need to use `DingTalk` for alerting, create an alert instance in the alert instance management and select the DingTalk plugin. 
 The following shows the `DingTalk` configuration example:
 
-![alert-dingtalk](/img/new_ui/dev/alert/alert_dingtalk.png)
+![alert-dingtalk](../../../../img/new_ui/dev/alert/alert_dingtalk.png)
 
 ## Parameter Configuration
 
diff --git a/docs/docs/en/guide/alert/enterprise-webexteams.md b/docs/docs/en/guide/alert/enterprise-webexteams.md
index 19819cdbb8..5c9228fd10 100644
--- a/docs/docs/en/guide/alert/enterprise-webexteams.md
+++ b/docs/docs/en/guide/alert/enterprise-webexteams.md
@@ -4,7 +4,7 @@ If you need to use `Webex Teams` to alert, create an alert instance in the alert
 You can pick private alert or room group chat alert.
 The following is the `WebexTeams` configuration example:
 
-![enterprise-webexteams-plugin](/img/alert/enterprise-webexteams-plugin.png)
+![enterprise-webexteams-plugin](../../../../img/alert/enterprise-webexteams-plugin.png)
 
 ## Parameter Configuration
 
@@ -25,13 +25,13 @@ The following is the `WebexTeams` configuration example:
 
 Create a bot visit [Official Website My-Apps](https://developer.webex.com/my-apps) to `Create a New APP` and select `Create a Bot`, fill in the bot information and acquire `bot username` and `bot ID` for further usage.
 
-![enterprise-webexteams-bot-info](/img/alert/enterprise-webexteams-bot.png)
+![enterprise-webexteams-bot-info](../../../../img/alert/enterprise-webexteams-bot.png)
 
 ## Create a Room
 
 Create a root visit [Official Website for Developer APIs](https://developer.webex.com/docs/api/v1/rooms/create-a-room) to create a new room, fill in the room name and acquire `id`(room ID) and `creatorId` for further usage.
 
-![enterprise-webexteams-room-info](/img/alert/enterprise-webexteams-room.png)
+![enterprise-webexteams-room-info](../../../../img/alert/enterprise-webexteams-room.png)
 
 ### Invite Bot to the Room
 
@@ -43,22 +43,22 @@ In this way, you can send private message to a person by `User Email` or `UserId
 The `user Email` is user register Email.
 The `userId` we can acquire it from the `creatorId` of creating a new group chat room API.
 
-![enterprise-webexteams-private-message-form](/img/alert/enterprise-webexteams-private-form.png)
+![enterprise-webexteams-private-message-form](../../../../img/alert/enterprise-webexteams-private-form.png)
 
 ### Private Alert Message Example
 
-![enterprise-webexteams-private-message-example](/img/alert/enterprise-webexteams-private-msg.png)
+![enterprise-webexteams-private-message-example](../../../../img/alert/enterprise-webexteams-private-msg.png)
 
 ## Send Group Room Message
 
 In this way, you can send group room message to a room by `Room ID`. Fill in the `Room Id` and `Bot Access Token` and select `Destination` `roomId`.
 The `Room ID` we can acquire it from the `id` of creating a new group chat room API.
 
-![enterprise-webexteams-room](/img/alert/enterprise-webexteams-group-form.png)
+![enterprise-webexteams-room](../../../../img/alert/enterprise-webexteams-group-form.png)
 
 ### Group Room Alert Message Example
 
-![enterprise-webexteams-room-message-example](/img/alert/enterprise-webexteams-room-msg.png)
+![enterprise-webexteams-room-message-example](../../../../img/alert/enterprise-webexteams-room-msg.png)
 
 [WebexTeams Application Bot Guide](https://developer.webex.com/docs/bots)
 [WebexTeams Message Guide](https://developer.webex.com/docs/api/v1/messages/create-a-message)
diff --git a/docs/docs/en/guide/alert/enterprise-wechat.md b/docs/docs/en/guide/alert/enterprise-wechat.md
index 41970143df..1fec0485e6 100644
--- a/docs/docs/en/guide/alert/enterprise-wechat.md
+++ b/docs/docs/en/guide/alert/enterprise-wechat.md
@@ -3,7 +3,7 @@
 If you need to use `Enterprise WeChat` to alert, create an alert instance in the alert instance management, and choose the `WeChat` plugin.
 The following is the `WeChat` configuration example:
 
-![enterprise-wechat-plugin](/img/alert/enterprise-wechat-plugin.png)
+![enterprise-wechat-plugin](../../../../img/alert/enterprise-wechat-plugin.png)
 
 ## Send Type
 
@@ -14,15 +14,15 @@ The parameter `send.type` corresponds to sending messages to Enterprise WeChat c
 The APP sends type means to notify the alert results via Enterprise WeChat customized APPs, supports sending messages to both specified users and all members. Currently, send to specified enterprise department and tags are not supported, a new PR to contribute is welcomed.
 The following is the `APP` alert config example:
 
-![enterprise-wechat-app-msg-config](/img/alert/wechat-app-form-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/wechat-app-form-example.png)
 
 The following is the `APP` `MARKDOWN` alert message example:
 
-![enterprise-wechat-app-msg-markdown](/img/alert/enterprise-wechat-app-msg-md.png)
+![enterprise-wechat-app-msg-markdown](../../../../img/alert/enterprise-wechat-app-msg-md.png)
 
 The following is the `APP` `TEXT` alert message example:
 
-![enterprise-wechat-app-msg-text](/img/alert/enterprise-wechat-app-msg.png)
+![enterprise-wechat-app-msg-text](../../../../img/alert/enterprise-wechat-app-msg.png)
 
 #### Prerequisites
 
@@ -34,7 +34,7 @@ The Enterprise WeChat APPs support sending messages to both specified users and
 To acquire user `userId` refer to [Official Doc](https://developer.work.weixin.qq.com/document/path/95402), acquire `userId` by user phone number.
 The following is the `query userId` API example:
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-query-userid.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-query-userid.png)
 
 #### References
 
@@ -46,15 +46,15 @@ APP: https://work.weixin.qq.com/api/doc/90000/90135/90236
 The Group Chat send type means to notify the alert results via group chat created by Enterprise WeChat API, sending messages to all members of the group and specified users are not supported.
 The following is the `Group Chat` alert config example:
 
-![enterprise-wechat-app-msg-config](/img/alert/wechat-group-form-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/wechat-group-form-example.png)
 
 The following is the `APP` `MARKDOWN` alert message example:
 
-![enterprise-wechat-group-msg-markdown](/img/alert/enterprise-wechat-group-msg-md.png)
+![enterprise-wechat-group-msg-markdown](../../../../img/alert/enterprise-wechat-group-msg-md.png)
 
 The following is the `Group Chat` `TEXT` alert message example:
 
-![enterprise-wechat-group-msg-text](/img/alert/enterprise-wechat-group-msg.png)
+![enterprise-wechat-group-msg-text](../../../../img/alert/enterprise-wechat-group-msg.png)
 
 #### Prerequisites
 
@@ -62,9 +62,9 @@ Before sending messages to group chat, create a new group chat by Enterprise WeC
 To acquire user `userId` refer to [Official Doc](https://developer.work.weixin.qq.com/document/path/95402), acquire `userId` by user phone number.
 The following is the `create new group chat` API and `query userId` API example:
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-create-group.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-create-group.png)
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-query-userid.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-query-userid.png)
 
 #### References
 
diff --git a/docs/docs/en/guide/alert/http.md b/docs/docs/en/guide/alert/http.md
index 9174fafbd8..e69cc0f2e9 100644
--- a/docs/docs/en/guide/alert/http.md
+++ b/docs/docs/en/guide/alert/http.md
@@ -24,11 +24,11 @@ Using `POST` and `GET` method to send `Http` request in the `Request Type`.
 Send alert information by `Http` GET method.
 The following shows the `GET` configuration example:
 
-![enterprise-wechat-app-msg-config](/img/alert/http-get-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/http-get-example.png)
 
 ### POST Http
 
 Send alert information inside `Http` body by `Http` POST method.
 The following shows the `POST` configuration example:
 
-![enterprise-wechat-app-msg-config](/img/alert/http-post-example.png)
\ No newline at end of file
+![enterprise-wechat-app-msg-config](../../../../img/alert/http-post-example.png)
\ No newline at end of file
diff --git a/docs/docs/en/guide/alert/script.md b/docs/docs/en/guide/alert/script.md
index fb7a34b883..0e9b89440f 100644
--- a/docs/docs/en/guide/alert/script.md
+++ b/docs/docs/en/guide/alert/script.md
@@ -3,7 +3,7 @@
 If you need to use `Shell script` for alerting, create an alert instance in the alert instance management and select the `Script` plugin. 
 The following shows the `Script` configuration example:
 
-![dingtalk-plugin](/img/alert/script-plugin.png)
+![dingtalk-plugin](../../../../img/alert/script-plugin.png)
 
 ## Parameter Configuration
 
diff --git a/docs/docs/en/guide/alert/telegram.md b/docs/docs/en/guide/alert/telegram.md
index d6fed91806..49b11037a3 100644
--- a/docs/docs/en/guide/alert/telegram.md
+++ b/docs/docs/en/guide/alert/telegram.md
@@ -3,7 +3,7 @@
 If you need `Telegram` to alert, create an alert instance in the alert instance management, and choose the `Telegram` plugin.
 The following shows the `Telegram` configuration example:
 
-![alert-telegram](/img/new_ui/dev/alert/alert_telegram.png)
+![alert-telegram](../../../../img/new_ui/dev/alert/alert_telegram.png)
 
 ## Parameter Configuration
 
diff --git a/docs/docs/en/guide/data-quality.md b/docs/docs/en/guide/data-quality.md
index ed3ee03cc9..5584b2e36d 100644
--- a/docs/docs/en/guide/data-quality.md
+++ b/docs/docs/en/guide/data-quality.md
@@ -65,7 +65,7 @@ The goal of the null value check is to check the number of empty rows in the spe
   ```
 
 ### UI Guide
-![dataquality_null_check](/img/tasks/demo/null_check.png)
+![dataquality_null_check](../../../img/tasks/demo/null_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -87,7 +87,7 @@ The goal of the null value check is to check the number of empty rows in the spe
 ### Introduction
 The timeliness check is used to check whether the data is processed within the expected time. The start time and end time can be specified to define the time range. If the amount of data within the time range does not reach the set threshold, the check task will be judged as fail
 ### UI Guide
-![dataquality_timeliness_check](/img/tasks/demo/timeliness_check.png)
+![dataquality_timeliness_check](../../../img/tasks/demo/timeliness_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -112,7 +112,7 @@ The timeliness check is used to check whether the data is processed within the e
 ### Introduction
 The goal of field length verification is to check whether the length of the selected field meets the expectations. If there is data that does not meet the requirements, and the number of rows exceeds the threshold, the task will be judged to fail
 ### UI Guide
-![dataquality_length_check](/img/tasks/demo/field_length_check.png)
+![dataquality_length_check](../../../img/tasks/demo/field_length_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -136,7 +136,7 @@ The goal of field length verification is to check whether the length of the sele
 ### Introduction
 The goal of the uniqueness check is to check whether the field is duplicated. It is generally used to check whether the primary key is duplicated. If there is duplication and the threshold is reached, the check task will be judged to be failed.
 ### UI Guide
-![dataquality_uniqueness_check](/img/tasks/demo/uniqueness_check.png)
+![dataquality_uniqueness_check](../../../img/tasks/demo/uniqueness_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -158,7 +158,7 @@ The goal of the uniqueness check is to check whether the field is duplicated. It
 ### Introduction
 The goal of regular expression verification is to check whether the format of the value of a field meets the requirements, such as time format, email format, ID card format, etc. If there is data that does not meet the format and exceeds the threshold, the task will be judged as failed.
 ### UI Guide
-![dataquality_regex_check](/img/tasks/demo/regexp_check.png)
+![dataquality_regex_check](../../../img/tasks/demo/regexp_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -181,7 +181,7 @@ The goal of regular expression verification is to check whether the format of th
 ### Introduction
 The goal of enumeration value verification is to check whether the value of a field is within the range of enumeration values. If there is data that is not in the range of enumeration values ​​and exceeds the threshold, the task will be judged to fail
 ### UI Guide
-![dataquality_enum_check](/img/tasks/demo/enumeration_check.png)
+![dataquality_enum_check](../../../img/tasks/demo/enumeration_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -203,7 +203,7 @@ The goal of enumeration value verification is to check whether the value of a fi
 ### Introduction
 The goal of table row number verification is to check whether the number of rows in the table reaches the expected value. If the number of rows does not meet the standard, the task will be judged as failed.
 ### UI Guide
-![dataquality_count_check](/img/tasks/demo/table_count_check.png)
+![dataquality_count_check](../../../img/tasks/demo/table_count_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the validation data is located
@@ -224,7 +224,7 @@ The goal of table row number verification is to check whether the number of rows
 ## Custom SQL Check
 ### Introduction
 ### UI Guide
-![dataquality_custom_sql_check](/img/tasks/demo/custom_sql_check.png)
+![dataquality_custom_sql_check](../../../img/tasks/demo/custom_sql_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the data to be verified is located
@@ -259,7 +259,7 @@ Accuracy checks are performed by comparing the accuracy differences of data reco
 |b|3|
 If you compare the data in c1 and c21, the tables test1 and test2 are exactly the same. If you compare c2 and c22, the data in table test1 and table test2 are inconsistent.
 ### UI Guide
-![dataquality_multi_table_accuracy_check](/img/tasks/demo/multi_table_accuracy_check.png)
+![dataquality_multi_table_accuracy_check](../../../img/tasks/demo/multi_table_accuracy_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: drop-down to select the table where the data to be verified is located
@@ -280,7 +280,7 @@ If you compare the data in c1 and c21, the tables test1 and test2 are exactly th
 ### Introduction
 Two-table value comparison allows users to customize different SQL statistics for two tables and compare the corresponding values. For example, for the source table A, the total amount of a certain column is calculated, and for the target table, the total amount of a certain column is calculated. value sum2, compare sum1 and sum2 to determine the check result
 ### UI Guide
-![dataquality_multi_table_comparison_check](/img/tasks/demo/multi_table_comparison_check.png)
+![dataquality_multi_table_comparison_check](../../../img/tasks/demo/multi_table_comparison_check.png)
 - Source data type: select MySQL, PostgreSQL, etc.
 - Source data source: the corresponding data source under the source data type
 - Source data table: the table where the data is to be verified
@@ -302,9 +302,9 @@ Two-table value comparison allows users to customize different SQL statistics fo
     - Blocking: The data quality task fails, the DolphinScheduler task result is failed, and an alarm is sent
 
 ## Task result view
-![dataquality_result](/img/tasks/demo/result.png)
+![dataquality_result](../../../img/tasks/demo/result.png)
 ## Rule View
 ### List of rules
-![dataquality_rule_list](/img/tasks/demo/rule_list.png)
+![dataquality_rule_list](../../../img/tasks/demo/rule_list.png)
 ### Rules Details
-![dataquality_rule_detail](/img/tasks/demo/rule_detail.png)
\ No newline at end of file
+![dataquality_rule_detail](../../../img/tasks/demo/rule_detail.png)
\ No newline at end of file
diff --git a/docs/docs/en/guide/datasource/hive.md b/docs/docs/en/guide/datasource/hive.md
index 6ac14fa479..2e82c20535 100644
--- a/docs/docs/en/guide/datasource/hive.md
+++ b/docs/docs/en/guide/datasource/hive.md
@@ -2,7 +2,7 @@
 
 ## Use HiveServer2
 
-![hive](/img/new_ui/dev/datasource/hive.png)
+![hive](../../../../img/new_ui/dev/datasource/hive.png)
 
 - Datasource: select `HIVE`
 - Datasource name: enter the name of the DataSource
@@ -19,7 +19,7 @@
 
 ## Use HiveServer2 HA ZooKeeper
 
-![hive-server2](/img/new_ui/dev/datasource/hiveserver2.png)
+![hive-server2](../../../../img/new_ui/dev/datasource/hiveserver2.png)
 
 NOTICE: If Kerberos is disabled, ensure the parameter `hadoop.security.authentication.startup.state` is false, and parameter `java.security.krb5.conf.path` value sets null. 
 If **Kerberos** is enabled, needs to set the following parameters  in `common.properties`: 
diff --git a/docs/docs/en/guide/datasource/mysql.md b/docs/docs/en/guide/datasource/mysql.md
index 1648204db4..5f339a57f1 100644
--- a/docs/docs/en/guide/datasource/mysql.md
+++ b/docs/docs/en/guide/datasource/mysql.md
@@ -1,6 +1,6 @@
 # MySQL
 
-![mysql](/img/new_ui/dev/datasource/mysql.png)
+![mysql](../../../../img/new_ui/dev/datasource/mysql.png)
 
 - Datasource: select MYSQL
 - Datasource name: enter the name of the DataSource
diff --git a/docs/docs/en/guide/datasource/postgresql.md b/docs/docs/en/guide/datasource/postgresql.md
index 3083e859c4..a39ed0efbc 100644
--- a/docs/docs/en/guide/datasource/postgresql.md
+++ b/docs/docs/en/guide/datasource/postgresql.md
@@ -1,6 +1,6 @@
 # PostgreSQL
 
-![postgresql](/img/new_ui/dev/datasource/postgresql.png)
+![postgresql](../../../../img/new_ui/dev/datasource/postgresql.png)
 
 - Datasource: select POSTGRESQL
 - Datasource name: enter the name of the DataSource
diff --git a/docs/docs/en/guide/datasource/spark.md b/docs/docs/en/guide/datasource/spark.md
index f481966fc0..a9b6b2cf0f 100644
--- a/docs/docs/en/guide/datasource/spark.md
+++ b/docs/docs/en/guide/datasource/spark.md
@@ -1,6 +1,6 @@
 # Spark
 
-![sparksql](/img/new_ui/dev/datasource/sparksql.png)
+![sparksql](../../../../img/new_ui/dev/datasource/sparksql.png)
 
 - Datasource: select Spark
 - Datasource name: enter the name of the DataSource
diff --git a/docs/docs/en/guide/homepage.md b/docs/docs/en/guide/homepage.md
index f8db29df51..64427c136c 100644
--- a/docs/docs/en/guide/homepage.md
+++ b/docs/docs/en/guide/homepage.md
@@ -2,4 +2,4 @@
 
 The home page contains task status statistics, process status statistics, and workflow definition statistics for all projects of the user.
 
-![homepage](/img/new_ui/dev/homepage/homepage.png)
+![homepage](../../../img/new_ui/dev/homepage/homepage.png)
diff --git a/docs/docs/en/guide/monitor.md b/docs/docs/en/guide/monitor.md
index 5ccb95d0da..821baaa47c 100644
--- a/docs/docs/en/guide/monitor.md
+++ b/docs/docs/en/guide/monitor.md
@@ -8,25 +8,25 @@
 
 - Mainly related to master information.
 
-![master](/img/new_ui/dev/monitor/master.png)
+![master](../../../img/new_ui/dev/monitor/master.png)
 
 ### Worker Server
 
 - Mainly related to worker information.
 
-![worker](/img/new_ui/dev/monitor/worker.png)
+![worker](../../../img/new_ui/dev/monitor/worker.png)
 
 ### Database
 
 - Mainly the health status of the DB.
 
-![db](/img/new_ui/dev/monitor/db.png)
+![db](../../../img/new_ui/dev/monitor/db.png)
 
 ## Statistics Management
 
 ### Statistics
 
-![statistics](/img/new_ui/dev/monitor/statistics.png)
+![statistics](../../../img/new_ui/dev/monitor/statistics.png)
 
 - Number of commands wait to be executed: statistics of the `t_ds_command` table data.
 - The number of failed commands: statistics of the `t_ds_error_command` table data.
@@ -38,4 +38,4 @@
 The audit log provides information about who accesses the system and the operations made to the system and record related
 time, which strengthen the security of the system and maintenance.
 
-![audit-log](/img/new_ui/dev/monitor/audit-log.jpg)
+![audit-log](../../../img/new_ui/dev/monitor/audit-log.jpg)
diff --git a/docs/docs/en/guide/open-api.md b/docs/docs/en/guide/open-api.md
index 656fb415b4..606d670bdf 100644
--- a/docs/docs/en/guide/open-api.md
+++ b/docs/docs/en/guide/open-api.md
@@ -10,11 +10,11 @@ Generally, projects and processes are created through pages, but considering the
 
 1. Log in to the scheduling system, click "Security", then click "Token manage" on the left, and click "Create token" to create a token.
 
-![create-token](/img/new_ui/dev/security/create-token.png)
+![create-token](../../../img/new_ui/dev/security/create-token.png)
 
 2. Select the "Expiration time" (Token validity time), select "User" (choose the specified user to perform the API operation), click "Generate token", copy the `Token` string, and click "Submit".
 
-![token-expiration](/img/new_ui/dev/open-api/token_expiration.png)
+![token-expiration](../../../img/new_ui/dev/open-api/token_expiration.png)
 
 ### Examples
 
@@ -24,7 +24,7 @@ Generally, projects and processes are created through pages, but considering the
 
 > Address:http://{API server ip}:12345/dolphinscheduler/doc.html?language=en_US&lang=en
 
-![api-doc](/img/new_ui/dev/open-api/api_doc.png)
+![api-doc](../../../img/new_ui/dev/open-api/api_doc.png)
 
 2. select a test API, the API selected for this test is `queryAllProjectList`
 
@@ -36,7 +36,7 @@ Generally, projects and processes are created through pages, but considering the
     token: The Token just generated
     ```
    
-![api-test](/img/new_ui/dev/open-api/api_test.png)
+![api-test](../../../img/new_ui/dev/open-api/api_test.png)
 
 #### Create a Project
 
@@ -44,15 +44,15 @@ This demonstrates how to use the calling api to create the corresponding project
 
 By consulting the api documentation, configure the KEY as Accept and VALUE as the parameter of application/json in the headers of Postman.
 
-![create-project01](/img/new_ui/dev/open-api/create_project01.png)
+![create-project01](../../../img/new_ui/dev/open-api/create_project01.png)
 
 And then configure the required projectName and description parameters in Body.
 
-![create-project02](/img/new_ui/dev/open-api/create_project02.png)
+![create-project02](../../../img/new_ui/dev/open-api/create_project02.png)
 
 Check the post request result.
 
-![create-project03](/img/new_ui/dev/open-api/create_project03.png)
+![create-project03](../../../img/new_ui/dev/open-api/create_project03.png)
 
 The returned `msg` information is "success", indicating that we have successfully created the project through API.
 
@@ -60,9 +60,9 @@ If you are interested in the source code of creating a project, please continue
 
 ### Appendix: The Source Code of Creating a Project
 
-![api-source01](/img/new_ui/dev/open-api/api_source01.png)
+![api-source01](../../../img/new_ui/dev/open-api/api_source01.png)
 
-![api-source02](/img/new_ui/dev/open-api/api_source02.png)
+![api-source02](../../../img/new_ui/dev/open-api/api_source02.png)
 
 
 
diff --git a/docs/docs/en/guide/parameter/context.md b/docs/docs/en/guide/parameter/context.md
index 170477c078..5b0649f02c 100644
--- a/docs/docs/en/guide/parameter/context.md
+++ b/docs/docs/en/guide/parameter/context.md
@@ -28,7 +28,7 @@ The user needs to pass the parameter when creating the shell script, the output
 
 Create a Node_A task, add output and value parameters to the custom parameters, and write the following script:
 
-![context-parameter01](/img/new_ui/dev/parameter/context_parameter01.png)
+![context-parameter01](../../../../img/new_ui/dev/parameter/context_parameter01.png)
 
 Parameter Description:
 
@@ -39,13 +39,13 @@ When the SHELL node is defined, the log detects the format of `${setValue(output
 
 Create the Node_B task, which is mainly used to test and output the parameters passed by the upstream task Node_A.
 
-![context-parameter02](/img/new_ui/dev/parameter/context_parameter02.png)
+![context-parameter02](../../../../img/new_ui/dev/parameter/context_parameter02.png)
 
 #### Create SQL tasks and use parameters
 
 When the SHELL task is completed, we can use the output passed upstream as the query object for the SQL. The id of the query is renamed to ID and is output as a parameter.
 
-![context-parameter03](/img/new_ui/dev/parameter/context_parameter03.png)
+![context-parameter03](../../../../img/new_ui/dev/parameter/context_parameter03.png)
 
 > Note: If the result of the SQL node has only one row, one or multiple fields, the name of the `prop` needs to be the same as the field name. The data type can choose structure except `LIST`. The parameter assigns the value according to the same column name in the SQL query result.
 >
@@ -55,7 +55,7 @@ When the SHELL task is completed, we can use the output passed upstream as the q
 
 Click on the Save workflow icon and set the global parameters output and value.
 
-![context-parameter03](/img/new_ui/dev/parameter/context_parameter04.png)
+![context-parameter03](../../../../img/new_ui/dev/parameter/context_parameter04.png)
 
 #### View results
 
@@ -63,15 +63,15 @@ After the workflow is created, run the workflow online and view its running resu
 
 The result of Node_A is as follows:
 
-![context-log01](/img/new_ui/dev/parameter/context_log01.png)
+![context-log01](../../../../img/new_ui/dev/parameter/context_log01.png)
 
 The result of Node_B is as follows:
 
-![context-log02](/img/new_ui/dev/parameter/context_log02.png)
+![context-log02](../../../../img/new_ui/dev/parameter/context_log02.png)
 
 The result of Node_mysql is as follows:
 
-![context-log03](/img/new_ui/dev/parameter/context_log03.png)
+![context-log03](../../../../img/new_ui/dev/parameter/context_log03.png)
 
 Even though output is assigned a value of 1 in Node_A's script, the log still shows a value of 100. But according to the principle from [parameter priority](priority.md): `Local Parameter > Parameter Context > Global Parameter`, the output value in Node_B is 1. It proves that the output parameter is passed in the workflow with reference to the expected value, and the query operation is completed using this value in Node_mysql.
 
diff --git a/docs/docs/en/guide/parameter/global.md b/docs/docs/en/guide/parameter/global.md
index 882b1098d2..ab3b9d8874 100644
--- a/docs/docs/en/guide/parameter/global.md
+++ b/docs/docs/en/guide/parameter/global.md
@@ -12,13 +12,13 @@ The specific use method can be determined according to the actual production sit
 
 Create a shell task and enter `echo ${dt}` in the script content. In this case, dt is the global parameter we need to declare. As shown below:
 
-![global-parameter01](/img/new_ui/dev/parameter/global_parameter01.png)
+![global-parameter01](../../../../img/new_ui/dev/parameter/global_parameter01.png)
 
 ### Save the workflow and set global parameters
 
 You could follow this guide to set global parameter: On the workflow definition page, click the plus sign to the right of "Set Global", after filling in the variable name and value, then save it
 
-![global-parameter02](/img/new_ui/dev/parameter/global_parameter02.png)
+![global-parameter02](../../../../img/new_ui/dev/parameter/global_parameter02.png)
 
 > Note: The dt parameter defined here can be referenced by the local parameters of any other node.
 
@@ -26,4 +26,4 @@ You could follow this guide to set global parameter: On the workflow definition
 
 On the task instance page, you can check the log to verify the execution result of the task and determine whether the parameters are valid.
 
-![global-parameter03](/img/new_ui/dev/parameter/global_parameter03.png)
+![global-parameter03](../../../../img/new_ui/dev/parameter/global_parameter03.png)
diff --git a/docs/docs/en/guide/parameter/local.md b/docs/docs/en/guide/parameter/local.md
index cc2b7ff417..f1ba8ea3e4 100644
--- a/docs/docs/en/guide/parameter/local.md
+++ b/docs/docs/en/guide/parameter/local.md
@@ -20,7 +20,7 @@ Usage of local parameters is: at the task define page, click the '+' beside the
 
 This example shows how to use local parameters to print the current date. Create a Shell task and write a script with the content `echo ${dt}`. Click **custom parameter** in the configuration bar, and the configuration is as follows:
 
-![local-parameter01](/img/new_ui/dev/parameter/local_parameter01.png)
+![local-parameter01](../../../../img/new_ui/dev/parameter/local_parameter01.png)
 
 Parameters:
 
@@ -31,7 +31,7 @@ Parameters:
 
 Save the workflow and run it. View Shell task's log.
 
-![local-parameter02](/img/new_ui/dev/parameter/local_parameter02.png)
+![local-parameter02](../../../../img/new_ui/dev/parameter/local_parameter02.png)
 
 > Note: The local parameter can be used in the workflow of the current task node. If it is set to OUT, it can be passed to the downstream workflow. Please refer to: [Parameter Context](context.md)
 
@@ -41,7 +41,7 @@ If you want to simple export parameters and then use them in downstream tasks, y
 you can manage your parameters into one single task. You can use syntax `echo '${setValue(set_val=123)}'`(**do not forget the
 single quote**) in Shell task and add new `OUT` custom parameter to export it.
 
-![local-parameter-set-val](/img/new_ui/dev/parameter/local_param_set_val.png)
+![local-parameter-set-val](../../../../img/new_ui/dev/parameter/local_param_set_val.png)
 
 You could get this value in downstream task using syntax `echo '${set_val}'`.
 
@@ -53,7 +53,7 @@ change its value. You can use syntax `echo "#{setValue(set_val_param=${val})}"`(
 using any variable with `setValue`**) in a Shell task, and add a new `IN` custom parameter for the input variable `val` and an `OUT` custom
 parameter for exporting the parameter `set_val_param`.
 
-![local-parameter-set-val-param](/img/new_ui/dev/parameter/local_param_set_val_custom.png)
+![local-parameter-set-val-param](../../../../img/new_ui/dev/parameter/local_param_set_val_custom.png)
 
 You could get this value in downstream task using syntax `echo '${set_val_param}'`.
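
A minimal sketch combining both scripts, with `val` declared as an `IN` custom parameter and `set_val_param` as an `OUT` parameter:

```shell
# upstream Shell task: export the current value of the IN parameter val
echo "#{setValue(set_val_param=${val})}"

# downstream Shell task: read the exported parameter
echo '${set_val_param}'
```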
 
@@ -72,6 +72,6 @@ in Shell task(**do not forget the double quote, if you are using any variable wi
 for exporting parameter `set_val_var`.
 
-![local-parameter-set-val-bash](/img/new_ui/dev/parameter/local_param_set_val_bash.png)
+![local-parameter-set-val-bash](../../../../img/new_ui/dev/parameter/local_param_set_val_bash.png)
 
 You could get this value in downstream task using syntax `echo '${set_val_var}'`.
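
A minimal sketch, assuming the exported value comes from a bash variable computed in the script (the variable name and computation are illustrative):

```shell
# upstream Shell task: compute a value in bash, then export it as set_val_var (OUT)
lines_num=$(wc -l < /tmp/data.txt)   # hypothetical computation
echo "#{setValue(set_val_var=${lines_num})}"

# downstream Shell task: read the exported parameter
echo '${set_val_var}'
```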
diff --git a/docs/docs/en/guide/parameter/priority.md b/docs/docs/en/guide/parameter/priority.md
index 4997af5bec..374c4a06fd 100644
--- a/docs/docs/en/guide/parameter/priority.md
+++ b/docs/docs/en/guide/parameter/priority.md
@@ -19,21 +19,21 @@ The following examples show task parameter priority problems:
 
 1: Use shell nodes to explain the first case.
 
-![priority-parameter01](/img/new_ui/dev/parameter/priority_parameter01.png)
+![priority-parameter01](../../../../img/new_ui/dev/parameter/priority_parameter01.png)
 
 The [useParam] node can use the parameters set in the [createParam] node. The [useParam] node cannot obtain the parameters from the [noUseParam] node because there is no dependency between them. Other task node types follow the same usage rules as this Shell example.
 
-![priority-parameter02](/img/new_ui/dev/parameter/priority_parameter02.png)
+![priority-parameter02](../../../../img/new_ui/dev/parameter/priority_parameter02.png)
 
 The [createParam] node can use parameters directly. In addition, the node creates two parameters named "key" and "key1". "key1" has the same name as the parameter passed by the upstream node and is assigned the value "12". According to the priority rules, the local value "12" wins, and the value from the upstream node is discarded.
 
 2: Use SQL nodes to explain another case.
 
-![priority-parameter03](/img/new_ui/dev/parameter/priority_parameter03.png)
+![priority-parameter03](../../../../img/new_ui/dev/parameter/priority_parameter03.png)
 
 The following shows the definition of the [use_create] node:
 
-![priority-parameter04](/img/new_ui/dev/parameter/priority_parameter04.png)
+![priority-parameter04](../../../../img/new_ui/dev/parameter/priority_parameter04.png)
 
 "status" is a local parameter set by the current node. However, the user also sets a "status" parameter (a global parameter) with the value -1 when saving the process definition. When the SQL executes, the local value 2 takes priority, and the global parameter value is discarded.
 
diff --git a/docs/docs/en/guide/project/project-list.md b/docs/docs/en/guide/project/project-list.md
index 38f719160d..1ce2c5dd40 100644
--- a/docs/docs/en/guide/project/project-list.md
+++ b/docs/docs/en/guide/project/project-list.md
@@ -4,7 +4,7 @@
 
 - Click "Project Management" to enter the project management page, click the "Create Project" button, enter the project name, project description, and click "Submit" to create a new project.
 
-![project-list](/img/new_ui/dev/project/project-list.png)
+![project-list](../../../../img/new_ui/dev/project/project-list.png)
 
 ## Project Home
 
@@ -14,5 +14,5 @@
 - Process status statistics: within the specified time range, count the number of workflow instances in each status: submitted successfully, running, ready to pause, paused, ready to stop, stopped, failed, succeeded, fault-tolerant, killed, and waiting for threads
 - Workflow definition statistics: count the workflow definitions created by this user and granted by the administrator
 
-![project-overview](/img/new_ui/dev/project/project-overview.png)
+![project-overview](../../../../img/new_ui/dev/project/project-overview.png)
 
diff --git a/docs/docs/en/guide/project/task-definition.md b/docs/docs/en/guide/project/task-definition.md
index 53646eb2d9..c30948987a 100644
--- a/docs/docs/en/guide/project/task-definition.md
+++ b/docs/docs/en/guide/project/task-definition.md
@@ -5,7 +5,7 @@ We already have workflow level task editor in [workflow definition](workflow-def
 workflow and then edit its task definition. It is frustrating when you want to edit a task definition but do not remember
 which workflow it belongs to, so we decided to add a `Task Definition` view under the `Task` menu.
 
-![task-definition](/img/new_ui/dev/project/task-definition.jpg)
+![task-definition](../../../../img/new_ui/dev/project/task-definition.jpg)
 
 In this view, you can create, query, update, and delete task definitions by clicking the related button in the `operation` column. The
 most useful feature is that you can query tasks by name with wildcards, which helps when you only remember the task
diff --git a/docs/docs/en/guide/project/task-instance.md b/docs/docs/en/guide/project/task-instance.md
index 6ae52923a3..3a22f21f4f 100644
--- a/docs/docs/en/guide/project/task-instance.md
+++ b/docs/docs/en/guide/project/task-instance.md
@@ -2,8 +2,8 @@
 
 - Click Project Management -> Workflow -> Task Instance. Enter the task instance page, as shown in the figure below, click workflow instance name, you can jump to the workflow instance DAG chart to view the task status.
 
-![task-instance](/img/new_ui/dev/project/task-instance.png)
+![task-instance](../../../../img/new_ui/dev/project/task-instance.png)
 
 - View log: Click the "view log" button in the operation column to view the task execution log.
 
-![task-log](/img/new_ui/dev/project/task-log.png)
+![task-log](../../../../img/new_ui/dev/project/task-log.png)
diff --git a/docs/docs/en/guide/project/workflow-definition.md b/docs/docs/en/guide/project/workflow-definition.md
index c0e2a99dc1..3a914373a3 100644
--- a/docs/docs/en/guide/project/workflow-definition.md
+++ b/docs/docs/en/guide/project/workflow-definition.md
@@ -4,11 +4,11 @@
 
 - Click Project Management -> Workflow -> Workflow Definition, enter the workflow definition page, and click the "Create Workflow" button to enter the **workflow DAG edit** page, as shown in the following figure:
 
-  ![workflow-dag](/img/new_ui/dev/project/workflow-dag.png)
+  ![workflow-dag](../../../../img/new_ui/dev/project/workflow-dag.png)
 
-- Drag from the toolbar <img src="/img/tasks/icons/shell.png" width="15"/> to the canvas, to add a shell task to the canvas, as shown in the figure below:
+- Drag from the toolbar <img src="../../../../img/tasks/icons/shell.png" width="15"/> to the canvas to add a shell task, as shown in the figure below:
 
-  ![demo-shell-simple](/img/tasks/demo/shell.jpg)
+  ![demo-shell-simple](../../../../img/tasks/demo/shell.jpg)
 
 - **Add parameter settings for shell task:**
 
@@ -22,15 +22,15 @@
 
 - **Set dependencies between tasks:** Click the plus sign on the right of the task node to connect tasks; as shown in the figure below, tasks Node_B and Node_C execute in parallel: when task Node_A finishes execution, Node_B and Node_C start simultaneously.
 
-  ![workflow-dependent](/img/new_ui/dev/project/workflow-dependent.png)
+  ![workflow-dependent](../../../../img/new_ui/dev/project/workflow-dependent.png)
 
-- **Delete dependencies:** Click the "arrow" icon in the upper right corner <img src="/img/arrow.png" width="35"/>, select the connection line, and click the "Delete" icon in the upper right corner <img src= "/img/delete.png" width="35"/>, delete dependencies between tasks.
+- **Delete dependencies:** Click the "arrow" icon in the upper right corner <img src="../../../../img/arrow.png" width="35"/>, select the connection line, and click the "Delete" icon in the upper right corner <img src="../../../../img/delete.png" width="35"/> to delete the dependency between tasks.
 
-  ![workflow-delete](/img/new_ui/dev/project/workflow-delete.png)
+  ![workflow-delete](../../../../img/new_ui/dev/project/workflow-delete.png)
 
 - **Save workflow definition:** Click the "Save" button, and the "Set DAG chart name" window pops up, as shown in the figure below. Enter the workflow definition name and description, set global parameters (optional, refer to [global parameters](../parameter/global.md)), then click the "Add" button to finish creating the workflow definition.
 
-  ![workflow-save](/img/new_ui/dev/project/workflow-save.png)
+  ![workflow-save](../../../../img/new_ui/dev/project/workflow-save.png)
 
 > For other types of tasks, please refer to [Task Node Type and Parameter Settings](#TaskParamers). <!-- markdown-link-check-disable-line -->
 
@@ -38,7 +38,7 @@
 
 Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, as shown below:
 
-![workflow-list](/img/new_ui/dev/project/workflow-list.png)
+![workflow-list](../../../../img/new_ui/dev/project/workflow-list.png)
 
 The following are the operation functions of the workflow definition list:
 
@@ -52,17 +52,17 @@ The following are the operation functions of the workflow definition list:
 - **Download:** Download workflow definition to local
 - **Tree Diagram:** Display the task node type and task status in a tree structure, as shown in the figure below:
 
-![workflow-tree](/img/new_ui/dev/project/workflow-tree.png)
+![workflow-tree](../../../../img/new_ui/dev/project/workflow-tree.png)
 
 ## Run the Workflow
 
-- Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, as shown in the figure below, click the "Go Online" button <img src="/img/online.png" width="35"/>to make workflow online.
+- Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, as shown in the figure below, and click the "Go Online" button <img src="../../../../img/online.png" width="35"/> to bring the workflow online.
 
-![workflow-online](/img/new_ui/dev/project/workflow-online.png)
+![workflow-online](../../../../img/new_ui/dev/project/workflow-online.png)
 
 - Click the "Run" button to open the startup parameter setting window, as shown in the figure below. Set the startup parameters and click the "Run" button in the pop-up box; the workflow starts running, and a workflow instance is generated on the workflow instance page.
 
-![workflow-run](/img/new_ui/dev/project/workflow-run.png)
+![workflow-run](../../../../img/new_ui/dev/project/workflow-run.png)
  
   Description of workflow operating parameters: 
        
@@ -77,7 +77,7 @@ The following are the operation functions of the workflow definition list:
    * Complement: two modes, serial complement and parallel complement. Serial complement: within the specified time range, complements execute from the start date to the end date, generating N process instances in turn; parallel complement: within the specified time range, multiple days are complemented at the same time, generating N process instances.
      * You can select the complement time range when executing a scheduled workflow definition (when the timing configuration is not online, a daily complement is performed by default according to the selected time range; if the timing configuration is online, the complement follows the selected time range combined with the timing configuration). For example, to backfill the data from 1st May to 10th May, as shown in the figure below:
 
-    ![workflow-date](/img/new_ui/dev/project/workflow-date.png)
+    ![workflow-date](../../../../img/new_ui/dev/project/workflow-date.png)
 
  > Serial mode: the complement executes sequentially from 1st May to 10th May, and the process instance page generates 10 process instances;
 
@@ -85,20 +85,20 @@ The following are the operation functions of the workflow definition list:
 
 ## Workflow Timing
 
-- Create timing: Click Project Management->Workflow->Workflow Definition, enter the workflow definition page, make the workflow online, click the "timing" button <img src="/img/timing.png" width="35"/> , the timing parameter setting dialog box pops up, as shown in the figure below:
+- Create timing: Click Project Management -> Workflow -> Workflow Definition, enter the workflow definition page, make the workflow online, and click the "timing" button <img src="../../../../img/timing.png" width="35"/>; the timing parameter setting dialog box pops up, as shown in the figure below:
 
-  ![workflow-time01](/img/new_ui/dev/project/workflow-time01.png)
+  ![workflow-time01](../../../../img/new_ui/dev/project/workflow-time01.png)
 
 - Choose the start and end time. Within the time range, the workflow runs at the scheduled intervals; outside the time range, no scheduled workflow instances are generated.
 - Add a schedule that executes once every 5 minutes, as shown in the following figure (see the crontab sketch after the figure):
 
-  ![workflow-time02](/img/new_ui/dev/project/workflow-time02.png)
+  ![workflow-time02](../../../../img/new_ui/dev/project/workflow-time02.png)
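
For reference, DolphinScheduler schedules are expressed as Quartz crontab expressions; an every-5-minutes timing corresponds to:

```
# seconds minutes hours day-of-month month day-of-week year
0 0/5 * * * ? *
```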
 
 - Failure strategy, notification strategy, process priority, worker group, notification group, recipient, and CC are the same as workflow running parameters.
 - Click the "Create" button to create the timing. Now the timing status is "**Offline**" and the timing needs to be brought **Online** to take effect.
-- Timing online: Click the "Timing Management" button <img src="/img/timeManagement.png" width="35"/>, enter the timing management page, click the "online" button, the timing status will change to "online", as shown in the below figure, the workflow makes effect regularly.
+- Timing online: Click the "Timing Management" button <img src="../../../../img/timeManagement.png" width="35"/>, enter the timing management page, and click the "online" button; the timing status changes to "online", as shown in the figure below, and the workflow will then run on schedule.
 
-  ![workflow-time03](/img/new_ui/dev/project/workflow-time03.png)
+  ![workflow-time03](../../../../img/new_ui/dev/project/workflow-time03.png)
 
 ## Import Workflow
 
diff --git a/docs/docs/en/guide/project/workflow-instance.md b/docs/docs/en/guide/project/workflow-instance.md
index 004322e1ab..e38204ef37 100644
--- a/docs/docs/en/guide/project/workflow-instance.md
+++ b/docs/docs/en/guide/project/workflow-instance.md
@@ -4,45 +4,45 @@
 
 - Click Project Management -> Workflow -> Workflow Instance, enter the Workflow Instance page, as shown in the figure below:
 
-![workflow-instance](/img/new_ui/dev/project/workflow-instance.png)
+![workflow-instance](../../../../img/new_ui/dev/project/workflow-instance.png)
 
 - Click the workflow name to enter the DAG view page, and check the task execution status, as shown in the figure below:
 
-![instance-state](/img/new_ui/dev/project/instance-state.png)
+![instance-state](../../../../img/new_ui/dev/project/instance-state.png)
 
 ## View Task Log
 
 - Enter the workflow instance page, click the workflow name, enter the DAG view page, double-click the task node, as shown in the figure below:
 
-![instance-log01](/img/new_ui/dev/project/instance-log01.png)
+![instance-log01](../../../../img/new_ui/dev/project/instance-log01.png)
 
 - Click "View Log", and a log window pops up, as shown in the figure below. You can also view the task log on the task instance page; refer to [Task View Log](./task-instance.md)
 
-![instance-log02](/img/new_ui/dev/project/instance-log02.png)
+![instance-log02](../../../../img/new_ui/dev/project/instance-log02.png)
 
 ## View Task History
 
 - Click Project Management -> Workflow -> Workflow Instance, enter the workflow instance page, and click the workflow name to enter the workflow DAG page;
 - Double-click the task node, as shown in the figure below, and click "View History" to jump to the task instance page, which displays the list of task instances run by the workflow instance
 
-![instance-history](/img/new_ui/dev/project/instance-history.png)
+![instance-history](../../../../img/new_ui/dev/project/instance-history.png)
 
 ## View Operation Parameters
 
 - Click Project Management -> Workflow -> Workflow Instance, enter the workflow instance page, and click the workflow name to enter the workflow DAG page;
-- Click the icon in the upper left corner <img src="/img/run_params_button.png" width="35"/>,View the startup parameters of the workflow instance; click the icon <img src="/img/global_param.png" width="35"/>,View the global and local parameters of the workflow instance, as shown in the following figure:
+- Click the icon in the upper left corner <img src="../../../../img/run_params_button.png" width="35"/> to view the startup parameters of the workflow instance; click the icon <img src="../../../../img/global_param.png" width="35"/> to view the global and local parameters of the workflow instance, as shown in the following figure:
 
-![instance-parameter](/img/new_ui/dev/project/instance-parameter.png)
+![instance-parameter](../../../../img/new_ui/dev/project/instance-parameter.png)
 
 ## Workflow Instance Operation Function
 
 Click Project Management -> Workflow -> Workflow Instance, enter the workflow instance page, as shown in the figure below:
 
-![workflow-instance](/img/new_ui/dev/project/workflow-instance.png)
+![workflow-instance](../../../../img/new_ui/dev/project/workflow-instance.png)
 
 - **Edit:** only processes in success/failed/stop status can be edited. Click the "Edit" button or the workflow instance name to enter the DAG edit page. After editing, click the "Save" button to confirm, as shown in the figure below. In the pop-up box, check "Whether to update the workflow definition": if checked, the modifications made to the instance are saved back to the workflow definition; if not checked, the workflow definition is not updated.
      <p align="center">
-       <img src="/img/editDag-en.png" width="80%" />
+       <img src="../../../../img/editDag-en.png" width="80%" />
      </p>
 - **Rerun:** re-execute the terminated process
 - **Recovery failed:** for failed processes, you can perform failure recovery operations, starting from the failed node
@@ -52,5 +52,5 @@ Click Project Management -> Workflow -> Workflow Instance, enter the workflow in
 - **Delete:** delete the workflow instance and the task instance under the workflow instance
 - **Gantt chart:** the vertical axis of the Gantt chart is the topological sorting of task instances of the workflow instance, and the horizontal axis is the running time of the task instances, as shown in the figure below:
 
-![instance-gantt](/img/new_ui/dev/project/instance-gantt.png)
+![instance-gantt](../../../../img/new_ui/dev/project/instance-gantt.png)
 
diff --git a/docs/docs/en/guide/resource/file-manage.md b/docs/docs/en/guide/resource/file-manage.md
index 533ad194b8..ec871ea04c 100644
--- a/docs/docs/en/guide/resource/file-manage.md
+++ b/docs/docs/en/guide/resource/file-manage.md
@@ -2,7 +2,7 @@
 
 When third-party jars are used in the scheduling process or user-defined scripts are required, you can create them from this page. The types of files that can be created include: txt, log, sh, conf, py, java, and so on. Files can be edited, renamed, downloaded, and deleted.
 
-![file-manage](/img/new_ui/dev/resource/file-manage.png)
+![file-manage](../../../../img/new_ui/dev/resource/file-manage.png)
 
 > **_Note:_**
 >
@@ -14,19 +14,19 @@ When third party jars are used in the scheduling process or user defined scripts
 
 The file format supports the following types: txt, log, sh, conf, cfg, py, java, sql, xml, hql, properties.
 
-![create-file](/img/new_ui/dev/resource/create-file.png)
+![create-file](../../../../img/new_ui/dev/resource/create-file.png)
 
 ### Upload Files
 
 Click the "Upload File" button to upload a file, or drag the file to the upload area; the file name field is automatically filled with the uploaded file name.
 
-![upload-file](/img/new_ui/dev/resource/upload-file.png)
+![upload-file](../../../../img/new_ui/dev/resource/upload-file.png)
 
 ### View File Content
 
  For the files that can be viewed, click the file name to view the file details.
 
-![file_detail](/img/tasks/demo/file_detail.png)
+![file_detail](../../../../img/tasks/demo/file_detail.png)
 
 ### Download file
 
@@ -34,7 +34,7 @@ Click the "Upload File" button to upload, drag the file to the upload area, the
 
 ### Rename File
 
-![rename-file](/img/new_ui/dev/resource/rename-file.png)
+![rename-file](../../../../img/new_ui/dev/resource/rename-file.png)
 
 ### Delete File
 
@@ -44,7 +44,7 @@ File list -> Click the "Delete" button to delete the specified file.
 
 Click the "Re-upload File" button to upload a new file that replaces the old one, or drag the file to the re-upload area; the file name field is automatically filled with the newly uploaded file name.
 
-![reuplod_file](/img/reupload_file_en.png)
+![reupload_file](../../../../img/reupload_file_en.png)
 
 > Note: The file name or source name of your local file cannot contain specific characters like `.` or `/` when you try to
 > upload, create, or rename a file in the resource center.
@@ -57,7 +57,7 @@ The example uses a simple shell script to demonstrate the use of resource center
 
 Create a shell file that prints `hello world`; a sketch of the file content follows the figure.
 
-![create-shell](/img/new_ui/dev/resource/demo/file-demo01.png)
+![create-shell](../../../../img/new_ui/dev/resource/demo/file-demo01.png)
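
A minimal sketch of the file content, assuming the file is named `hello.sh` as in the next step:

```shell
# hello.sh - created in the resource center
echo "hello world"
```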
 
 Create a workflow to execute the shell file
 
@@ -66,10 +66,10 @@ In the workflow definition module of project Manage, create a new workflow using
 - Script: 'sh hello.sh'
 - Resource: Select 'hello.sh'
 
-![use-shell](/img/new_ui/dev/resource/demo/file-demo02.png)
+![use-shell](../../../../img/new_ui/dev/resource/demo/file-demo02.png)
 
 ### View the results
 
 You can view the log of the node run in the workflow instance, as shown in the diagram below:
 
-![log-shell](/img/new_ui/dev/resource/demo/file-demo03.png)
+![log-shell](../../../../img/new_ui/dev/resource/demo/file-demo03.png)
diff --git a/docs/docs/en/guide/resource/task-group.md b/docs/docs/en/guide/resource/task-group.md
index 6237b6e4a6..e28e79bf7a 100644
--- a/docs/docs/en/guide/resource/task-group.md
+++ b/docs/docs/en/guide/resource/task-group.md
@@ -6,11 +6,11 @@ The task group is mainly used to control the concurrency of task instances and i
 
 #### Create Task Group 
 
-![create-taskGroup](/img/new_ui/dev/resource/create-taskGroup.png)
+![create-taskGroup](../../../../img/new_ui/dev/resource/create-taskGroup.png)
 
 The user clicks [Resources] - [Task Group Management] - [Task Group option] - [Create Task Group] 
 
 You need to enter the information inside the picture:
 
@@ -22,17 +22,17 @@ You need to enter the information inside the picture:
 
 #### View Task Group Queue 
 
-![view-queue](/img/new_ui/dev/resource/view-queue.png) 
+![view-queue](../../../../img/new_ui/dev/resource/view-queue.png) 
 
 Click the button to view task group usage information:
 
-![view-queue](/img/new_ui/dev/resource/view-groupQueue.png) 
+![view-queue](../../../../img/new_ui/dev/resource/view-groupQueue.png) 
 
 #### Use of Task Groups 
 
 **Note**: Task groups apply to tasks executed by workers; node types executed by the master, such as [switch] nodes, [condition] nodes, and [sub_process] nodes, are not controlled by the task group. Let's take the shell node as an example:
 
-![use-queue](/img/new_ui/dev/resource/use-queue.png)                 
+![use-queue](../../../../img/new_ui/dev/resource/use-queue.png)                 
 
 To configure the task group, you only need to configure the parts in the red box:
 
@@ -52,5 +52,5 @@ When the task that has occupied the task group resource is finished, the task gr
 
 #### Task Group Flowchart 
 
-![task_group](/img/task_group_process.png)
+![task_group](../../../../img/task_group_process.png)
       
diff --git a/docs/docs/en/guide/resource/udf-manage.md b/docs/docs/en/guide/resource/udf-manage.md
index 99928619f5..b2072ddf95 100644
--- a/docs/docs/en/guide/resource/udf-manage.md
+++ b/docs/docs/en/guide/resource/udf-manage.md
@@ -17,7 +17,7 @@ The resource management and file management functions are similar. The differenc
 - Package name and class name: enter the full class path of the UDF function.
 - UDF resource: set the resource file corresponding to the created UDF function.
 
-![create-udf](/img/new_ui/dev/resource/create-udf.png)
+![create-udf](../../../../img/new_ui/dev/resource/create-udf.png)
 
 ## Example
 
@@ -25,13 +25,13 @@ The resource management and file management functions are similar. The differenc
 
 You can customize UDF functions based on actual production requirements. Write a function that appends "HelloWorld" to the end of any string, as shown below (a Java sketch follows the figure):
 
-![code-udf](/img/new_ui/dev/resource/demo/udf-demo01.png)
+![code-udf](../../../../img/new_ui/dev/resource/demo/udf-demo01.png)
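
A minimal sketch of such a function as a Hive UDF in Java, assuming the class is named `HwUdf` to match the SQL example later on this page:

```java
import org.apache.hadoop.hive.ql.exec.UDF;

// Hive UDF sketch: appends "HelloWorld" to the end of the input string,
// so evaluate("abc") returns "abcHelloWorld".
public class HwUdf extends UDF {
    public String evaluate(final String input) {
        return input == null ? null : input + "HelloWorld";
    }
}
```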
 
 ### Configure the UDF function
 
 Before configuring the UDF function, upload the jar package of the UDF function through resource management. Then enter function management and configure related information. As shown below:
 
-![conf-udf](/img/new_ui/dev/resource/demo/udf-demo02.png)
+![conf-udf](../../../../img/new_ui/dev/resource/demo/udf-demo02.png)
 
 ### Use UDF functions
 
@@ -42,4 +42,4 @@ Enter the workflow and define an SQL node. Set the data source type to HIVE and
 - SQL statement: `select HwUdf("abc");` This function is used in the same way as the built-in functions, and can be accessed directly using the function name.
 - UDF function: Select the one configured for the resource center.
 
-![use-udf](/img/new_ui/dev/resource/demo/udf-demo03.png)
+![use-udf](../../../../img/new_ui/dev/resource/demo/udf-demo03.png)
diff --git a/docs/docs/en/guide/security.md b/docs/docs/en/guide/security.md
index 3c22f580c5..1baee568e6 100644
--- a/docs/docs/en/guide/security.md
+++ b/docs/docs/en/guide/security.md
@@ -8,7 +8,7 @@
 - Configure the `queue` parameter to execute programs such as Spark and MapReduce.
 - The administrator enters the `Security Center->Queue Management` page and clicks the "Create Queue" button to create a new queue.
 
-![create-queue](/img/new_ui/dev/security/create-queue.png)
+![create-queue](../../../img/new_ui/dev/security/create-queue.png)
 
 ## Add Tenant
 
@@ -16,7 +16,7 @@
 - Tenant Code: **the tenant code is the user on Linux; it is unique and cannot be repeated**
 - The administrator enters the `Security Center->Tenant Management` page and clicks the `Create Tenant` button to create a tenant.
 
-![create-tenant](/img/new_ui/dev/security/create-tenant.png)
+![create-tenant](../../../img/new_ui/dev/security/create-tenant.png)
 
 ## Create Normal User
 
@@ -28,7 +28,7 @@
 
 - The administrator enters the `Security Center -> User Management` page and clicks the `Create User` button to create a user.
 
-![create-user](/img/new_ui/dev/security/create-user.png)
+![create-user](../../../img/new_ui/dev/security/create-user.png)
 
 > **Edit user information**
 
@@ -46,7 +46,7 @@
 
 * The administrator enters the `Security Center -> Alarm Group Management` page and clicks the `Create Alarm Group` button to create an alarm group.
 
-![create-alarmInstance](/img/new_ui/dev/security/create-alarmInstance.png)
+![create-alarmInstance](../../../img/new_ui/dev/security/create-alarmInstance.png)
 
 ## Token Management
 
@@ -54,7 +54,7 @@
 
 - The administrator enters the `Security Center -> Token Management` page, clicks the `Create Token` button, selects the expiration time and user, clicks the `Generate Token` button, and clicks the `Submit` button to create a token for the selected user.
 
-![create-token](/img/new_ui/dev/security/create-token.png)
+![create-token](../../../img/new_ui/dev/security/create-token.png)
 
 - After a normal user logs in, click the user information in the username drop-down box, enter the token management page, select the expiration time, click the `Generate Token` button, and click the `Submit` button to create a token.
 - Call example:
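
A hedged sketch of such a call; the endpoint path and query parameters here are illustrative, and the generated token is sent in the `token` request header:

```shell
# hypothetical example: query the project list with a generated token
curl -H "token: <your-token>" \
  "http://localhost:12345/dolphinscheduler/projects?pageNo=1&pageSize=10"
```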
@@ -102,13 +102,13 @@
 
 - The administrator enters the `Security Center -> User Management` page and clicks the `Authorize` button of the user who needs to be authorized, as shown in the figure below:
  <p align="center">
-  <img src="/img/auth-en.png" width="80%" />
+  <img src="../../../img/auth-en.png" width="80%" />
 </p>
 
 - Select the project and authorize the project.
 
 <p align="center">
-   <img src="/img/auth-project-en.png" width="80%" />
+   <img src="../../../img/auth-project-en.png" width="80%" />
  </p>
 
 - Resources, data sources, and UDF function authorization are the same as project authorization.
@@ -142,13 +142,13 @@ worker.groups=default,test
 
 - The environment configuration is equivalent to the configuration in the `dolphinscheduler_env.sh` file.
 
-![create-environment](/img/new_ui/dev/security/create-environment.png)
+![create-environment](../../../img/new_ui/dev/security/create-environment.png)
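
A minimal sketch of what such an environment configuration might contain (the paths are illustrative):

```shell
# illustrative environment configuration, equivalent to dolphinscheduler_env.sh
export JAVA_HOME=/usr/java/default
export HADOOP_HOME=/opt/soft/hadoop
export SPARK_HOME=/opt/soft/spark
export PATH=$HADOOP_HOME/bin:$SPARK_HOME/bin:$JAVA_HOME/bin:$PATH
```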
 
 > Usage environment
 
 - Create a task node in the workflow definition and select the worker group and the environment corresponding to that worker group. When executing the task, the worker applies the environment first, before running the task.
 
-![use-environment](/img/new_ui/dev/security/use-environment.png)
+![use-environment](../../../img/new_ui/dev/security/use-environment.png)
 
 ## Namespace Management
 
@@ -160,4 +160,4 @@ worker.groups=default,test
 
 - After creation and authorization, you can select it from the namespace drop-down list when editing a k8s task. If the k8s cluster name is `ds_null_k8s`, it indicates test mode, which does not actually operate on the cluster.
 
-![create-environment](/img/new_ui/dev/security/create-namespace.png)
+![create-environment](../../../img/new_ui/dev/security/create-namespace.png)
diff --git a/docs/docs/en/guide/start/docker.md b/docs/docs/en/guide/start/docker.md
index 8b88b0f29b..b2585eb63b 100644
--- a/docs/docs/en/guide/start/docker.md
+++ b/docs/docs/en/guide/start/docker.md
@@ -125,7 +125,7 @@ $ docker run -d --name dolphinscheduler-alert-server \
 You can access the DolphinScheduler web UI at [http://localhost:12345/dolphinscheduler/ui](http://localhost:12345/dolphinscheduler/ui)
 and log in with the default username `admin` and password `dolphinscheduler123`.
 
-![login](/img/new_ui/dev/quick-start/login.png)
+![login](../../../../img/new_ui/dev/quick-start/login.png)
 
 > Note: If you start the services the way described in [using existing PostgreSQL/ZooKeeper](#using-exists-postgresql-zookeeper), and
 > are starting on multiple machines, you should change the URL domain from `localhost` to the IP or hostname of the machine where the API server runs.
diff --git a/docs/docs/en/guide/start/quick-start.md b/docs/docs/en/guide/start/quick-start.md
index 13f9c5a7b0..6506162ead 100644
--- a/docs/docs/en/guide/start/quick-start.md
+++ b/docs/docs/en/guide/start/quick-start.md
@@ -1,62 +1,62 @@
 # Quick Start
 
 * Watch the Apache DolphinScheduler Quick Start Tutorial here:
-  [![image](/img/video_cover/quick-use.png)](https://www.youtube.com/watch?v=nrF20hpCkug)
+  [![image](../../../../img/video_cover/quick-use.png)](https://www.youtube.com/watch?v=nrF20hpCkug)
 
 
 * Administrator user login
 
   > Address: http://localhost:12345/dolphinscheduler/ui  Username and password: `admin/dolphinscheduler123`
 
-![login](/img/new_ui/dev/quick-start/login.png)
+![login](../../../../img/new_ui/dev/quick-start/login.png)
 
 * Create a queue
 
-![create-queue](/img/new_ui/dev/quick-start/create-queue.png)
+![create-queue](../../../../img/new_ui/dev/quick-start/create-queue.png)
 
 * Create a tenant
 
-![create-tenant](/img/new_ui/dev/quick-start/create-tenant.png)
+![create-tenant](../../../../img/new_ui/dev/quick-start/create-tenant.png)
 
 * Create an ordinary user
 
-![create-user](/img/new_ui/dev/quick-start/create-user.png)
+![create-user](../../../../img/new_ui/dev/quick-start/create-user.png)
 
 * Create an alarm instance
 
-![create-alarmInstance](/img/new_ui/dev/quick-start/create-alarmInstance.png)
+![create-alarmInstance](../../../../img/new_ui/dev/quick-start/create-alarmInstance.png)
 
 * Create an alarm group
 
-![create-alarmGroup](/img/new_ui/dev/quick-start/create-alarmGroup.png)
+![create-alarmGroup](../../../../img/new_ui/dev/quick-start/create-alarmGroup.png)
   
 * Create a worker group
 
-![create-workerGroup](/img/new_ui/dev/quick-start/create-workerGroup.png)
+![create-workerGroup](../../../../img/new_ui/dev/quick-start/create-workerGroup.png)
 
 * Create an environment
 
-![create-environment](/img/new_ui/dev/quick-start/create-environment.png)
+![create-environment](../../../../img/new_ui/dev/quick-start/create-environment.png)
     
 * Create a token
   
-![create-token](/img/new_ui/dev/quick-start/create-token.png)
+![create-token](../../../../img/new_ui/dev/quick-start/create-token.png)
      
 * Log in as a regular user
   > Click the username in the upper right corner to "exit", then log in again as the normal user.
 
 * `Project Management -> Create Project -> Click on Project Name`
 
-![project](/img/new_ui/dev/quick-start/project.png)
+![project](../../../../img/new_ui/dev/quick-start/project.png)
 
 * `Click Workflow Definition -> Create Workflow Definition -> Online Process Definition`
 
 <p align="center">
-   <img src="/img/process_definition_en.png" width="60%" />
+   <img src="../../../../img/process_definition_en.png" width="60%" />
  </p>
 
 * `Running Process Definition -> Click Workflow Instance -> Click Process Instance Name -> Double-click Task Node -> View Task Execution Log`
 
  <p align="center">
-   <img src="/img/log_en.png" width="60%" />
+   <img src="../../../../img/log_en.png" width="60%" />
 </p>
diff --git a/docs/docs/en/guide/task/conditions.md b/docs/docs/en/guide/task/conditions.md
index df723b355b..f7d9f81c3b 100644
--- a/docs/docs/en/guide/task/conditions.md
+++ b/docs/docs/en/guide/task/conditions.md
@@ -5,7 +5,7 @@ Condition is a conditional node, that determines which downstream task should ru
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/conditions.png" width="20"/> task node to canvas.
+- Drag from the toolbar <img src="../../../../img/conditions.png" width="20"/> task node to canvas.
 
 ## Parameter
 
@@ -41,13 +41,13 @@ Go to the workflow definition page, and then create the following task nodes:
 - Node_Success: Shell task that prints "success"; it runs when Node_A takes the success branch.
 - Node_False: Shell task that prints "false"; it runs when Node_A takes the failure branch.
 
-![condition_task01](/img/tasks/demo/condition_task01.png)
+![condition_task01](../../../../img/tasks/demo/condition_task01.png)
 
 ### 2. View the execution result
 
 After you finish creating the workflow, you can run the workflow online. You can view the execution status of each task on the workflow instance page. As shown below:
 
-![condition_task02](/img/tasks/demo/condition_task02.png)
+![condition_task02](../../../../img/tasks/demo/condition_task02.png)
 
 In the above figure, the task status marked with a green check mark is the successfully executed task node.
 
@@ -57,5 +57,5 @@ In the above figure, the task status marked with a green check mark is the succe
 - The Conditions task and the workflow that contains it do not support copy operations.
 - The predecessor task of Conditions cannot connect to its branch nodes, which will cause logical confusion and does not conform to DAG scheduling. The situation shown below is **wrong**.
 
-![condition_task03](/img/tasks/demo/condition_task03.png)
-![condition_task04](/img/tasks/demo/condition_task04.png)
+![condition_task03](../../../../img/tasks/demo/condition_task03.png)
+![condition_task04](../../../../img/tasks/demo/condition_task04.png)
diff --git a/docs/docs/en/guide/task/datax.md b/docs/docs/en/guide/task/datax.md
index 2413d360c9..d537817759 100644
--- a/docs/docs/en/guide/task/datax.md
+++ b/docs/docs/en/guide/task/datax.md
@@ -1,63 +1,63 @@
-# DataX
-
-## Overview
-
-DataX task type for executing DataX programs. For DataX nodes, the worker will execute `${DATAX_HOME}/bin/datax.py` to analyze the input json file.
-
-## Create Task
-
-- Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag the <img src="/img/tasks/icons/datax.png" width="15"/> from the toolbar to the drawing board.
-
-## Task Parameter
-
-- **Node name**: The node name in a workflow definition is unique.
-- **Run flag**: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
-- **Descriptive information**: describe the function of the node.
-- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
-- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
-- **Environment Name**: Configure the environment name in which to run the script.
-- **Number of failed retry attempts**: The number of times the task failed to be resubmitted.
-- **Failed retry interval**: The time, in cents, interval for resubmitting the task after a failed task.
-- **Delayed execution time**: The time, in cents, that a task is delayed in execution.
-- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
-- **Custom template**: Custom the content of the DataX node's json profile when the default data source provided does not meet the required requirements.
-- **json**: json configuration file for DataX synchronization.
-- **Custom parameters**: SQL task type, and stored procedure is a custom parameter order to set values for the method. The custom parameter type and data type are the same as the stored procedure task type. The difference is that the SQL task type custom parameter will replace the \${variable} in the SQL statement.
-- **Data source**: Select the data source from which the data will be extracted.
-- **sql statement**: the sql statement used to extract data from the target database, the sql query column name is automatically parsed when the node is executed, and mapped to the target table synchronization column name. When the source table and target table column names are inconsistent, they can be converted by column alias.
-- **Target library**: Select the target library for data synchronization.
-- **Pre-sql**: Pre-sql is executed before the sql statement (executed by the target library).
-- **Post-sql**: Post-sql is executed after the sql statement (executed by the target library).
-- **Stream limit (number of bytes)**: Limits the number of bytes in the query.
-- **Limit flow (number of records)**: Limit the number of records for a query.
-- **Running memory**: the minimum and maximum memory required can be configured to suit the actual production environment.
-- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
-
-## Task Example
-
-This example demonstrates importing data from Hive into MySQL.
-
-### Configuring the DataX environment in DolphinScheduler
-
-If you are using the DataX task type in a production environment, it is necessary to configure the required environment first. The configuration file is as follows: `/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
-
-![datax_task01](/img/tasks/demo/datax_task01.png)
-
-After the environment has been configured, DolphinScheduler needs to be restarted.
-
-### Configuring DataX Task Node
-
-As the default data source does not contain data to be read from Hive, a custom json is required, refer to: [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note: Partition directories exist on the HDFS path, when importing data in real world situations, partitioning is recommended to be passed as a parameter, using custom parameters.
-
-After writing the required json file, you can configure the node content by following the steps in the diagram below.
-
-![datax_task02](/img/tasks/demo/datax_task02.png)
-
-### View run results
-
-![datax_task03](/img/tasks/demo/datax_task03.png)
-
-### Notice
-
-If the default data source provided does not meet your needs, you can configure the writer and reader of DataX according to the actual usage environment in the custom template option, available at https://github.com/alibaba/DataX.
+# DataX
+
+## Overview
+
+The DataX task type is used for executing DataX programs. For DataX nodes, the worker executes `${DATAX_HOME}/bin/datax.py` to parse the input json file.
+
+## Create Task
+
+- Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag the <img src="../../../../img/tasks/icons/datax.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- **Node name**: The node name in a workflow definition is unique.
+- **Run flag**: Identifies whether this node can be scheduled normally; if it does not need to be executed, turn on the prohibition switch.
+- **Descriptive information**: describe the function of the node.
+- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- **Environment Name**: Configure the environment name in which to run the script.
+- **Number of failed retry attempts**: The number of times a failed task is resubmitted.
+- **Failed retry interval**: The interval, in minutes, before a failed task is resubmitted.
+- **Delayed execution time**: The time, in minutes, by which the task's execution is delayed.
+- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- **Custom template**: Customize the content of the DataX node's json configuration when the provided default data sources do not meet your requirements.
+- **json**: json configuration file for DataX synchronization.
+- **Custom parameters**: The custom parameter types and data types are the same as for the stored procedure task type; the difference is that the custom parameters here replace the \${variable} placeholders in the SQL statement.
+- **Data source**: Select the data source from which the data will be extracted.
+- **sql statement**: the sql statement used to extract data from the source database; the column names of the sql query are automatically parsed when the node executes and mapped to the column names of the target table for synchronization. When the source and target table column names are inconsistent, they can be converted by column aliases.
+- **Target library**: Select the target library for data synchronization.
+- **Pre-sql**: Pre-sql is executed before the sql statement (executed by the target library).
+- **Post-sql**: Post-sql is executed after the sql statement (executed by the target library).
+- **Stream limit (number of bytes)**: Limits the number of bytes in the query.
+- **Limit flow (number of records)**: Limit the number of records for a query.
+- **Running memory**: the minimum and maximum memory required can be configured to suit the actual production environment.
+- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
+
+## Task Example
+
+This example demonstrates importing data from Hive into MySQL.
+
+### Configuring the DataX environment in DolphinScheduler
+
+If you are using the DataX task type in a production environment, it is necessary to configure the required environment first. The configuration file is as follows: `/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
+
+![datax_task01](../../../../img/tasks/demo/datax_task01.png)
+
+After the environment has been configured, DolphinScheduler needs to be restarted.
+
+### Configuring DataX Task Node
+
+As the provided default data sources do not include data to be read from Hive, a custom json is required; refer to: [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note: partition directories exist on the HDFS path; when importing data in real-world situations, it is recommended to pass the partition as a custom parameter.
+
+After writing the required json file, you can configure the node content by following the steps in the diagram below.
+
+![datax_task02](../../../../img/tasks/demo/datax_task02.png)
+
+### View run results
+
+![datax_task03](../../../../img/tasks/demo/datax_task03.png)
+
+### Notice
+
+If the provided default data sources do not meet your needs, you can configure DataX's reader and writer in the custom template option according to your actual environment; DataX is available at https://github.com/alibaba/DataX.
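
For reference, a minimal custom json sketch for a Hive(HDFS)-to-MySQL job of the kind described above; the HDFS path, JDBC URL, credentials, and column lists are placeholders:

```json
{
  "job": {
    "setting": { "speed": { "channel": 1 } },
    "content": [
      {
        "reader": {
          "name": "hdfsreader",
          "parameter": {
            "defaultFS": "hdfs://namenode:8020",
            "path": "/user/hive/warehouse/demo.db/demo_table/*",
            "fileType": "text",
            "fieldDelimiter": "\t",
            "column": ["*"]
          }
        },
        "writer": {
          "name": "mysqlwriter",
          "parameter": {
            "username": "demo_user",
            "password": "demo_password",
            "writeMode": "insert",
            "column": ["id", "name"],
            "connection": [
              { "jdbcUrl": "jdbc:mysql://mysql-host:3306/demo", "table": ["demo_table"] }
            ]
          }
        }
      }
    ]
  }
}
```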
diff --git a/docs/docs/en/guide/task/dependent.md b/docs/docs/en/guide/task/dependent.md
index 1cbf354fd4..3a97bb50d7 100644
--- a/docs/docs/en/guide/task/dependent.md
+++ b/docs/docs/en/guide/task/dependent.md
@@ -8,7 +8,7 @@ Dependent nodes are **dependency check nodes**. For example, process A depends o
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/dependent.png" width="15"/> task node to canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/dependent.png" width="15"/> task node to canvas.
 
 ## Task Parameter
 
@@ -28,12 +28,12 @@ The Dependent node provides a logical judgment function, which can detect the ex
 
 For example, process A is a weekly task, processes B and C are daily tasks, and task A requires tasks B and C to be successfully executed every day of the last week.
 
-![dependent_task01](/img/tasks/demo/dependent_task01.png)
+![dependent_task01](../../../../img/tasks/demo/dependent_task01.png)
 
 And another example is that process A is a weekly report task, processes B and C are daily tasks, and task A requires tasks B or C to be successfully executed every day of the last week:
 
-![dependent_task02](/img/tasks/demo/dependent_task02.png)
+![dependent_task02](../../../../img/tasks/demo/dependent_task02.png)
 
 If the weekly report A also needs to be executed successfully last Tuesday:
 
-![dependent_task03](/img/tasks/demo/dependent_task03.png)
+![dependent_task03](../../../../img/tasks/demo/dependent_task03.png)
diff --git a/docs/docs/en/guide/task/flink.md b/docs/docs/en/guide/task/flink.md
index f0d081bdec..72f196b3c4 100644
--- a/docs/docs/en/guide/task/flink.md
+++ b/docs/docs/en/guide/task/flink.md
@@ -11,7 +11,7 @@ Flink task type, used to execute Flink programs. For Flink nodes:
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/flink.png" width="15"/>task node to canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/flink.png" width="15"/> task node to the canvas.
 
 ## Task Parameter
 
@@ -54,7 +54,7 @@ This is a common introductory case in the big data ecosystem, which often apply
 
 If you are using the flink task type in a production environment, it is necessary to configure the required environment first. The following is the configuration file: `bin/env/dolphinscheduler_env.sh`.
 
-![demo-flink-simple](/img/tasks/demo/flink_task01.png)
+![demo-flink-simple](../../../../img/tasks/demo/flink_task01.png)
 
 #### Upload the Main Package
 
@@ -62,19 +62,19 @@ When using the Flink task node, you need to upload the jar package to the Resour
 
 After finishing the Resource Centre configuration, upload the required target files directly by dragging and dropping.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 #### Configure Flink Nodes
 
 Configure the required content according to the parameter descriptions above.
 
-![demo-flink-simple](/img/tasks/demo/flink_task02.png)
+![demo-flink-simple](../../../../img/tasks/demo/flink_task02.png)
 
 ### Execute the FlinkSQL Program
 
 Configure the required content according to the parameter descriptions above.
 
-![demo-flink-sql-simple](/img/tasks/demo/flink_sql_test.png)
+![demo-flink-sql-simple](../../../../img/tasks/demo/flink_sql_test.png)
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/http.md b/docs/docs/en/guide/task/http.md
index 3034913c62..970a4a4fe4 100644
--- a/docs/docs/en/guide/task/http.md
+++ b/docs/docs/en/guide/task/http.md
@@ -7,7 +7,7 @@ This node is used to perform http type tasks such as the common POST and GET req
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag the <img src="/img/tasks/icons/http.png" width="15"/> from the toolbar to the drawing board.
+- Drag the <img src="../../../../img/tasks/icons/http.png" width="15"/> from the toolbar to the drawing board.
 
 ## Task Parameter
 
@@ -40,7 +40,7 @@ The main configuration parameters are as follows:
      - userName: Username
      - userPassword: User login password
 
-![http_task](/img/tasks/demo/http_task01.png)
+![http_task](../../../../img/tasks/demo/http_task01.png)
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/jupyter.md b/docs/docs/en/guide/task/jupyter.md
index 4e445762b9..92d614d65f 100644
--- a/docs/docs/en/guide/task/jupyter.md
+++ b/docs/docs/en/guide/task/jupyter.md
@@ -14,7 +14,7 @@ Click [here](https://docs.conda.io/en/latest/) for more information about `conda
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag <img src="/img/tasks/icons/jupyter.png" width="15"/> from the toolbar to the canvas.
+- Drag <img src="../../../../img/tasks/icons/jupyter.png" width="15"/> from the toolbar to the canvas.
 
 ## Task Parameter
 
@@ -42,4 +42,4 @@ Click [here](https://docs.conda.io/en/latest/) for more information about `conda
 
 This example illustrates how to create a jupyter task node.
 
-![demo-jupyter-simple](/img/tasks/demo/jupyter.png)
+![demo-jupyter-simple](../../../../img/tasks/demo/jupyter.png)
diff --git a/docs/docs/en/guide/task/kubernetes.md b/docs/docs/en/guide/task/kubernetes.md
index bc024b6100..2bbb095f66 100644
--- a/docs/docs/en/guide/task/kubernetes.md
+++ b/docs/docs/en/guide/task/kubernetes.md
@@ -7,7 +7,7 @@ K8S task type used to execute a batch task. In this task, the worker submits the
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/kubernetes.png" width="15"/> to the canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/kubernetes.png" width="15"/> to the canvas.
 
 ## Task Parameter
 
@@ -37,7 +37,7 @@ If you are using the K8S task type in a production environment, the K8S cluster
 
 Configure the required content according to the parameter descriptions above.
 
-![K8S](/img/tasks/demo/kubernetes-task-en.png)
+![K8S](../../../../img/tasks/demo/kubernetes-task-en.png)
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/map-reduce.md b/docs/docs/en/guide/task/map-reduce.md
index 03cd9e1b1e..268d625c60 100644
--- a/docs/docs/en/guide/task/map-reduce.md
+++ b/docs/docs/en/guide/task/map-reduce.md
@@ -7,7 +7,7 @@ MapReduce(MR) task type used for executing MapReduce programs. For MapReduce nod
 ## Create Task
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/mr.png" width="15"/> to the canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/mr.png" width="15"/> to the canvas.
 
 ## Task Parameter
 
@@ -56,7 +56,7 @@ This example is a common introductory type of MapReduce application, which used
 
 If you are using the MapReduce task type in a production environment, it is necessary to configure the required environment first. The following is the configuration file: `bin/env/dolphinscheduler_env.sh`.
 
-![mr_configure](/img/tasks/demo/mr_task01.png)
+![mr_configure](../../../../img/tasks/demo/mr_task01.png)
 
 #### Upload the Main Package
 
@@ -64,10 +64,10 @@ When using the MapReduce task node, you need to use the Resource Centre to uploa
 
 After finishing the Resource Centre configuration, upload the required target files directly by dragging and dropping.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 #### Configure MapReduce Nodes
 
 Configure the required content according to the parameter descriptions above.
 
-![demo-mr-simple](/img/tasks/demo/mr_task02.png)
+![demo-mr-simple](../../../../img/tasks/demo/mr_task02.png)
diff --git a/docs/docs/en/guide/task/mlflow.md b/docs/docs/en/guide/task/mlflow.md
index 8e15dd753e..efcb592534 100644
--- a/docs/docs/en/guide/task/mlflow.md
+++ b/docs/docs/en/guide/task/mlflow.md
@@ -33,7 +33,7 @@ The Mlflow plugin currently supports and will support the following:
 
 - Click `Project Management -> Project Name -> Workflow Definition`, and click the "Create Workflow" button to enter the
   DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/mlflow.png" width="15"/> task node to canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/mlflow.png" width="15"/> task node to canvas.
 
 ## Task Example
 
@@ -60,7 +60,7 @@ First, introduce some general parameters of DolphinScheduler
 
 #### BasicAlgorithm
 
-![mlflow-conda-env](/img/tasks/demo/mlflow-basic-algorithm.png)
+![mlflow-conda-env](../../../../img/tasks/demo/mlflow-basic-algorithm.png)
 
 **Task Parameter**
 
@@ -88,7 +88,7 @@ First, introduce some general parameters of DolphinScheduler
 
 #### AutoML
 
-![mlflow-automl](/img/tasks/demo/mlflow-automl.png)
+![mlflow-automl](../../../../img/tasks/demo/mlflow-automl.png)
 
 **Task Parameter**
 
@@ -112,7 +112,7 @@ First, introduce some general parameters of DolphinScheduler
 
 #### Custom projects
 
-![mlflow-custom-project-template.png](/img/tasks/demo/mlflow-custom-project-template.png)
+![mlflow-custom-project-template.png](../../../../img/tasks/demo/mlflow-custom-project-template.png)
 
 **Task Parameter**
 
@@ -126,13 +126,13 @@ You can now use this feature to run all mlFlow projects on Github (For example [
 
 The actual interface is as follows
 
-![mlflow-custom-project.png](/img/tasks/demo/mlflow-custom-project.png)
+![mlflow-custom-project.png](../../../../img/tasks/demo/mlflow-custom-project.png)
 
 ### MLflow Models
 
 #### MLFLOW
 
-![mlflow-models-mlflow](/img/tasks/demo/mlflow-models-mlflow.png)
+![mlflow-models-mlflow](../../../../img/tasks/demo/mlflow-models-mlflow.png)
 
 **Task Parameter**
 
@@ -142,7 +142,7 @@ The actual interface is as follows
 
 #### Docker
 
-![mlflow-models-docker](/img/tasks/demo/mlflow-models-docker.png)
+![mlflow-models-docker](../../../../img/tasks/demo/mlflow-models-docker.png)
 
 **Task Parameter**
 
@@ -158,12 +158,12 @@ You need to enter the admin account to configure a conda environment variable(
 install [anaconda](https://docs.continuum.io/anaconda/install/)
 or [miniconda](https://docs.conda.io/en/latest/miniconda.html#installing) in advance)
 
-![mlflow-conda-env](/img/tasks/demo/mlflow-conda-env.png)
+![mlflow-conda-env](../../../../img/tasks/demo/mlflow-conda-env.png)
 
 Note: during task configuration, select the conda environment created above; otherwise, the program cannot find the
 conda environment.
 
-![mlflow-set-conda-env](/img/tasks/demo/mlflow-set-conda-env.png)
+![mlflow-set-conda-env](../../../../img/tasks/demo/mlflow-set-conda-env.png)
 
 ### Start the mlflow service
 
@@ -181,4 +181,4 @@ After running, an MLflow service is started
 
 After this, you can visit the MLflow service (`http://localhost:5000`) page to view the experiments and models.
 
-![mlflow-server](/img/tasks/demo/mlflow-server.png)
+![mlflow-server](../../../../img/tasks/demo/mlflow-server.png)
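
As a rough reference, the service above can also be brought up by hand with something like the sketch below; the SQLite backend store and the port are assumptions for a single-machine demo, not values prescribed by this guide.

```shell
# Start a local MLflow tracking server, then visit http://localhost:5000.
mlflow server -h 0.0.0.0 -p 5000 --backend-store-uri sqlite:///mlflow.db
```
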
diff --git a/docs/docs/en/guide/task/openmldb.md b/docs/docs/en/guide/task/openmldb.md
index e2e48ea84a..10f84c59f4 100644
--- a/docs/docs/en/guide/task/openmldb.md
+++ b/docs/docs/en/guide/task/openmldb.md
@@ -11,7 +11,7 @@ OpenMLDB task plugin used to execute tasks on OpenMLDB cluster.
 
 - Click `Project -> Management-Project -> Name-Workflow Definition`, and click the "Create Workflow" button to enter the
   DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/openmldb.png" width="15"/> task node to canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/openmldb.png" width="15"/> task node to canvas.
 
 ## Task Example
 
@@ -48,13 +48,13 @@ Here are some examples:
 
 #### Load data
 
-![load data](/img/tasks/demo/openmldb-load-data.png)
+![load data](../../../../img/tasks/demo/openmldb-load-data.png)
 
 We use `LOAD DATA` to load data into the OpenMLDB cluster. We select `offline` here, so the data is loaded into offline storage.
 
 #### Feature extraction
 
-![fe](/img/tasks/demo/openmldb-feature-extraction.png)
+![fe](../../../../img/tasks/demo/openmldb-feature-extraction.png)
 
 We use `SELECT INTO` to do feature extraction. We select `offline` here, so the SQL runs on the offline engine.
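
To make the two steps concrete, the sketch below collects the kind of statements the tasks would run; the table name, column names, file paths, and options are hypothetical, and in practice the SQL goes into the task's SQL field.

```shell
# Hypothetical offline-mode statements matching the load and feature-extraction tasks.
cat > openmldb_demo.sql <<'SQL'
SET @@execute_mode='offline';
LOAD DATA INFILE 'file:///tmp/train_sample.csv' INTO TABLE t1 OPTIONS (mode='overwrite');
SELECT id, vendor_id FROM t1 INTO OUTFILE '/tmp/feature_data' OPTIONS (mode='overwrite');
SQL
```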
 
diff --git a/docs/docs/en/guide/task/pigeon.md b/docs/docs/en/guide/task/pigeon.md
index a726f1a263..c8cdeff4c4 100644
--- a/docs/docs/en/guide/task/pigeon.md
+++ b/docs/docs/en/guide/task/pigeon.md
@@ -4,7 +4,7 @@ Pigeon is a task used to trigger remote tasks, acquire logs or status by calling
 
 ## Create
 
-Drag from the toolbar <img src="/img/pigeon.png" width="20"/> to the canvas to create a new Pigeon task.
+Drag from the toolbar <img src="../../../../img/pigeon.png" width="20"/> to the canvas to create a new Pigeon task.
 
 ## Parameter
 
diff --git a/docs/docs/en/guide/task/python.md b/docs/docs/en/guide/task/python.md
index 31ea986e07..6d0a376696 100644
--- a/docs/docs/en/guide/task/python.md
+++ b/docs/docs/en/guide/task/python.md
@@ -8,7 +8,7 @@ it will generate a temporary python script, and executes the script by the Linux
 ## Create Task
 
 - Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag <img src="/img/tasks/icons/python.png" width="15"/> from the toolbar to the canvas.
+- Drag <img src="../../../../img/tasks/icons/python.png" width="15"/> from the toolbar to the canvas.
 
 ## Task Parameter
 
@@ -32,7 +32,7 @@ it will generate a temporary python script, and executes the script by the Linux
 This example simulates a common task that runs a simple command: printing one line in the log file, as shown in the following figure:
 "This is a demo of python task".
 
-![demo-python-simple](/img/tasks/demo/python_ui_next.jpg)
+![demo-python-simple](../../../../img/tasks/demo/python_ui_next.jpg)
 
 ```python
 print("This is a demo of python task")
@@ -44,7 +44,7 @@ This example simulates a custom parameter task. We use parameters for reusing ex
 we declare a custom parameter named "param_key", with the value "param_val". Then we use `print` to output the parameter "${param_key}" we just declared.
 After running this example, we would see "param_val" printed in the log.
 
-![demo-python-custom-param](/img/tasks/demo/python_custom_param_ui_next.jpg)
+![demo-python-custom-param](../../../../img/tasks/demo/python_custom_param_ui_next.jpg)
 
 ```python
 print("${param_key}")
diff --git a/docs/docs/en/guide/task/shell.md b/docs/docs/en/guide/task/shell.md
index 5a2d09cd44..e397df7b01 100644
--- a/docs/docs/en/guide/task/shell.md
+++ b/docs/docs/en/guide/task/shell.md
@@ -7,7 +7,7 @@ Shell task used to create a shell task type and execute a series of shell script
 ## Create Task
 
 - Click `Project -> Management-Project -> Name-Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag  from the toolbar <img src="/img/tasks/icons/shell.png" width="15"/> to the canvas.
+- Drag  from the toolbar <img src="../../../../img/tasks/icons/shell.png" width="15"/> to the canvas.
 
 ## Task Parameter
 
@@ -32,7 +32,7 @@ Shell task used to create a shell task type and execute a series of shell script
 We make an example simulating a common task that runs a single command: printing one line in the log file, as shown in the following figure:
 "This is a demo of shell task".
 
-![demo-shell-simple](/img/tasks/demo/shell.jpg)
+![demo-shell-simple](../../../../img/tasks/demo/shell.jpg)
 
 ### Custom Parameters
 
@@ -40,7 +40,7 @@ This example simulates a custom parameter task. We use parameters for reusing ex
 we declare a custom parameter named "param_key", with the value "param_val". Then we use `echo` to print the parameter "${param_key}" we just declared. 
 After running this example, we would see "param_val" printed in the log.
 
-![demo-shell-custom-param](/img/tasks/demo/shell_custom_param.jpg)
+![demo-shell-custom-param](../../../../img/tasks/demo/shell_custom_param.jpg)
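
For clarity, the whole script body of this demo is the single line below; `param_key` is declared in the task's custom-parameters panel and substituted before the script executes.

```shell
# DolphinScheduler replaces ${param_key} with "param_val" before the script runs,
# so the task log prints: param_val
echo "${param_key}"
```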
 
 ## Attention
 
diff --git a/docs/docs/en/guide/task/spark.md b/docs/docs/en/guide/task/spark.md
index 9f94e260d1..eb3c4ae02a 100644
--- a/docs/docs/en/guide/task/spark.md
+++ b/docs/docs/en/guide/task/spark.md
@@ -11,7 +11,7 @@ Spark task type for executing Spark application. When executing the Spark task,
 ## Create Task
 
 - Click `Project -> Management-Project -> Name-Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/spark.png" width="15"/> to the canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/spark.png" width="15"/> to the canvas.
 
 ## Task Parameter
 
@@ -55,7 +55,7 @@ This is a common introductory case in the big data ecosystem, which often apply
 
 If you use the Spark task type in a production environment, you must configure the required environment first in the configuration file `bin/env/dolphinscheduler_env.sh`.
 
-![spark_configure](/img/tasks/demo/spark_task01.png)
+![spark_configure](../../../../img/tasks/demo/spark_task01.png)
 
 ##### Upload the Main Package
 
@@ -63,13 +63,13 @@ When using the Spark task node, you need to upload the jar package to the Resour
 
 After finishing the Resource Centre configuration, upload the required target files directly by dragging and dropping.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 ##### Configure Spark Nodes
 
 Configure the required content according to the parameter descriptions above.
 
-![demo-spark-simple](/img/tasks/demo/spark_task02.png)
+![demo-spark-simple](../../../../img/tasks/demo/spark_task02.png)
 
 ### spark sql
 
@@ -77,7 +77,7 @@ Configure the required content according to the parameter descriptions above.
 
 This case creates a view table `terms`, writes three rows of data into it, creates a table `wc` in Parquet format after determining whether the table already exists, and then inserts the data of the view table `terms` into the Parquet table `wc`. The program type is SQL.
 
-![spark_sql](/img/tasks/demo/spark_sql.png)
+![spark_sql](../../../../img/tasks/demo/spark_sql.png)
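
Outside the scheduler, an equivalent of this SQL-type case can be sketched with the `spark-sql` shell; the sample rows and the `IF NOT EXISTS` guard are assumptions standing in for the exact statements in the screenshot.

```shell
# A rough stand-alone equivalent: build the view "terms", then persist it
# into a Parquet table "wc" if it does not already exist.
${SPARK_HOME}/bin/spark-sql -e "
CREATE TEMPORARY VIEW terms AS SELECT * FROM VALUES (1, 'hello'), (2, 'spark') AS t(id, term);
CREATE TABLE IF NOT EXISTS wc USING PARQUET AS SELECT * FROM terms;
"
```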
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/sql.md b/docs/docs/en/guide/task/sql.md
index f40893ff22..294b876fed 100644
--- a/docs/docs/en/guide/task/sql.md
+++ b/docs/docs/en/guide/task/sql.md
@@ -11,7 +11,7 @@ Refer to [DataSource](../datasource/introduction.md)
 ## Create Task
 
 - Click `Project -> Management-Project -> Name-Workflow Definition`, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/sql.png" width="25"/> to the canvas.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/sql.png" width="25"/> to the canvas.
 
 ## Task Parameter
 
@@ -35,13 +35,13 @@ Refer to [DataSource](../datasource/introduction.md)
 
 This example creates a temporary table `tmp_hello_world` in Hive and writes a row of data. Before creating the temporary table, we need to ensure that it does not exist, so we use custom parameters to obtain the current date as the table-name suffix on every run, which lets this task run on a different day each time. The format of the created table name is `tmp_hello_world_{yyyyMMdd}`.
 
-![hive-sql](/img/tasks/demo/hive-sql.png)
+![hive-sql](../../../../img/tasks/demo/hive-sql.png)
 
 ### After Running the Task Successfully, Query the Results in Hive
 
 Log in to the big data cluster and connect to Apache Hive using the `hive` command, `beeline`, JDBC, or another method to run the query. The query SQL is `select * from tmp_hello_world_{yyyyMMdd}`; replace `{yyyyMMdd}` with the date of the running day. The following shows the query screenshot:
 
-![hive-sql](/img/tasks/demo/hive-result.png)
+![hive-sql](../../../../img/tasks/demo/hive-result.png)
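
One way to run the verification query from a terminal is sketched below; the JDBC URL is a placeholder for your HiveServer2 address, and `date +%Y%m%d` supplies the `{yyyyMMdd}` suffix for the current day.

```shell
# Query the table created by the task; adjust the connection string to your cluster.
beeline -u "jdbc:hive2://localhost:10000" \
  -e "select * from tmp_hello_world_$(date +%Y%m%d)"
```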
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/stored-procedure.md b/docs/docs/en/guide/task/stored-procedure.md
index ea2ed48ec8..5b57df54d7 100644
--- a/docs/docs/en/guide/task/stored-procedure.md
+++ b/docs/docs/en/guide/task/stored-procedure.md
@@ -5,7 +5,7 @@
 > Drag from the toolbar ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png) task node into the canvas, as shown in the figure below:
 
 <p align="center">
-   <img src="/img/procedure-en.png" width="80%" />
+   <img src="../../../../img/procedure-en.png" width="80%" />
  </p>
 
 - DataSource: The DataSource type of the stored procedure supports MySQL and POSTGRESQL, select the corresponding DataSource.
diff --git a/docs/docs/en/guide/task/sub-process.md b/docs/docs/en/guide/task/sub-process.md
index c1d3b2fa51..8b84db8fe7 100644
--- a/docs/docs/en/guide/task/sub-process.md
+++ b/docs/docs/en/guide/task/sub-process.md
@@ -7,7 +7,7 @@ The sub-process node is to execute an external workflow definition as a task nod
 ## Create Task
 
 - Click `Project -> Management-Project -> Name-Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/sub_process.png" width="15"/> task node to canvas to create a new SubProcess task.
+- Drag from the toolbar <img src="../../../../img/tasks/icons/sub_process.png" width="15"/> task node to canvas to create a new SubProcess task.
 
 ## Task Parameter
 
@@ -29,17 +29,17 @@ This example simulates a common task type, here we use a child node task to reca
 
 Create a shell task to print "hello" and define the workflow as `test_dag01`.
 
-![subprocess_task01](/img/tasks/demo/subprocess_task01.png)
+![subprocess_task01](../../../../img/tasks/demo/subprocess_task01.png)
 
 ## Create the Sub_process task
 
 To use the sub_process, you need to create the sub-node task, which is the shell task we created in the first step. After that, as shown in the diagram below, select the corresponding sub-node in position ⑤.
 
-![subprocess_task02](/img/tasks/demo/subprocess_task02.png)
+![subprocess_task02](../../../../img/tasks/demo/subprocess_task02.png)
 
 After creating the sub_process, create a corresponding shell task for printing "world" and link both together. Save the current workflow and run it to get the expected result.
 
-![subprocess_task03](/img/tasks/demo/subprocess_task03.png)
+![subprocess_task03](../../../../img/tasks/demo/subprocess_task03.png)
 
 ## Notice
 
diff --git a/docs/docs/en/guide/task/switch.md b/docs/docs/en/guide/task/switch.md
index 341567b3ed..ad4a46e8eb 100644
--- a/docs/docs/en/guide/task/switch.md
+++ b/docs/docs/en/guide/task/switch.md
@@ -4,7 +4,7 @@ The switch is a conditional judgment node, decide the branch executes according
 
 ## Create
 
-Drag from the toolbar <img src="/img/switch.png" width="20"/>  task node to canvas to create a task. 
+Drag from the toolbar <img src="../../../../img/switch.png" width="20"/>  task node to canvas to create a task. 
 **Note**: After creating a switch task, you must first configure its upstream and downstream tasks, and then configure the parameters of the task branches.
 
 ## Parameter
@@ -31,7 +31,7 @@ Here we have three tasks, the dependencies are `A -> B -> [C, D]`, and `task_a`
 
 The following shows the switch task configuration:
 
-![task-switch-configure](/img/switch_configure.jpg)
+![task-switch-configure](../../../../img/switch_configure.jpg)
 
 ## Related Task
 
diff --git a/docs/docs/en/guide/task/zeppelin.md b/docs/docs/en/guide/task/zeppelin.md
index 08414e664a..ea2d203394 100644
--- a/docs/docs/en/guide/task/zeppelin.md
+++ b/docs/docs/en/guide/task/zeppelin.md
@@ -8,7 +8,7 @@ it will call `Zeppelin Client API` to trigger zeppelin notebook paragraph. Click
 ## Create Task
 
 - Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
-- Drag <img src="/img/tasks/icons/zeppelin.png" width="15"/> from the toolbar to the canvas.
+- Drag <img src="../../../../img/tasks/icons/zeppelin.png" width="15"/> from the toolbar to the canvas.
 
 ## Task Parameter
 
@@ -30,7 +30,7 @@ it will call `Zeppelin Client API` to trigger zeppelin notebook paragraph. Click
 
 This example illustrates how to create a zeppelin paragraph task node.
 
-![demo-zeppelin-paragraph](/img/tasks/demo/zeppelin.png)
+![demo-zeppelin-paragraph](../../../../img/tasks/demo/zeppelin.png)
 
-![demo-get-zeppelin-id](/img/tasks/demo/zeppelin_id.png)
+![demo-get-zeppelin-id](../../../../img/tasks/demo/zeppelin_id.png)
 
diff --git a/docs/docs/zh/about/glossary.md b/docs/docs/zh/about/glossary.md
index d39171a9eb..f3ce5f3bd1 100644
--- a/docs/docs/zh/about/glossary.md
+++ b/docs/docs/zh/about/glossary.md
@@ -6,7 +6,7 @@
 
 **DAG:** 全称 Directed Acyclic Graph,简称 DAG。工作流中的 Task 任务以有向无环图的形式组装起来,从入度为零的节点进行拓扑遍历,直到无后继节点为止。举例如下图:
 
-![about-glossary](/img/new_ui/dev/about/glossary.png)
+![about-glossary](../../../img/new_ui/dev/about/glossary.png)
 
 **流程定义**:通过拖拽任务节点并建立任务节点的关联所形成的可视化**DAG**
 
diff --git a/docs/docs/zh/architecture/cache.md b/docs/docs/zh/architecture/cache.md
index 8b4804db60..e5a55842c4 100644
--- a/docs/docs/zh/architecture/cache.md
+++ b/docs/docs/zh/architecture/cache.md
@@ -39,4 +39,4 @@ spring:
 
 时序图如下图所示:
 
-<img src="/img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
+<img src="../../../img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
diff --git a/docs/docs/zh/architecture/design.md b/docs/docs/zh/architecture/design.md
index a2183b0664..d2876df856 100644
--- a/docs/docs/zh/architecture/design.md
+++ b/docs/docs/zh/architecture/design.md
@@ -4,7 +4,7 @@
 
 #### 2.1 系统架构图
 <p align="center">
-  <img src="/img/architecture-1.3.0.jpg" alt="系统架构图"  width="70%" />
+  <img src="../../../img/architecture-1.3.0.jpg" alt="系统架构图"  width="70%" />
   <p align="center">
         <em>系统架构图</em>
   </p>
@@ -12,7 +12,7 @@
 
 #### 2.2 启动流程活动图
 <p align="center">
-  <img src="/img/process-start-flow-1.3.0.png" alt="启动流程活动图"  width="70%" />
+  <img src="../../../img/process-start-flow-1.3.0.png" alt="启动流程活动图"  width="70%" />
   <p align="center">
         <em>启动流程活动图</em>
   </p>
@@ -112,7 +112,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 
 2. DolphinScheduler中Scheduler线程分布式锁实现流程图:
  <p align="center">
-   <img src="/img/distributed_lock_procss.png" alt="获取分布式锁流程"  width="50%" />
+   <img src="../../../img/distributed_lock_procss.png" alt="获取分布式锁流程"  width="50%" />
  </p>
 
 
@@ -152,7 +152,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 - Master容错流程:
 
 <p align="center">
-   <img src="/img/failover-master.jpg" alt="容错流程"  width="50%" />
+   <img src="../../../img/failover-master.jpg" alt="容错流程"  width="50%" />
  </p>
 
 容错范围:从host的维度来看,Master的容错范围包括:自身host+注册中心上不存在的节点host,容错的整个过程会加锁;
@@ -166,7 +166,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 - Worker容错流程:
 
 <p align="center">
-   <img src="/img/failover-worker.jpg" alt="容错流程"  width="50%" />
+   <img src="../../../img/failover-worker.jpg" alt="容错流程"  width="50%" />
  </p>
 
 容错范围:从工作流实例的维度看,每个Master只负责容错自己的工作流实例;只有在`handleDeadServer`时会加锁;
diff --git a/docs/docs/zh/architecture/metadata.md b/docs/docs/zh/architecture/metadata.md
index e298b4813e..3c1370283a 100644
--- a/docs/docs/zh/architecture/metadata.md
+++ b/docs/docs/zh/architecture/metadata.md
@@ -31,14 +31,14 @@
 
 <a name="VNVGr"></a>
 ### 用户	队列	数据源
-![image.png](/img/metadata-erd/user-queue-datasource.png)
+![image.png](../../../img/metadata-erd/user-queue-datasource.png)
 
 - 一个租户下可以有多个用户<br />
 - t_ds_user中的queue字段存储的是队列表中的queue_name信息,t_ds_tenant下存的是queue_id,在流程定义执行过程中,用户队列优先级最高,用户队列为空则采用租户队列<br />
 - t_ds_datasource表中的user_id字段表示创建该数据源的用户,t_ds_relation_datasource_user中的user_id表示,对数据源有权限的用户<br />
 <a name="HHyGV"></a>
 ### 项目	资源	告警
-![image.png](/img/metadata-erd/project-resource-alert.png)
+![image.png](../../../img/metadata-erd/project-resource-alert.png)
 
 - 一个用户可以有多个项目,用户项目授权通过t_ds_relation_project_user表完成project_id和user_id的关系绑定<br />
 - t_ds_projcet表中的user_id表示创建该项目的用户,t_ds_relation_project_user表中的user_id表示对项目有权限的用户<br />
@@ -46,7 +46,7 @@
 - t_ds_udfs表中的user_id表示创建该UDF的用户,t_ds_relation_udfs_user表中的user_id表示对UDF有权限的用户<br />
 <a name="Bg2Sn"></a>
 ### 命令	流程	任务
-![image.png](/img/metadata-erd/command.png)<br />![image.png](/img/metadata-erd/process-task.png)
+![image.png](../../../img/metadata-erd/command.png)<br />![image.png](../../../img/metadata-erd/process-task.png)
 
 - 一个项目有多个流程定义,一个流程定义可以生成多个流程实例,一个流程实例可以生成多个任务实例<br />
 - t_ds_schedulers表存放流程定义的定时调度信息<br />
diff --git a/docs/docs/zh/development/architecture-design.md b/docs/docs/zh/development/architecture-design.md
index 8e439ad20f..13d0563cf0 100644
--- a/docs/docs/zh/development/architecture-design.md
+++ b/docs/docs/zh/development/architecture-design.md
@@ -5,7 +5,7 @@
 **DAG:** 全称Directed Acyclic Graph,简称DAG。工作流中的Task任务以有向无环图的形式组装起来,从入度为零的节点进行拓扑遍历,直到无后继节点为止。举例如下图:
 
 <p align="center">
-  <img src="/img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
+  <img src="../../../img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
   <p align="center">
         <em>dag示例</em>
   </p>
@@ -37,7 +37,7 @@
 
 #### 2.1 系统架构图
 <p align="center">
-  <img src="/img/architecture.jpg" alt="系统架构图"  />
+  <img src="../../../img/architecture.jpg" alt="系统架构图"  />
   <p align="center">
         <em>系统架构图</em>
   </p>
@@ -129,12 +129,12 @@
 DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master执行Scheduler,或者只有一台Worker执行任务的提交。
 1. 获取分布式锁的核心流程算法如下
  <p align="center">
-   <img src="/img/architecture-design/distributed_lock.png" alt="获取分布式锁流程"  width="70%" />
+   <img src="../../../img/architecture-design/distributed_lock.png" alt="获取分布式锁流程"  width="70%" />
  </p>
 
 2. DolphinScheduler中Scheduler线程分布式锁实现流程图:
  <p align="center">
-   <img src="/img/architecture-design/distributed_lock_procss.png" alt="获取分布式锁流程" />
+   <img src="../../../img/architecture-design/distributed_lock_procss.png" alt="获取分布式锁流程" />
  </p>
 
 
@@ -144,7 +144,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 -  如果一个大的DAG中嵌套了很多子流程,如下图则会产生“死等”状态:
 
  <p align="center">
-   <img src="/img/architecture-design/lack_thread.png" alt="线程不足循环等待问题"  width="70%" />
+   <img src="../../../img/architecture-design/lack_thread.png" alt="线程不足循环等待问题"  width="70%" />
  </p>
 上图中MainFlowThread等待SubFlowThread1结束,SubFlowThread1等待SubFlowThread2结束, SubFlowThread2等待SubFlowThread3结束,而SubFlowThread3等待线程池有新线程,则整个DAG流程不能结束,从而其中的线程也不能释放。这样就形成的子父流程循环等待的状态。此时除非启动新的Master来增加线程来打破这样的”僵局”,否则调度集群将不能再使用。
 
@@ -167,7 +167,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 服务容错设计依赖于ZooKeeper的Watcher机制,实现原理如图:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant.png" alt="DolphinScheduler容错设计"  width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant.png" alt="DolphinScheduler容错设计"  width="70%" />
  </p>
 其中Master监控其他Master和Worker的目录,如果监听到remove事件,则会根据具体的业务逻辑进行流程实例容错或者任务实例容错。
 
@@ -176,7 +176,7 @@ DolphinScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Mast
 - Master容错流程图:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant_master.png" alt="Master容错流程图"  width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant_master.png" alt="Master容错流程图"  width="70%" />
  </p>
 ZooKeeper Master容错完成之后则重新由DolphinScheduler中Scheduler线程调度,遍历 DAG 找到”正在运行”和“提交成功”的任务,对”正在运行”的任务监控其任务实例的状态,对”提交成功”的任务需要判断Task Queue中是否已经存在,如果存在则同样监控任务实例的状态,如果不存在则重新提交任务实例。
 
@@ -185,7 +185,7 @@ ZooKeeper Master容错完成之后则重新由DolphinScheduler中Scheduler线程
 - Worker容错流程图:
 
  <p align="center">
-   <img src="/img/architecture-design/fault-tolerant_worker.png" alt="Worker容错流程图"  width="70%" />
+   <img src="../../../img/architecture-design/fault-tolerant_worker.png" alt="Worker容错流程图"  width="70%" />
  </p>
 
 Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则接管任务并进行重新提交。
diff --git a/docs/docs/zh/development/e2e-test.md b/docs/docs/zh/development/e2e-test.md
index 2aac1bb0e1..c588afd555 100644
--- a/docs/docs/zh/development/e2e-test.md
+++ b/docs/docs/zh/development/e2e-test.md
@@ -103,7 +103,7 @@ public final class LoginPage extends NavBarPage {
     }
 ```
 
-![SecurityPage](/img/e2e-test/SecurityPage.png)
+![SecurityPage](../../../img/e2e-test/SecurityPage.png)
 
 对于导航栏选项的跳转,在`org/apache/dolphinscheduler/e2e/pages/common/NavBarPage.java` 中提供了 goToNav 的方法。当前支持的页面为:项目管理(ProjectPage)、安全中心(SecurityPage)和资源中心(ResourcePage)。
 
@@ -138,7 +138,7 @@ public final class LoginPage extends NavBarPage {
 
 当前所支持的 E2E 测试案例,主要包括:文件管理、项目管理、队列管理、租户管理、用户管理、Worker 分组管理和工作流测试。
 
-![E2E_Cases](/img/e2e-test/E2E_Cases.png)
+![E2E_Cases](../../../img/e2e-test/E2E_Cases.png)
 
 下面以租户管理测试为例,前文已经说明,我们使用 docker-compose 进行部署,所以每个测试案例,都需要以注解的形式引入对应的文件。
 
@@ -183,12 +183,12 @@ https://github.com/apache/dolphinscheduler/tree/dev/dolphinscheduler-e2e/dolphin
 
 如果是`M1`芯片的机器,可以使用`-Dm1_chip=true` 参数,用于配置使用`ARM64`支持的容器。
 
-![Dlocal](/img/e2e-test/Dlocal.png)
+![Dlocal](../../../img/e2e-test/Dlocal.png)
 
 在本地运行过程中,如果出现连接超时,可增大加载时间,建议 30 及其以上。
 
-![timeout](/img/e2e-test/timeout.png)
+![timeout](../../../img/e2e-test/timeout.png)
 
 测试的运行过程将会以 MP4 的文件格式存在。
 
-![MP4](/img/e2e-test/MP4.png)
+![MP4](../../../img/e2e-test/MP4.png)
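
For reference, a local E2E run can be sketched as below; the Maven invocation and module layout are assumptions here, while `-Dm1_chip=true` is the ARM64 switch mentioned above.

```shell
# Hypothetical local invocation of the E2E suite from the repository root;
# add -Dm1_chip=true on Apple-silicon machines to use ARM64 containers.
./mvnw -f dolphinscheduler-e2e/pom.xml test -Dm1_chip=true
```
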
diff --git a/docs/docs/zh/faq.md b/docs/docs/zh/faq.md
index f16701fae8..38dbf17cb7 100644
--- a/docs/docs/zh/faq.md
+++ b/docs/docs/zh/faq.md
@@ -538,7 +538,7 @@ master 服务、worker 服务在 zookeeper 注册时,会以 ip:port 的形式
 如果 ip 地址获取错误,请检查网络信息,如 Linux 系统通过 `ifconfig` 命令查看网络信息,以下图为例:
 
 <p align="center">
-  <img src="/img/network/network_config.png" width="60%" />
+  <img src="../../img/network/network_config.png" width="60%" />
 </p>
 
 可以使用 dolphinscheduler 提供的三种策略,获取可用 ip:
diff --git a/docs/docs/zh/guide/alert/alert_plugin_user_guide.md b/docs/docs/zh/guide/alert/alert_plugin_user_guide.md
index 231d6558a4..dbea97b961 100644
--- a/docs/docs/zh/guide/alert/alert_plugin_user_guide.md
+++ b/docs/docs/zh/guide/alert/alert_plugin_user_guide.md
@@ -2,7 +2,7 @@
 
 在 2.0.0 版本中,用户需要创建告警实例,在创建告警实例时,需要选择告警策略,有三个选项,成功发、失败发,以及成功和失败都发。在执行完工作流或任务时,如果触发告警,调用告警实例发送方法会进行逻辑判断,将告警实例与任务状态进行匹配,匹配则执行该告警实例发送逻辑,不匹配则过滤。创建完告警实例后,需要同告警组进行关联,一个告警组可以使用多个告警实例。
 告警模块支持场景如下:
-<img src="/img/alert/alert_scenarios_zh.png">
+<img src="../../../../img/alert/alert_scenarios_zh.png">
 
 使用步骤如下:
 
@@ -10,7 +10,7 @@
 
 然后选择告警组管理,创建告警组,选择相应的告警实例即可。
 
-![alert-instance01](/img/new_ui/dev/alert/alert_instance01.png)
-![alert-instance02](/img/new_ui/dev/alert/alert_instance02.png)
-![alert-instance03](/img/new_ui/dev/alert/alert_instance03.png)
-![alert-instance04](/img/new_ui/dev/alert/alert_instance04.png)
+![alert-instance01](../../../../img/new_ui/dev/alert/alert_instance01.png)
+![alert-instance02](../../../../img/new_ui/dev/alert/alert_instance02.png)
+![alert-instance03](../../../../img/new_ui/dev/alert/alert_instance03.png)
+![alert-instance04](../../../../img/new_ui/dev/alert/alert_instance04.png)
diff --git a/docs/docs/zh/guide/alert/dingtalk.md b/docs/docs/zh/guide/alert/dingtalk.md
index ec6f874ea7..a6cba29443 100644
--- a/docs/docs/zh/guide/alert/dingtalk.md
+++ b/docs/docs/zh/guide/alert/dingtalk.md
@@ -2,7 +2,7 @@
 
 如果您需要使用到钉钉进行告警,请在告警实例管理里创建告警实例,选择 DingTalk 插件。钉钉的配置样例如下:
 
-![alert-dingtalk](/img/new_ui/dev/alert/alert_dingtalk.png)
+![alert-dingtalk](../../../../img/new_ui/dev/alert/alert_dingtalk.png)
 
 参数配置
 
diff --git a/docs/docs/zh/guide/alert/enterprise-webexteams.md b/docs/docs/zh/guide/alert/enterprise-webexteams.md
index e324f0df7a..bb4d492e44 100644
--- a/docs/docs/zh/guide/alert/enterprise-webexteams.md
+++ b/docs/docs/zh/guide/alert/enterprise-webexteams.md
@@ -4,7 +4,7 @@
 你可以选择机器人私聊通知或聊天室通知。
 WebexTeams的配置样例如下:
 
-![enterprise-webexteams-plugin](/img/alert/enterprise-webexteams-plugin.png)
+![enterprise-webexteams-plugin](../../../../img/alert/enterprise-webexteams-plugin.png)
 
 ## 参数配置
 
@@ -25,13 +25,13 @@ WebexTeams的配置样例如下:
 
 访问[官网My-Apps](https://developer.webex.com/docs/api/v1/rooms/create-a-room)来创建一个机器人,点击`Create a New APP` 然后选择 `Create a Bot`,填入机器人信息后获取`bot username` 和 `bot ID`以备以下步骤使用。
 
-![enterprise-webexteams-bot-info](/img/alert/enterprise-webexteams-bot.png)
+![enterprise-webexteams-bot-info](../../../../img/alert/enterprise-webexteams-bot.png)
 
 ## 创建一个房间
 
 访问[官网开发者APIs](https://developer.webex.com/docs/api/v1/rooms/create-a-room)来创建一个房间,填入房间名称后获取`id`(room ID) 和 `creatorId`以备以下步骤使用。
 
-![enterprise-webexteams-room-info](/img/alert/enterprise-webexteams-room.png)
+![enterprise-webexteams-room-info](../../../../img/alert/enterprise-webexteams-room.png)
 
 ### 邀请机器人到房间
 
@@ -43,22 +43,22 @@ WebexTeams的配置样例如下:
 `用户邮箱`是用户注册Email地址。
 `用户`我们可以从新建房间返回的`creatorId`中获取。
 
-![enterprise-webexteams-private-message-form](/img/alert/enterprise-webexteams-private-form.png)
+![enterprise-webexteams-private-message-form](../../../../img/alert/enterprise-webexteams-private-form.png)
 
 ### 私聊告警样例
 
-![enterprise-webexteams-private-message-example](/img/alert/enterprise-webexteams-private-msg.png)
+![enterprise-webexteams-private-message-example](../../../../img/alert/enterprise-webexteams-private-msg.png)
 
 ## 发送群聊消息
 
 通过这种方式,你可以通过`房间`向一个房间发送告警,填入`房间` 和 `访问令牌`并选择`描述` `roomId`。
 `房间`我们可以从新建房间API返回的`id`中获取。
 
-![enterprise-webexteams-group-form](/img/alert/enterprise-webexteams-group-form.png)
+![enterprise-webexteams-group-form](../../../../img/alert/enterprise-webexteams-group-form.png)
 
 ### 群聊告警消息样例
 
-![enterprise-webexteams-room-message-example](/img/alert/enterprise-webexteams-room-msg.png)
+![enterprise-webexteams-room-message-example](../../../../img/alert/enterprise-webexteams-room-msg.png)
 
 [WebexTeams申请机器人文档](https://developer.webex.com/docs/bots)
 [WebexTeamsMessage开发文档](https://developer.webex.com/docs/api/v1/messages/create-a-message)
diff --git a/docs/docs/zh/guide/alert/enterprise-wechat.md b/docs/docs/zh/guide/alert/enterprise-wechat.md
index 263a44a257..258d0ee9f8 100644
--- a/docs/docs/zh/guide/alert/enterprise-wechat.md
+++ b/docs/docs/zh/guide/alert/enterprise-wechat.md
@@ -2,7 +2,7 @@
 
 如果您需要使用到企业微信进行告警,请在告警实例管理里创建告警实例,选择 WeChat 插件。企业微信的配置样例如下:
 
-![enterprise-wechat-plugin](/img/alert/enterprise-wechat-plugin.png)
+![enterprise-wechat-plugin](../../../../img/alert/enterprise-wechat-plugin.png)
 
 ## 发送类型
 
@@ -13,15 +13,15 @@
 应用指将告警结果通过企业微信的自定义应用进行通知,支持向特定用户发送消息和对所有人发送消息。目前还不支持部门和标签,欢迎提PR贡献代码。
 下图是应用告警配置的示例:
 
-![enterprise-wechat-app-msg-config](/img/alert/wechat-app-form-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/wechat-app-form-example.png)
 
 下图是`应用``MARKDOWN`告警消息的示例:
 
-![enterprise-wechat-app-msg-markdown](/img/alert/enterprise-wechat-app-msg-md.png)
+![enterprise-wechat-app-msg-markdown](../../../../img/alert/enterprise-wechat-app-msg-md.png)
 
 下图是`应用``TEXT`告警消息的示例:
 
-![enterprise-wechat-app-msg-text](/img/alert/enterprise-wechat-app-msg.png)
+![enterprise-wechat-app-msg-text](../../../../img/alert/enterprise-wechat-app-msg.png)
 
 #### 前置
 
@@ -33,7 +33,7 @@
 获取用户的userId请参考[官方文档](https://developer.work.weixin.qq.com/document/path/95402)根据手机号获取userId。
 下图是获取userId接口的示例:
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-query-userid.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-query-userid.png)
 
 #### 参考文档
 
@@ -44,15 +44,15 @@
 群聊指将告警结果通过企业微信API创建的群聊进行通知,会向该群聊下的所有人发送消息,不支持向特定用户发送消息。
 下图是群聊告警配置的示例:
 
-![enterprise-wechat-group-msg-config](/img/alert/wechat-group-form-example.png)
+![enterprise-wechat-group-msg-config](../../../../img/alert/wechat-group-form-example.png)
 
 下图是`群聊``MARKDOWN`告警消息的示例:
 
-![enterprise-wechat-group-msg-markdown](/img/alert/enterprise-wechat-group-msg-md.png)
+![enterprise-wechat-group-msg-markdown](../../../../img/alert/enterprise-wechat-group-msg-md.png)
 
 下图是`群聊``TEXT`告警消息的示例:
 
-![enterprise-wechat-group-msg-text](/img/alert/enterprise-wechat-group-msg.png)
+![enterprise-wechat-group-msg-text](../../../../img/alert/enterprise-wechat-group-msg.png)
 
 #### 前置
 
@@ -60,9 +60,9 @@
 其中获取用户的userId请参考[官方文档](https://developer.work.weixin.qq.com/document/path/95402)根据手机号获取userId。
 下图是创建新聊天群组和获取userId接口的示例:
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-create-group.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-create-group.png)
 
-![enterprise-wechat-create-group](/img/alert/enterprise-wechat-query-userid.png)
+![enterprise-wechat-create-group](../../../../img/alert/enterprise-wechat-query-userid.png)
 
 #### 参考文档
 
diff --git a/docs/docs/zh/guide/alert/http.md b/docs/docs/zh/guide/alert/http.md
index dd124df95b..e395bfead7 100644
--- a/docs/docs/zh/guide/alert/http.md
+++ b/docs/docs/zh/guide/alert/http.md
@@ -24,11 +24,11 @@
 GET `Http`告警指将告警结果作为参数通过`Http` GET方法进行请求。
 下图是GET告警配置的示例:
 
-![enterprise-wechat-app-msg-config](/img/alert/http-get-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/http-get-example.png)
 
 ### POST Http告警
 
 POST `Http`告警指将告警结果作为`BODY`参数通过`Http`POST方法进行请求。
 下图是POST告警配置的示例:
 
-![enterprise-wechat-app-msg-config](/img/alert/http-post-example.png)
+![enterprise-wechat-app-msg-config](../../../../img/alert/http-post-example.png)
diff --git a/docs/docs/zh/guide/alert/script.md b/docs/docs/zh/guide/alert/script.md
index 41be752ed4..7763f41854 100644
--- a/docs/docs/zh/guide/alert/script.md
+++ b/docs/docs/zh/guide/alert/script.md
@@ -2,7 +2,7 @@
 
 如果您需要使用到`Shell`脚本进行告警,请在告警实例管理里创建告警实例,选择`Script`插件。`Script`的配置样例如下:
 
-![dingtalk-plugin](/img/alert/script-plugin.png)
+![dingtalk-plugin](../../../../img/alert/script-plugin.png)
 
 参数配置
 
diff --git a/docs/docs/zh/guide/alert/telegram.md b/docs/docs/zh/guide/alert/telegram.md
index c586eb7052..643368c52d 100644
--- a/docs/docs/zh/guide/alert/telegram.md
+++ b/docs/docs/zh/guide/alert/telegram.md
@@ -4,7 +4,7 @@
 
 `Telegram` 的配置样例如下:
 
-![alert-telegram](/img/new_ui/dev/alert/alert_telegram.png)
+![alert-telegram](../../../../img/new_ui/dev/alert/alert_telegram.png)
 
 参数配置:
 * WebHook:
diff --git a/docs/docs/zh/guide/data-quality.md b/docs/docs/zh/guide/data-quality.md
index 89fc4e855f..50e2c92f14 100644
--- a/docs/docs/zh/guide/data-quality.md
+++ b/docs/docs/zh/guide/data-quality.md
@@ -68,7 +68,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
   ```
 
 ### 界面操作指南
-![dataquality_null_check](/img/tasks/demo/null_check.png)
+![dataquality_null_check](../../../img/tasks/demo/null_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -90,7 +90,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 及时性检查用于检查数据是否在预期时间内处理完成,可指定开始时间、结束时间来界定时间范围,如果在该时间范围内的数据量没有达到设定的阈值,那么会判断该检查任务为失败
 ### 界面操作指南
-![dataquality_timeliness_check](/img/tasks/demo/timeliness_check.png)
+![dataquality_timeliness_check](../../../img/tasks/demo/timeliness_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -115,7 +115,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 字段长度校验的目标是检查所选字段的长度是否满足预期,如果有存在不满足要求的数据,并且行数超过阈值则会判断任务为失败
 ### 界面操作指南
-![dataquality_length_check](/img/tasks/demo/field_length_check.png)
+![dataquality_length_check](../../../img/tasks/demo/field_length_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -139,7 +139,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 唯一性校验的目标是检查字段是否存在重复的情况,一般用于检验primary key是否有重复,如果存在重复且达到阈值,则会判断检查任务为失败
 ### 界面操作指南
-![dataquality_uniqueness_check](/img/tasks/demo/uniqueness_check.png)
+![dataquality_uniqueness_check](../../../img/tasks/demo/uniqueness_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -161,7 +161,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 正则表达式校验的目标是检查某字段的值的格式是否符合要求,例如时间格式、邮箱格式、身份证格式等等,如果存在不符合格式的数据并超过阈值,则会判断任务为失败
 ### 界面操作指南
-![dataquality_regex_check](/img/tasks/demo/regexp_check.png)
+![dataquality_regex_check](../../../img/tasks/demo/regexp_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -184,7 +184,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 枚举值校验的目标是检查某字段的值是否在枚举值的范围内,如果存在不在枚举值范围里的数据并超过阈值,则会判断任务为失败
 ### 界面操作指南
-![dataquality_enum_check](/img/tasks/demo/enumeration_check.png)
+![dataquality_enum_check](../../../img/tasks/demo/enumeration_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -206,7 +206,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 表行数校验的目标是检查表的行数是否达到预期的值,如果行数未达标,则会判断任务为失败
 ### 界面操作指南
-![dataquality_count_check](/img/tasks/demo/table_count_check.png)
+![dataquality_count_check](../../../img/tasks/demo/table_count_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择验证数据所在表
@@ -227,7 +227,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ## 单表检查之自定义SQL检查
 ### 检查介绍
 ### 界面操作指南
-![dataquality_custom_sql_check](/img/tasks/demo/custom_sql_check.png)
+![dataquality_custom_sql_check](../../../img/tasks/demo/custom_sql_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择要验证数据所在表
@@ -262,7 +262,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 | b | 3|
 如果对比c1和c21中的数据,则表test1和test2完全一致。 如果对比c2和c22则表test1和表test2中的数据则存在不一致了。
 ### 界面操作指南
-![dataquality_multi_table_accuracy_check](/img/tasks/demo/multi_table_accuracy_check.png)
+![dataquality_multi_table_accuracy_check](../../../img/tasks/demo/multi_table_accuracy_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:下拉选择要验证数据所在表
@@ -283,7 +283,7 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
 ### 检查介绍
 两表值比对允许用户对两张表自定义不同的SQL统计出相应的值进行比对,例如针对源表A统计出某一列的金额总值sum1,针对目标表统计出某一列的金额总值sum2,将sum1和sum2进行比较来判定检查结果
 ### 界面操作指南
-![dataquality_multi_table_comparison_check](/img/tasks/demo/multi_table_comparison_check.png)
+![dataquality_multi_table_comparison_check](../../../img/tasks/demo/multi_table_comparison_check.png)
 - 源数据类型:选择MySQL、PostgreSQL等
 - 源数据源:源数据类型下对应的数据源
 - 源数据表:要验证数据所在表
@@ -305,9 +305,9 @@ data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
     - 阻断:数据质量任务失败了,DolphinScheduler任务结果为失败,发送告警
 
 ## 任务结果查看
-![dataquality_result](/img/tasks/demo/result.png)
+![dataquality_result](../../../img/tasks/demo/result.png)
 ## 规则查看
 ### 规则列表
-![dataquality_rule_list](/img/tasks/demo/rule_list.png)
+![dataquality_rule_list](../../../img/tasks/demo/rule_list.png)
 ### 规则详情
-![dataquality_rule_detail](/img/tasks/demo/rule_detail.png)
\ No newline at end of file
+![dataquality_rule_detail](../../../img/tasks/demo/rule_detail.png)
\ No newline at end of file
diff --git a/docs/docs/zh/guide/datasource/hive.md b/docs/docs/zh/guide/datasource/hive.md
index 6598ba81b5..21dcf291e8 100644
--- a/docs/docs/zh/guide/datasource/hive.md
+++ b/docs/docs/zh/guide/datasource/hive.md
@@ -2,7 +2,7 @@
 
 ## 使用HiveServer2
 
-![hive](/img/new_ui/dev/datasource/hive.png)
+![hive](../../../../img/new_ui/dev/datasource/hive.png)
 
 - 数据源:选择 HIVE
 - 数据源名称:输入数据源的名称
@@ -19,7 +19,7 @@
 
 ## 使用 HiveServer2 HA Zookeeper
 
-![hive-server2](/img/new_ui/dev/datasource/hiveserver2.png)
+![hive-server2](../../../../img/new_ui/dev/datasource/hiveserver2.png)
 
 注意:如果没有开启 kerberos,请保证参数 `hadoop.security.authentication.startup.state` 值为 `false`,
 参数 `java.security.krb5.conf.path` 值为空. 开启了 **kerberos**,则需要在 `common.properties` 配置以下参数
diff --git a/docs/docs/zh/guide/datasource/mysql.md b/docs/docs/zh/guide/datasource/mysql.md
index 3727dac78e..2ad2aa081f 100644
--- a/docs/docs/zh/guide/datasource/mysql.md
+++ b/docs/docs/zh/guide/datasource/mysql.md
@@ -1,6 +1,6 @@
 # MySQL 数据源
 
-![mysql](/img/new_ui/dev/datasource/mysql.png)
+![mysql](../../../../img/new_ui/dev/datasource/mysql.png)
 
 - 数据源:选择 MYSQL
 - 数据源名称:输入数据源的名称
diff --git a/docs/docs/zh/guide/datasource/postgresql.md b/docs/docs/zh/guide/datasource/postgresql.md
index a0a8143641..ede929e993 100644
--- a/docs/docs/zh/guide/datasource/postgresql.md
+++ b/docs/docs/zh/guide/datasource/postgresql.md
@@ -1,6 +1,6 @@
 # POSTGRESQL 数据源
 
-![postgresql](/img/new_ui/dev/datasource/postgresql.png)
+![postgresql](../../../../img/new_ui/dev/datasource/postgresql.png)
 
 - 数据源:选择 POSTGRESQL
 - 数据源名称:输入数据源的名称
diff --git a/docs/docs/zh/guide/datasource/spark.md b/docs/docs/zh/guide/datasource/spark.md
index 946ff01101..e673cfe9a2 100644
--- a/docs/docs/zh/guide/datasource/spark.md
+++ b/docs/docs/zh/guide/datasource/spark.md
@@ -1,6 +1,6 @@
 # Spark数据源
 
-![sparksql](/img/new_ui/dev/datasource/sparksql.png)
+![sparksql](../../../../img/new_ui/dev/datasource/sparksql.png)
 
 - 数据源:选择 Spark
 - 数据源名称:输入数据源的名称
@@ -15,7 +15,7 @@
 注意:如果开启了**kerberos**,则需要填写 **Principal**
 
 <p align="center">
-    <img src="/img/sparksql_kerberos.png" width="80%" />
+    <img src="../../../../img/sparksql_kerberos.png" width="80%" />
   </p>
 
 ## 是否原生支持
diff --git a/docs/docs/zh/guide/homepage.md b/docs/docs/zh/guide/homepage.md
index 9d22fd2d2e..f03f23d7e0 100644
--- a/docs/docs/zh/guide/homepage.md
+++ b/docs/docs/zh/guide/homepage.md
@@ -2,4 +2,4 @@
 
 首页包含用户所有项目的任务状态统计、流程状态统计、工作流定义统计。
 
-![homepage](/img/new_ui/dev/homepage/homepage.png)
+![homepage](../../../img/new_ui/dev/homepage/homepage.png)
diff --git a/docs/docs/zh/guide/monitor.md b/docs/docs/zh/guide/monitor.md
index f5e8709f8e..63a989df52 100644
--- a/docs/docs/zh/guide/monitor.md
+++ b/docs/docs/zh/guide/monitor.md
@@ -8,25 +8,25 @@
 
 - 主要是 master 的相关信息。
 
-![master](/img/new_ui/dev/monitor/master.png)
+![master](../../../img/new_ui/dev/monitor/master.png)
 
 ### Worker
 
 - 主要是 worker 的相关信息。
 
-![worker](/img/new_ui/dev/monitor/worker.png)
+![worker](../../../img/new_ui/dev/monitor/worker.png)
 
 ### Database
 
 - 主要是 DB 的健康状况
 
-![db](/img/new_ui/dev/monitor/db.png)
+![db](../../../img/new_ui/dev/monitor/db.png)
  
 ## 统计管理
 
 ### Statistics
 
-![statistics](/img/new_ui/dev/monitor/statistics.png)
+![statistics](../../../img/new_ui/dev/monitor/statistics.png)
  
 - 待执行命令数:统计 t_ds_command 表的数据
 - 执行失败的命令数:统计 t_ds_error_command 表的数据
@@ -37,4 +37,4 @@
 
 审计日志的记录提供了有关谁访问了系统,以及他或她在给定时间段内执行了哪些操作的信息,他对于维护安全都很有用。
 
-![audit-log](/img/new_ui/dev/monitor/audit-log.jpg)
+![audit-log](../../../img/new_ui/dev/monitor/audit-log.jpg)
diff --git a/docs/docs/zh/guide/open-api.md b/docs/docs/zh/guide/open-api.md
index cd01d22868..66217c29bc 100644
--- a/docs/docs/zh/guide/open-api.md
+++ b/docs/docs/zh/guide/open-api.md
@@ -10,11 +10,11 @@
 
 1. 登录调度系统,点击 "安全中心",再点击左侧的 "令牌管理",点击 "令牌管理" 创建令牌。
 
-![create-token](/img/new_ui/dev/security/create-token.png)
+![create-token](../../../img/new_ui/dev/security/create-token.png)
  
 2. 选择 "失效时间" (Token 有效期),选择 "用户" (以指定的用户执行接口操作),点击 "生成令牌" ,拷贝 Token 字符串,然后点击 "提交" 。
 
-![token-expiration](/img/new_ui/dev/open-api/token_expiration.png)
+![token-expiration](../../../img/new_ui/dev/open-api/token_expiration.png)
 
 ### 使用案例
 
@@ -24,7 +24,7 @@
 
 > 地址:http://{api server ip}:12345/dolphinscheduler/doc.html?language=zh_CN&lang=cn
 
-![api-doc](/img/new_ui/dev/open-api/api_doc.png)
+![api-doc](../../../img/new_ui/dev/open-api/api_doc.png)
     
 2. 选一个测试的接口,本次测试选取的接口是:查询所有项目
 
@@ -36,7 +36,7 @@
     token: 刚刚生成的 Token
     ```
    
-![api-test](/img/new_ui/dev/open-api/api_test.png)
+![api-test](../../../img/new_ui/dev/open-api/api_test.png)
  
 #### 创建项目
 
@@ -44,15 +44,15 @@
 
 通过查阅 api 文档,在 Postman 的 Headers 中配置 KEY 为 Accept,VALUE 为 application/json 的参数。
 
-![create-project01](/img/new_ui/dev/open-api/create_project01.png)
+![create-project01](../../../img/new_ui/dev/open-api/create_project01.png)
 
 然后再 Body 中配置所需的 projectName 和 description 参数。
 
-![create-project02](/img/new_ui/dev/open-api/create_project02.png)
+![create-project02](../../../img/new_ui/dev/open-api/create_project02.png)
 
 检查 post 请求结果。
 
-![create-project03](/img/new_ui/dev/open-api/create_project03.png)
+![create-project03](../../../img/new_ui/dev/open-api/create_project03.png)
 
 返回 msg 信息为 "success",说明我们已经成功通过 API 的方式创建了项目。
 
@@ -60,8 +60,8 @@
 
 ### 附:创建项目源码
 
-![api-source01](/img/new_ui/dev/open-api/api_source01.png)
+![api-source01](../../../img/new_ui/dev/open-api/api_source01.png)
 
-![api-source02](/img/new_ui/dev/open-api/api_source02.png)
+![api-source02](../../../img/new_ui/dev/open-api/api_source02.png)
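
As a terminal-based alternative to the Postman walkthrough above, the sketch below creates a project with `curl`; the endpoint path and host are assumptions for illustration, and the token is the one generated in the Security Centre.

```shell
# Hypothetical create-project call: the token travels in the "token" header,
# and projectName/description mirror the Postman body parameters.
curl -X POST "http://localhost:12345/dolphinscheduler/projects" \
  -H "token: ${DS_TOKEN}" \
  -H "Accept: application/json" \
  -d "projectName=test-project" \
  -d "description=created via the open api"
```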
 
 
diff --git a/docs/docs/zh/guide/parameter/context.md b/docs/docs/zh/guide/parameter/context.md
index 2e55ac7414..bbbcd5201c 100644
--- a/docs/docs/zh/guide/parameter/context.md
+++ b/docs/docs/zh/guide/parameter/context.md
@@ -28,7 +28,7 @@ DolphinScheduler 允许在任务间进行参数传递,目前传递方向仅支
 
 创建 Node_A 任务,在自定义参数中添加 output 和 value 参数,并编写如下脚本:
 
-![context-parameter01](/img/new_ui/dev/parameter/context_parameter01.png)
+![context-parameter01](../../../../img/new_ui/dev/parameter/context_parameter01.png)
 
 参数说明:
 
@@ -39,13 +39,13 @@ SHELL 节点定义时当日志检测到 ${setValue(output=1)} 的格式时,会
 
 创建 Node_B 任务,主要用于测试输出上游任务 Node_A 传递的参数。
 
-![context-parameter02](/img/new_ui/dev/parameter/context_parameter02.png)
+![context-parameter02](../../../../img/new_ui/dev/parameter/context_parameter02.png)
 
 #### 创建 SQL 任务,使用参数
 
 完成上述的 SHELL 任务之后,我们可以使用上游所传递的 output 作为 SQL 的查询对象。其中将所查询的 id 重命名为 ID,作为参数输出。
 
-![context-parameter03](/img/new_ui/dev/parameter/context_parameter03.png)
+![context-parameter03](../../../../img/new_ui/dev/parameter/context_parameter03.png)
 
 > 注:如果 SQL 节点的结果只有一行,一个或多个字段,参数的名字需要和字段名称一致。数据类型可选择为除 LIST 以外的其他类型。变量会选择 SQL 查询结果中的列名中与该变量名称相同的列对应的值。
 >
@@ -55,7 +55,7 @@ SHELL 节点定义时当日志检测到 ${setValue(output=1)} 的格式时,会
 
 点击保存工作流图标,并设置全局参数 output 和 value。
 
-![context-parameter03](/img/new_ui/dev/parameter/context_parameter04.png)
+![context-parameter03](../../../../img/new_ui/dev/parameter/context_parameter04.png)
 
 #### 查看运行结果
 
@@ -63,15 +63,15 @@ SHELL 节点定义时当日志检测到 ${setValue(output=1)} 的格式时,会
 
 Node_A 运行结果如下:
 
-![context-log01](/img/new_ui/dev/parameter/context_log01.png)
+![context-log01](../../../../img/new_ui/dev/parameter/context_log01.png)
 
 Node_B 运行结果如下:
 
-![context-log02](/img/new_ui/dev/parameter/context_log02.png)
+![context-log02](../../../../img/new_ui/dev/parameter/context_log02.png)
 
 Node_mysql 运行结果如下:
 
-![context-log03](/img/new_ui/dev/parameter/context_log03.png)
+![context-log03](../../../../img/new_ui/dev/parameter/context_log03.png)
 
 虽然在 Node_A 的脚本中为 output 赋值为 1,但日志中显示的值仍然为 100。但根据[参数优先级](priority.md)的原则:`本地参数 > 上游任务传递的参数 > 全局参数`,在 Node_B 中输出的值为 1。则证明 output 参数参照预期的值在该工作流中传递,并在 Node_mysql 中使用该值完成查询操作。
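
To recap the mechanism, a minimal sketch of the Node_A script: the task declares `output` as an `OUT` custom parameter and hands the value 1 to downstream tasks via `setValue`.

```shell
# When this line appears in the task log, DolphinScheduler captures the value
# and passes the OUT parameter "output" to downstream tasks.
echo '${setValue(output=1)}'
```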
 
diff --git a/docs/docs/zh/guide/parameter/global.md b/docs/docs/zh/guide/parameter/global.md
index 3d91c165ff..aa35413305 100644
--- a/docs/docs/zh/guide/parameter/global.md
+++ b/docs/docs/zh/guide/parameter/global.md
@@ -12,13 +12,13 @@
 
 创建一个 Shell 任务,并在脚本内容中输入 `echo ${dt}`。此时 dt 则为我们需要声明的全局参数。如下图所示:
 
-![global-parameter01](/img/new_ui/dev/parameter/global_parameter01.png)
+![global-parameter01](../../../../img/new_ui/dev/parameter/global_parameter01.png)
 
 ### 保存工作流,并设置全局参数
 
 全局参数配置方式如下:在工作流定义页面,点击“设置全局”右边的加号,填写对应的变量名称和对应的值,保存即可。如下图所示:
 
-![global-parameter02](/img/new_ui/dev/parameter/global_parameter02.png)
+![global-parameter02](../../../../img/new_ui/dev/parameter/global_parameter02.png)
 
 > 注:这里定义的 dt 参数可以被其它任一节点的局部参数引用。
 
@@ -26,5 +26,5 @@
 
 进入任务实例页面,可以通过查看日志,验证任务的执行结果,判断参数是否有效。
 
-![global-parameter03](/img/new_ui/dev/parameter/global_parameter03.png)
+![global-parameter03](../../../../img/new_ui/dev/parameter/global_parameter03.png)
 
diff --git a/docs/docs/zh/guide/parameter/local.md b/docs/docs/zh/guide/parameter/local.md
index 6cddf35fa1..700f230a30 100644
--- a/docs/docs/zh/guide/parameter/local.md
+++ b/docs/docs/zh/guide/parameter/local.md
@@ -20,7 +20,7 @@
 
 本样例展示了如何使用本地参数,打印输出当前日期。创建一个 Shell 任务,并编写脚本内容为 `echo ${dt}`。点击配置栏中的**自定义参数**,配置如下图所示:
 
-![local-parameter01](/img/new_ui/dev/parameter/local_parameter01.png)
+![local-parameter01](../../../../img/new_ui/dev/parameter/local_parameter01.png)
 
 参数说明:
 
@@ -31,7 +31,7 @@
 
 保存工作流并运行,查看 Shell 任务输出日志。
 
-![local-parameter02](/img/new_ui/dev/parameter/local_parameter02.png)
+![local-parameter02](../../../../img/new_ui/dev/parameter/local_parameter02.png)
 
 > 注:本地参数可以在当前任务节点的工作流中,设置其为 OUT 则可以传递给下游的工作流使用,可以参考:[参数传递](context.md)
 
@@ -40,7 +40,7 @@
 如果你想简单 export 参数然后在下游任务中使用它们,你可以在你的任务中使用 `setValue`,你可以将参数统一在一个任务中管理。在 Shell 任务中使用语法
 `echo '${setValue(set_val=123)}'`(**不要忘记单引号**) 并添加新的 `OUT` 自定义参数来 export 它。
 
-![local-parameter-set-val](/img/new_ui/dev/parameter/local_param_set_val.png)
+![local-parameter-set-val](../../../../img/new_ui/dev/parameter/local_param_set_val.png)
 
 你可以在下游任务中使用语法 `echo '${set_val}'` 在获取设定的值。
 
@@ -50,7 +50,7 @@
 “自定义参数”模块中的值,这让程序更加容易维护。您可以在 Shell 任务中使用语法 `echo "#{setValue(set_val_param=${val})}"`(**如果你想要将任何
 变量赋值给 `setValue`** 请不要忘记使用双引号)并添加新的 `IN` 自定义参数用于输入变量 `val` 和 `OUT` 自定义参数用于 export 参数 `set_val_param`。
 
-![local-parameter-set-val-param](/img/new_ui/dev/parameter/local_param_set_val_custom.png)
+![local-parameter-set-val-param](../../../../img/new_ui/dev/parameter/local_param_set_val_custom.png)
 
 你可以在下游任务中使用语法 `echo '${set_val_param}'` 在获取设定的值。
 
@@ -69,6 +69,6 @@ echo "#{setValue(set_val_var=${lines_num})}"
 在 Shell 任务中(**如果你想要将任何变量赋值给 `setValue`** 请不要忘记使用双引号)和 `OUT` 自定义参数用于 export 参数 `set_val_var`。
 
-![local-parameter-set-val-bash](/img/new_ui/dev/parameter/local_param_set_val_bash.png)
+![local-parameter-set-val-bash](../../../../img/new_ui/dev/parameter/local_param_set_val_bash.png)
 
 你可以在下游任务中使用语法 `echo '${set_val_var}'` 在获取设定的值。
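
Putting the pieces together, a task script that exports a shell-computed value might look like the sketch below; the input file path is illustrative, and `set_val_var` must be declared as an `OUT` custom parameter on the task.

```shell
# Compute a value in bash, then export it through setValue; double quotes are
# required when interpolating a shell variable into setValue.
lines_num=$(wc -l < /tmp/input.txt)
echo "#{setValue(set_val_var=${lines_num})}"
```
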
diff --git a/docs/docs/zh/guide/parameter/priority.md b/docs/docs/zh/guide/parameter/priority.md
index 8bf7506c08..a9d779b2a5 100644
--- a/docs/docs/zh/guide/parameter/priority.md
+++ b/docs/docs/zh/guide/parameter/priority.md
@@ -19,21 +19,21 @@ DolphinScheduler 中所涉及的参数值的定义可能来自三种类型:
 
 1:先以 shell 节点解释第一种情况
 
-![priority-parameter01](/img/new_ui/dev/parameter/priority_parameter01.png)
+![priority-parameter01](../../../../img/new_ui/dev/parameter/priority_parameter01.png)
 
 节点 【useParam】可以使用到节点【createParam】中设置的变量。而节点 【useParam】与节点【noUseParam】中并没有依赖关系,所以并不会获取到节点【noUseParam】的变量。上图中只是以 shell 节点作为例子,其他类型节点具有相同的使用规则。
 
-![priority-parameter02](/img/new_ui/dev/parameter/priority_parameter02.png)
+![priority-parameter02](../../../../img/new_ui/dev/parameter/priority_parameter02.png)
 
 其中节点【createParam】在使用变量时直接使用即可。另外该节点设置了 "key" 和 "key1" 两个变量,这里用户用定义了一个与上游节点传递的变量名相同的变量 key1,并且赋值为 "12",但是由于我们设置的优先级的关系,这里的值 "12" 会被使用,最终上游节点设置的变量值会被抛弃。
 
 2:我们再以 sql 节点来解释另外一种情况
 
-![priority-parameter03](/img/new_ui/dev/parameter/priority_parameter03.png)
+![priority-parameter03](../../../../img/new_ui/dev/parameter/priority_parameter03.png)
 
 节点【use_create】的定义如下:
 
-![priority-parameter04](/img/new_ui/dev/parameter/priority_parameter04.png)
+![priority-parameter04](../../../../img/new_ui/dev/parameter/priority_parameter04.png)
 
 "status" 是当前节点设置的节点的自有变量。但是用户在保存工作流时也同样设置了 "status" 变量(全局参数),并且赋值为 -1。那在该 SQL 执行时,status 的值为优先级更高的 2。抛弃了全局变量中的值。
 
diff --git a/docs/docs/zh/guide/project/project-list.md b/docs/docs/zh/guide/project/project-list.md
index e5f8e94468..9a8646b025 100644
--- a/docs/docs/zh/guide/project/project-list.md
+++ b/docs/docs/zh/guide/project/project-list.md
@@ -4,7 +4,7 @@
 
 点击"项目管理"进入项目管理页面,点击“创建项目”按钮,输入项目名称,项目描述,点击“提交”,创建新的项目。
 
-![project](/img/new_ui/dev/project/project-list.png)
+![project](../../../../img/new_ui/dev/project/project-list.png)
 
 ## 项目首页
 
@@ -14,4 +14,4 @@
 - **流程状态统计**:在指定时间范围内,统计工作流实例中状态为提交成功、正在运行、准备暂停、暂停、准备停止、停止、失败、成功、需要容错、kill、等待线程的个数
 - **工作流定义统计**:统计用户创建的工作流定义及管理员授予该用户的工作流定义
 
-![project-overview](/img/new_ui/dev/project/project-overview.png)
+![project-overview](../../../../img/new_ui/dev/project/project-overview.png)
diff --git a/docs/docs/zh/guide/project/task-definition.md b/docs/docs/zh/guide/project/task-definition.md
index 8079484378..12b775ec49 100644
--- a/docs/docs/zh/guide/project/task-definition.md
+++ b/docs/docs/zh/guide/project/task-definition.md
@@ -3,7 +3,7 @@
 任务定义允许您在基于任务级别而不是在工作流中操作修改任务。再此之前,我们已经有了工作流级别的任务编辑器,你可以在[工作流定义](workflow-definition.md)
 单击特定的工作流,然后编辑任务的定义。当您想编辑特定的任务定义但不记得它属于哪个工作流时,这是令人沮丧的。所以我们决定在 `任务` 菜单下添加 `任务定义` 视图。
 
-![task-definition](/img/new_ui/dev/project/task-definition.jpg)
+![task-definition](../../../../img/new_ui/dev/project/task-definition.jpg)
 
 在该视图中,您可以通过单击 `操作` 列中的相关按钮来进行创建、查询、更新、删除任务定义。最令人兴奋的是您可以通过通配符进行全部任务查询,当您只
 记得任务名称但忘记它属于哪个工作流时是非常有用的。也支持通过任务名称结合使用 `任务类型` 或 `工作流程名称` 进行查询。
diff --git a/docs/docs/zh/guide/project/task-instance.md b/docs/docs/zh/guide/project/task-instance.md
index b814dce567..205086d02a 100644
--- a/docs/docs/zh/guide/project/task-instance.md
+++ b/docs/docs/zh/guide/project/task-instance.md
@@ -2,8 +2,8 @@
 
 - 点击项目管理->工作流->任务实例,进入任务实例页面,如下图所示,点击工作流实例名称,可跳转到工作流实例DAG图查看任务状态。
 
-![task-instance](/img/new_ui/dev/project/task-instance.png)
+![task-instance](../../../../img/new_ui/dev/project/task-instance.png)
 
 - 查看日志:点击操作列中的“查看日志”按钮,可以查看任务执行的日志情况。
 
-![task-log](/img/new_ui/dev/project/task-log.png)
+![task-log](../../../../img/new_ui/dev/project/task-log.png)
diff --git a/docs/docs/zh/guide/project/workflow-definition.md b/docs/docs/zh/guide/project/workflow-definition.md
index 7832e94d88..930986b27d 100644
--- a/docs/docs/zh/guide/project/workflow-definition.md
+++ b/docs/docs/zh/guide/project/workflow-definition.md
@@ -4,11 +4,11 @@
 
 - 点击项目管理->工作流->工作流定义,进入工作流定义页面,点击“创建工作流”按钮,进入**工作流DAG编辑**页面,如下图所示:
 
-  ![workflow-dag](/img/new_ui/dev/project/workflow-dag.png)
+  ![workflow-dag](../../../../img/new_ui/dev/project/workflow-dag.png)
 
-- 工具栏中拖拽 <img src="/img/tasks/icons/shell.png" width="15"/> 到画板中,新增一个Shell任务,如下图所示:
+- 工具栏中拖拽 <img src="../../../../img/tasks/icons/shell.png" width="15"/> 到画板中,新增一个Shell任务,如下图所示:
   
-  ![demo-shell-simple](/img/tasks/demo/shell.jpg)
+  ![demo-shell-simple](../../../../img/tasks/demo/shell.jpg)
   
 - **添加 Shell 任务的参数设置:**
 
@@ -22,15 +22,15 @@
 
 - **配置任务之间的依赖关系:** 点击任务节点的右侧加号连接任务;如下图所示,任务 Node_B 和任务 Node_C 并行执行,当任务 Node_A 执行完,任务 Node_B、Node_C 会同时执行。
 
-  ![workflow-dependent](/img/new_ui/dev/project/workflow-dependent.png)
+  ![workflow-dependent](../../../../img/new_ui/dev/project/workflow-dependent.png)
 
-- **删除依赖关系:** 点击右上角"箭头"图标<img src="/img/arrow.png" width="35"/>,选中连接线,点击右上角"删除"图标<img src="/img/delete.png" width="35"/>,删除任务间的依赖关系。
+- **删除依赖关系:** 点击右上角"箭头"图标<img src="../../../../img/arrow.png" width="35"/>,选中连接线,点击右上角"删除"图标<img src="../../../../img/delete.png" width="35"/>,删除任务间的依赖关系。
 
-  ![workflow-delete](/img/new_ui/dev/project/workflow-delete.png)
+  ![workflow-delete](../../../../img/new_ui/dev/project/workflow-delete.png)
 
 - **保存工作流定义:** 点击”保存“按钮,弹出"设置DAG图名称"弹框,如下图所示,输入工作流定义名称,工作流定义描述,设置全局参数(选填,参考[全局参数](../parameter/global.md)),点击"添加"按钮,工作流定义创建成功。
 
-  ![workflow-save](/img/new_ui/dev/project/workflow-save.png)
+  ![workflow-save](../../../../img/new_ui/dev/project/workflow-save.png)
 
   > 其他类型任务,请参考 [任务节点类型和参数设置](#TaskParamers)。 <!-- markdown-link-check-disable-line -->
 
@@ -38,7 +38,7 @@
 
 点击项目管理->工作流->工作流定义,进入工作流定义页面,如下图所示:
 
-![workflow-list](/img/new_ui/dev/project/workflow-list.png)
+![workflow-list](../../../../img/new_ui/dev/project/workflow-list.png)
 
 工作流定义列表的操作功能如下:
 
@@ -52,18 +52,18 @@
 - **下载:** 下载工作流定义到本地。
 - **树形图:** 以树形结构展示任务节点的类型及任务状态,如下图所示:
 
-![workflow-tree](/img/new_ui/dev/project/workflow-tree.png)
+![workflow-tree](../../../../img/new_ui/dev/project/workflow-tree.png)
 
 ## 运行工作流
 
-- 点击项目管理->工作流->工作流定义,进入工作流定义页面,如下图所示,点击"上线"按钮<img src="/img/online.png" width="35"/>,上线工作流。
+- 点击项目管理->工作流->工作流定义,进入工作流定义页面,如下图所示,点击"上线"按钮<img src="../../../../img/online.png" width="35"/>,上线工作流。
 
-![workflow-online](/img/new_ui/dev/project/workflow-online.png)
+![workflow-online](../../../../img/new_ui/dev/project/workflow-online.png)
 
 
 - 点击”运行“按钮,弹出启动参数设置弹框,如下图所示,设置启动参数,点击弹框中的"运行"按钮,工作流开始运行,工作流实例页面生成一条工作流实例。
 
-![workflow-run](/img/new_ui/dev/project/workflow-run.png)
+![workflow-run](../../../../img/new_ui/dev/project/workflow-run.png)
   
   工作流运行参数说明: 
        
@@ -78,7 +78,7 @@
   * 补数:包括串行补数、并行补数 2 种模式。串行补数:指定时间范围内,从开始日期至结束日期依次执行补数,依次生成N条流程实例;并行补数:指定时间范围内,多天同时进行补数,同时生成 N 条流程实例。 
     * 补数: 执行指定日期的工作流定义,可以选择补数时间范围(当定时配置未上线时默认会根据所选时间范围进行每天一次的补数,如果定时配置已上线则会根据所选的时间范围结合定时配置进行补数),比如需要补 5 月 9 号到 5 月 10 号的数据,如下图所示: 
 
-    ![workflow-date](/img/new_ui/dev/project/workflow-date.png)
+    ![workflow-date](../../../../img/new_ui/dev/project/workflow-date.png)
 
     > 串行模式:补数从 5 月 9 号到 5 月 10 号依次执行,依次在流程实例页面生成十条流程实例;
 
@@ -86,20 +86,20 @@
 
 ## 工作流定时
 
-- 创建定时:点击项目管理->工作流->工作流定义,进入工作流定义页面,上线工作流,点击"定时"按钮<img src="/img/timing.png" width="35"/>,弹出定时参数设置弹框,如下图所示:
+- 创建定时:点击项目管理->工作流->工作流定义,进入工作流定义页面,上线工作流,点击"定时"按钮<img src="../../../../img/timing.png" width="35"/>,弹出定时参数设置弹框,如下图所示:
 
-  ![workflow-time01](/img/new_ui/dev/project/workflow-time01.png)
+  ![workflow-time01](../../../../img/new_ui/dev/project/workflow-time01.png)
 
 - 选择起止时间。在起止时间范围内,定时运行工作流;不在起止时间范围内,不再产生定时工作流实例。
 - 添加一个每隔 5 分钟执行一次的定时,如下图所示:
 
-  ![workflow-time02](/img/new_ui/dev/project/workflow-time02.png)
+  ![workflow-time02](../../../../img/new_ui/dev/project/workflow-time02.png)
 
 - 失败策略、通知策略、流程优先级、Worker 分组、通知组、收件人、抄送人同工作流运行参数。
 - 点击"创建"按钮,创建定时成功,此时定时状态为"**下线**",定时需**上线**才生效。
-- 定时上线:点击"定时管理"按钮<img src="/img/timeManagement.png" width="35"/>,进入定时管理页面,点击"上线"按钮,定时状态变为"上线",如下图所示,工作流定时生效。
+- 定时上线:点击"定时管理"按钮<img src="../../../../img/timeManagement.png" width="35"/>,进入定时管理页面,点击"上线"按钮,定时状态变为"上线",如下图所示,工作流定时生效。
 
-  ![workflow-time03](/img/new_ui/dev/project/workflow-time03.png)
+  ![workflow-time03](../../../../img/new_ui/dev/project/workflow-time03.png)
 
 ## 导入工作流
 
diff --git a/docs/docs/zh/guide/project/workflow-instance.md b/docs/docs/zh/guide/project/workflow-instance.md
index 1ca44c862d..df348ed0a0 100644
--- a/docs/docs/zh/guide/project/workflow-instance.md
+++ b/docs/docs/zh/guide/project/workflow-instance.md
@@ -4,45 +4,45 @@
 
 - 点击项目管理->工作流->工作流实例,进入工作流实例页面,如下图所示:
 
-![workflow-instance](/img/new_ui/dev/project/workflow-instance.png)
+![workflow-instance](../../../../img/new_ui/dev/project/workflow-instance.png)
           
 - 点击工作流名称,进入DAG查看页面,查看任务执行状态,如下图所示。
 
-![instance-state](/img/new_ui/dev/project/instance-state.png)
+![instance-state](../../../../img/new_ui/dev/project/instance-state.png)
 
 ## 查看任务日志
 
 - 进入工作流实例页面,点击工作流名称,进入DAG查看页面,双击任务节点,如下图所示:
 
-![instance-log01](/img/new_ui/dev/project/instance-log01.png)
+![instance-log01](../../../../img/new_ui/dev/project/instance-log01.png)
 
 - 点击"查看日志",弹出日志弹框,如下图所示,任务实例页面也可查看任务日志,参考[任务查看日志](./task-instance.md)。
 
-![instance-log02](/img/new_ui/dev/project/instance-log02.png)
+![instance-log02](../../../../img/new_ui/dev/project/instance-log02.png)
 
 ## 查看任务历史记录
 
 - 点击项目管理->工作流->工作流实例,进入工作流实例页面,点击工作流名称,进入工作流 DAG 页面;
 - 双击任务节点,如下图所示,点击"查看历史",跳转到任务实例页面,并展示该工作流实例运行的任务实例列表
 
-![instance-history](/img/new_ui/dev/project/instance-history.png)
+![instance-history](../../../../img/new_ui/dev/project/instance-history.png)
 
 ## 查看运行参数
 
 - 点击项目管理->工作流->工作流实例,进入工作流实例页面,点击工作流名称,进入工作流 DAG 页面; 
-- 点击左上角图标<img src="/img/run_params_button.png" width="35"/>,查看工作流实例的启动参数;点击图标<img src="/img/global_param.png" width="35"/>,查看工作流实例的全局参数和局部参数,如下图所示:
+- 点击左上角图标<img src="../../../../img/run_params_button.png" width="35"/>,查看工作流实例的启动参数;点击图标<img src="../../../../img/global_param.png" width="35"/>,查看工作流实例的全局参数和局部参数,如下图所示:
 
-![instance-parameter](/img/new_ui/dev/project/instance-parameter.png)
+![instance-parameter](../../../../img/new_ui/dev/project/instance-parameter.png)
 
 ## 工作流实例操作功能
 
 点击项目管理->工作流->工作流实例,进入工作流实例页面,如下图所示:          
 
-![workflow-instance](/img/new_ui/dev/project/workflow-instance.png)
+![workflow-instance](../../../../img/new_ui/dev/project/workflow-instance.png)
 
 - **编辑:** 只能编辑 成功/失败/停止 状态的流程。点击"编辑"按钮或工作流实例名称进入 DAG 编辑页面,编辑后点击"保存"按钮,弹出保存 DAG 弹框,如下图所示,修改流程定义信息,在弹框中勾选"是否更新工作流定义",保存后则将实例修改的信息更新到工作流定义;若不勾选,则不更新工作流定义。
        <p align="center">
-         <img src="/img/editDag.png" width="80%" />
+         <img src="../../../../img/editDag.png" width="80%" />
        </p>
 - **Rerun:** Re-execute a terminated process.
 - **Recover Failure:** For a failed process, resume execution from the failed node.
@@ -52,4 +52,4 @@
 - **Delete:** Delete the workflow instance and the task instances under it.
 - **Gantt Chart:** The vertical axis of the Gantt chart is the topological ordering of task instances within the workflow instance, and the horizontal axis is their running time, as shown:
 
-![instance-gantt](/img/new_ui/dev/project/instance-gantt.png)
+![instance-gantt](../../../../img/new_ui/dev/project/instance-gantt.png)
diff --git a/docs/docs/zh/guide/resource/file-manage.md b/docs/docs/zh/guide/resource/file-manage.md
index 76ab354a70..35eb002009 100644
--- a/docs/docs/zh/guide/resource/file-manage.md
+++ b/docs/docs/zh/guide/resource/file-manage.md
@@ -8,25 +8,25 @@
 
 ## Basic Operations
 
-![file-manage](/img/new_ui/dev/resource/file-manage.png)
+![file-manage](../../../../img/new_ui/dev/resource/file-manage.png)
 
 ### Create a File
 
 The following file types are supported: txt, log, sh, conf, cfg, py, java, sql, xml, hql, properties.
 
-![create-file](/img/new_ui/dev/resource/create-file.png)
+![create-file](../../../../img/new_ui/dev/resource/create-file.png)
 
 ### Upload Files
 
 Upload a file: click the "Upload File" button, then drag the file into the upload area; the file name is automatically completed with the uploaded file's name.
 
-![upload-file](/img/new_ui/dev/resource/upload-file.png)
+![upload-file](../../../../img/new_ui/dev/resource/upload-file.png)
 
 ### View Files
 
 For viewable file types, click the file name to see the file details.
 
-![file_detail](/img/tasks/demo/file_detail.png)
+![file_detail](../../../../img/tasks/demo/file_detail.png)
 
 ### Download Files
 
@@ -34,7 +34,7 @@
 
 ### Rename Files
 
-![rename-file](/img/new_ui/dev/resource/rename-file.png)
+![rename-file](../../../../img/new_ui/dev/resource/rename-file.png)
 
 ### Delete Files
 
@@ -44,7 +44,7 @@
 
 Click the "Re-upload File" button in the file list to upload the file again; drag the file into the upload area and the file name is automatically completed with the uploaded file's name.
 
-![reuplod_file](/img/reupload_file_en.png)
+![reupload_file](../../../../img/reupload_file_en.png)
 
 > Note: when uploading, creating, or renaming a file, neither the file name nor the source file name (when uploading) may contain the special characters `.` or `/`.
 
@@ -56,7 +56,7 @@
 
 Create a shell file that prints "hello world".
 
-![create-shell](/img/new_ui/dev/resource/demo/file-demo01.png)
+![create-shell](../../../../img/new_ui/dev/resource/demo/file-demo01.png)
 
 ### Create a Workflow to Execute the File
 
@@ -65,13 +65,13 @@
 - Script: `sh hello.sh`
 - Resource: select `hello.sh`
 
-![use-shell](/img/new_ui/dev/resource/demo/file-demo02.png)
+![use-shell](../../../../img/new_ui/dev/resource/demo/file-demo02.png)
 
 ### View the Result
 
 You can check the node's run log in the workflow instance, as shown below:
 
-![log-shell](/img/new_ui/dev/resource/demo/file-demo03.png)
+![log-shell](../../../../img/new_ui/dev/resource/demo/file-demo03.png)
 
 
 
diff --git a/docs/docs/zh/guide/resource/task-group.md b/docs/docs/zh/guide/resource/task-group.md
index 535024a848..fc77f41373 100644
--- a/docs/docs/zh/guide/resource/task-group.md
+++ b/docs/docs/zh/guide/resource/task-group.md
@@ -6,11 +6,11 @@
 
 #### Create a Task Group
 
-![taskGroup](/img/new_ui/dev/resource/taskGroup.png)
+![taskGroup](../../../../img/new_ui/dev/resource/taskGroup.png)
 
 Click Resource Center -> Task Group Management -> Task Group Configuration -> Create Task Group.
 
-![create-taskGroup](/img/new_ui/dev/resource/create-taskGroup.png) 
+![create-taskGroup](../../../../img/new_ui/dev/resource/create-taskGroup.png) 
 
 Enter the information shown in the figure, where:
 
@@ -22,11 +22,11 @@
 
 #### View the Task Group Queue
 
-![view-queue](/img/new_ui/dev/resource/view-queue.png) 
+![view-queue](../../../../img/new_ui/dev/resource/view-queue.png) 
 
 Click the button to view task group usage information.
 
-![view-queue](/img/new_ui/dev/resource/view-groupQueue.png) 
+![view-groupQueue](../../../../img/new_ui/dev/resource/view-groupQueue.png)
 
 #### Using a Task Group
 
@@ -34,7 +34,7 @@
 
 Take a shell node as an example:
 
-![use-queue](/img/new_ui/dev/resource/use-queue.png)         
+![use-queue](../../../../img/new_ui/dev/resource/use-queue.png)         
 
 For task group configuration, you only need to fill in the part inside the red box, where:
 
@@ -54,4 +54,4 @@ Master 在分发任务时判断该任务是否配置了任务组,如果任务
 
 #### Task Group Flowchart
 
-![task_group](/img/task_group_process.png)
+![task_group](../../../../img/task_group_process.png)
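
To make the queueing behavior concrete, here is a minimal sketch, not DolphinScheduler source, of the rule described above: a task group has a fixed resource capacity, a task either takes a slot or waits, and a released slot wakes the waiting task with the highest priority.

```python
# Illustrative model of a task group: fixed capacity plus a priority queue.
import heapq
from typing import Optional

class TaskGroup:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.in_use = 0
        self.waiting = []                      # max-heap via negated priority

    def acquire(self, task: str, priority: int) -> bool:
        """Return True if the task can run now; otherwise queue it."""
        if self.in_use < self.capacity:
            self.in_use += 1
            return True
        heapq.heappush(self.waiting, (-priority, task))
        return False

    def release(self) -> Optional[str]:
        """Free a slot and wake the highest-priority waiting task, if any."""
        self.in_use -= 1
        if self.waiting:
            _, task = heapq.heappop(self.waiting)
            self.in_use += 1
            return task
        return None

group = TaskGroup(capacity=1)
print(group.acquire("task_a", priority=1))     # True: slot taken
print(group.acquire("task_b", priority=9))     # False: queued
print(group.release())                         # 'task_b' wakes up first
```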
diff --git a/docs/docs/zh/guide/resource/udf-manage.md b/docs/docs/zh/guide/resource/udf-manage.md
index 97b16f7408..27c24b3e26 100644
--- a/docs/docs/zh/guide/resource/udf-manage.md
+++ b/docs/docs/zh/guide/resource/udf-manage.md
@@ -18,7 +18,7 @@
 - Package and class name: enter the fully qualified class name of the UDF.
 - UDF resource: set the resource file corresponding to the UDF being created.
 
-![create-udf](/img/new_ui/dev/resource/create-udf.png)
+![create-udf](../../../../img/new_ui/dev/resource/create-udf.png)
 
 ## Task Example
 
@@ -26,13 +26,13 @@
 
 You can customize UDF functions according to actual production needs. Here we write a function that appends "HelloWorld" to the end of any string, as shown below:
 
-![code-udf](/img/new_ui/dev/resource/demo/udf-demo01.png)
+![code-udf](../../../../img/new_ui/dev/resource/demo/udf-demo01.png)
 
 ### Configure the UDF Function
 
 Before configuring the UDF function, upload the required jar package via resource management; then go to function management and fill in the relevant information, as shown below:
 
-![conf-udf](/img/new_ui/dev/resource/demo/udf-demo02.png)
+![conf-udf](../../../../img/new_ui/dev/resource/demo/udf-demo02.png)
 
 ### Use the UDF Function
 
@@ -43,7 +43,7 @@
 - SQL statement: `select HwUdf("abc");` The function is used the same way as a built-in function; just call it by name.
 - UDF function: select the one configured in the resource center.
 
-![use-udf](/img/new_ui/dev/resource/demo/udf-demo03.png)
+![use-udf](../../../../img/new_ui/dev/resource/demo/udf-demo03.png)
 
 
 
diff --git a/docs/docs/zh/guide/security.md b/docs/docs/zh/guide/security.md
index 8dadd18cde..61666ccce3 100644
--- a/docs/docs/zh/guide/security.md
+++ b/docs/docs/zh/guide/security.md
@@ -8,7 +8,7 @@
 - Queues are used when running programs such as spark and mapreduce that require a "queue" parameter.
 - As an administrator, go to Security Center -> Queue Management and click the "Create Queue" button to create a queue.
 
-![create-queue](/img/new_ui/dev/security/create-queue.png)
+![create-queue](../../../img/new_ui/dev/security/create-queue.png)
 
 ## Add a Tenant
 
@@ -16,7 +16,7 @@
 - Tenant code: **the tenant code is a user on Linux; it is unique and cannot be duplicated.**
 - As an administrator, go to Security Center -> Tenant Management and click the "Create Tenant" button to create a tenant.
 
-![create-tenant](/img/new_ui/dev/security/create-tenant.png)
+![create-tenant](../../../img/new_ui/dev/security/create-tenant.png)
 
 ## Create a Normal User
 
@@ -28,7 +28,7 @@
 
 - Go to Security Center -> User Management and click the "Create User" button to create a user.
 
-![create-user](/img/new_ui/dev/security/create-user.png)
+![create-user](../../../img/new_ui/dev/security/create-user.png)
   
 ### Edit User Information
 
@@ -46,14 +46,14 @@
 * The alert group is a parameter set at startup; after the process finishes, the process status and other information are sent to the alert group by email.
 * As an administrator, go to Security Center -> Alert Group Management and click the "Create Alert Group" button to create an alert group.
 
-![create-alarmInstance](/img/new_ui/dev/security/create-alarmInstance.png)
+![create-alarmInstance](../../../img/new_ui/dev/security/create-alarmInstance.png)
 
 ## Token Management
 
 > Since the backend APIs require a login check, token management provides a way to operate the system by calling the APIs directly.
 - As an administrator, go to Security Center -> Token Management, click the "Create Token" button, choose an expiration time and a user, click "Generate Token", then click "Submit"; the token for the selected user is created.
 
-![create-token](/img/new_ui/dev/security/create-token.png)
+![create-token](../../../img/new_ui/dev/security/create-token.png)
   
 - After a normal user logs in, click the user info in the username dropdown to enter the token management page, choose an expiration time, click "Generate Token", then click "Submit"; the user's token is created.
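
Once a token exists, any backend API can be called with it. A hedged sketch using the `requests` package follows; the endpoint path, port, and response shape here are illustrative, so check the Open API documentation of your DolphinScheduler version for the exact routes.

```python
# Call the API with a token; placeholders throughout.
import requests

DS_URL = "http://localhost:12345/dolphinscheduler"   # assumed API address
TOKEN = "<your-generated-token>"                     # from token management

resp = requests.get(
    f"{DS_URL}/projects",
    headers={"token": TOKEN},                        # token sent as a header
    params={"pageNo": 1, "pageSize": 10},
)
resp.raise_for_status()
print(resp.json())
```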
     
@@ -102,11 +102,11 @@
  
 - As an administrator, go to Security Center -> User Management and click the "Authorize" button of the user to be authorized, as shown below:
 
-![user-authorize](/img/new_ui/dev/security/user-authorize.png)
+![user-authorize](../../../img/new_ui/dev/security/user-authorize.png)
 
 - Select projects to grant project permissions.
 
-![project-authorize](/img/new_ui/dev/security/project-authorize.png)
+![project-authorize](../../../img/new_ui/dev/security/project-authorize.png)
   
 - Resource, data source, and UDF function authorization work the same way as project authorization.
 
@@ -141,13 +141,13 @@ worker.groups=default,test
 
 - The environment configuration is equivalent to the configuration in the dolphinscheduler_env.sh file.
 
-![create-environment](/img/new_ui/dev/security/create-environment.png)
+![create-environment](../../../img/new_ui/dev/security/create-environment.png)
 
 > Use an environment
 
 - When creating a task node in a workflow definition, select a worker group and the environment corresponding to that worker group; when the task runs, the worker applies the environment first and then executes the task.
 
-![use-environment](/img/new_ui/dev/security/use-environment.png)
+![use-environment](../../../img/new_ui/dev/security/use-environment.png)
 
 ## Namespace Management
 
@@ -159,4 +159,4 @@ worker.groups=default,test
 
 - After creation and authorization, the namespace is selectable from the dropdown in the relevant k8s tasks. If the k8s cluster name is `ds_null_k8s`, it runs in test mode and does not actually operate on the cluster.
 
-![create-environment](/img/new_ui/dev/security/create-namespace.png)
+![create-namespace](../../../img/new_ui/dev/security/create-namespace.png)
diff --git a/docs/docs/zh/guide/start/docker.md b/docs/docs/zh/guide/start/docker.md
index 6c11b9a0e9..4666558fa6 100644
--- a/docs/docs/zh/guide/start/docker.md
+++ b/docs/docs/zh/guide/start/docker.md
@@ -118,7 +118,7 @@ $ docker run -d --name dolphinscheduler-alert-server \
 Access DolphinScheduler: the link above redirects to the login page. The default DolphinScheduler username and password are `admin` and `dolphinscheduler123`.
 For more operations, see the user manual [Quick Start](../start/quick-start.md).
 
-![login](/img/new_ui/dev/quick-start/login.png)
+![login](../../../../img/new_ui/dev/quick-start/login.png)
 
 > Note: if you start the services by reusing existing PostgreSQL and ZooKeeper services, and the services are spread across multiple machines,
 > change the address above to the hostname or IP where your API container runs.
diff --git a/docs/docs/zh/guide/start/quick-start.md b/docs/docs/zh/guide/start/quick-start.md
index 5de08cc607..1b960b1b9e 100644
--- a/docs/docs/zh/guide/start/quick-start.md
+++ b/docs/docs/zh/guide/start/quick-start.md
@@ -6,38 +6,38 @@
 * Log in as the administrator
   > URL: http://localhost:12345/dolphinscheduler/ui Username/password: admin/dolphinscheduler123
 
-![login](/img/new_ui/dev/quick-start/login.png)
+![login](../../../../img/new_ui/dev/quick-start/login.png)
 
 * Create a queue
 
-![create-queue](/img/new_ui/dev/quick-start/create-queue.png)
+![create-queue](../../../../img/new_ui/dev/quick-start/create-queue.png)
 
 * Create a tenant
 
-![create-tenant](/img/new_ui/dev/quick-start/create-tenant.png)
+![create-tenant](../../../../img/new_ui/dev/quick-start/create-tenant.png)
 
 * Create a normal user
 
-![create-user](/img/new_ui/dev/quick-start/create-user.png)
+![create-user](../../../../img/new_ui/dev/quick-start/create-user.png)
 
 * Create an alert instance
 
-![create-alarmInstance](/img/new_ui/dev/quick-start/create-alarmInstance.png)
+![create-alarmInstance](../../../../img/new_ui/dev/quick-start/create-alarmInstance.png)
 
 * Create an alert group
 
-![create-alarmGroup](/img/new_ui/dev/quick-start/create-alarmGroup.png)
+![create-alarmGroup](../../../../img/new_ui/dev/quick-start/create-alarmGroup.png)
 
  * Create a worker group
 
-![create-workerGroup](/img/new_ui/dev/quick-start/create-workerGroup.png)
+![create-workerGroup](../../../../img/new_ui/dev/quick-start/create-workerGroup.png)
 
 * Create an environment
-![create-environment](/img/new_ui/dev/quick-start/create-environment.png)
+![create-environment](../../../../img/new_ui/dev/quick-start/create-environment.png)
  
  * Create a token
 
-![create-token](/img/new_ui/dev/quick-start/create-token.png)
+![create-token](../../../../img/new_ui/dev/quick-start/create-token.png)
 
 
 * Log in as the normal user
@@ -45,16 +45,16 @@
 
 * Project Management -> Create Project -> click the project name
 
-![project](/img/new_ui/dev/quick-start/project.png)
+![project](../../../../img/new_ui/dev/quick-start/project.png)
 
   * Click Workflow Definition -> Create Workflow Definition -> bring the workflow definition online
 
 <p align="center">
-   <img src="/img/dag1.png" width="60%" />
+   <img src="../../../../img/dag1.png" width="60%" />
  </p>
 
   * Run the workflow definition -> click Workflow Instance -> click the workflow instance name -> double-click a task node -> view the task execution log
 
  <p align="center">
-   <img src="/img/task-log.png" width="60%" />
+   <img src="../../../../img/task-log.png" width="60%" />
 </p>
diff --git a/docs/docs/zh/guide/task/conditions.md b/docs/docs/zh/guide/task/conditions.md
index c7fef6d6f1..1c70b7872c 100644
--- a/docs/docs/zh/guide/task/conditions.md
+++ b/docs/docs/zh/guide/task/conditions.md
@@ -5,7 +5,7 @@ Conditions 是一个条件节点,根据上游任务运行状态,判断应该
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/conditions.png" width="20"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/conditions.png" width="20"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -41,13 +41,13 @@ Conditions 是一个条件节点,根据上游任务运行状态,判断应该
 - Node_Success: a shell task that prints "success"; the branch taken when Node_A succeeds.
 - Node_False: a shell task that prints "false"; the branch taken when Node_A fails.
 
-![condition_task01](/img/tasks/demo/condition_task01.png)
+![condition_task01](../../../../img/tasks/demo/condition_task01.png)
 
 ### 2. Check the Execution Result
 
 After creating the workflow, bring it online and run it. The execution status of each task can be viewed on the workflow instance page, as shown below:
 
-![condition_task02](/img/tasks/demo/condition_task02.png)
+![condition_task02](../../../../img/tasks/demo/condition_task02.png)
 
 In the figure above, the task nodes marked with a green check are the ones that executed successfully.
 
@@ -57,5 +57,5 @@ Conditions 是一个条件节点,根据上游任务运行状态,判断应该
 - The Conditions task, and any workflow containing it, does not support the copy operation.
 - An upstream task of Conditions must not connect to its branch nodes, which would confuse the logic and violate DAG scheduling. The situations shown below are **wrong**.
 
-![condition_task03](/img/tasks/demo/condition_task03.png)
-![condition_task04](/img/tasks/demo/condition_task04.png)
+![condition_task03](../../../../img/tasks/demo/condition_task03.png)
+![condition_task04](../../../../img/tasks/demo/condition_task04.png)
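
Conceptually, the routing rule of a Conditions node can be summarized by the small sketch below (not DolphinScheduler source): upstream states are combined with AND/OR, and the result selects the success or failure branch.

```python
# Combine upstream success flags and pick a branch.
from typing import Dict

def route(upstream_states: Dict[str, bool], mode: str = "AND") -> str:
    """Return the branch to take given upstream success flags."""
    results = upstream_states.values()
    passed = all(results) if mode == "AND" else any(results)
    return "Node_Success" if passed else "Node_False"

print(route({"Node_A": True}))    # -> Node_Success
print(route({"Node_A": False}))   # -> Node_False
```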
diff --git a/docs/docs/zh/guide/task/datax.md b/docs/docs/zh/guide/task/datax.md
index fa1d62a42c..7f9248e9dd 100644
--- a/docs/docs/zh/guide/task/datax.md
+++ b/docs/docs/zh/guide/task/datax.md
@@ -1,63 +1,63 @@
-# DATAX Node
-
-## Overview
-
-The DataX task type is used to run DataX programs. For a DataX node, the worker parses the given json file by executing `${DATAX_HOME}/bin/datax.py`.
-
-## Create Task
-
-- Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/tasks/icons/datax.png" width="15"/> task node from the toolbar onto the canvas.
-
-## Task Parameters
-
-- Node name: the name of the task node; node names must be unique within a workflow definition.
-- Run flag: indicates whether the node can be scheduled normally; turn on the "prohibit execution" switch if it should not run.
-- Description: describes what the node does.
-- Task priority: when worker threads are insufficient, tasks run from high to low priority; tasks of equal priority run first in, first out.
-- Worker group: the task is dispatched to a machine in the chosen worker group; choosing Default picks a random worker.
-- Environment name: the environment in which the script runs.
-- Number of failed retries: how many times a failed task is resubmitted.
-- Failed retry interval: the interval, in minutes, between resubmissions of a failed task.
-- Delayed execution time: how long, in minutes, task execution is delayed.
-- Timeout alarm: when "timeout alarm" and "timeout failure" are checked and the task exceeds the "timeout duration", an alert email is sent and the task execution fails.
-- Custom template: when the built-in data sources do not meet your needs, you can customize the json configuration of the datax node.
-- json: the json configuration file for the DataX sync job.
-- Custom parameters: the custom parameter types and data types are the same as for the stored procedure task type; the difference is that the SQL task type's custom parameters replace the ${variable} placeholders in the sql statement.
-- Data source: the data source from which data is extracted.
-- sql statement: the sql used to extract data from the source. At run time the node parses the sql, resolves the queried column names, and maps them to the target table's columns; when source and target column names differ, convert them with column aliases (as).
-- Target database: the database the data is synced to.
-- Target pre-sql: sql executed before the main sql statement (on the target database).
-- Target post-sql: sql executed after the main sql statement (on the target database).
-- Rate limit (bytes): limits the number of bytes queried.
-- Rate limit (records): limits the number of records queried.
-- Runtime memory: the minimum and maximum memory can be configured to match the actual production environment.
-- Predecessor task: the selected predecessor tasks become upstream of the current task.
-
-## Task Example
-
-This example demonstrates importing data from Hive into MySQL.
-
-### Configure the DataX Environment in DolphinScheduler
-
-To use the DataX task type in production, first configure the required environment in the configuration file `/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
-
-![datax_task01](/img/tasks/demo/datax_task01.png)
-
-After the environment is configured, DolphinScheduler needs to be restarted.
-
-### Configure the DataX Task Node
-
-Since the built-in data sources do not include reading data from Hive, a custom json is required; see [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note that the HDFS path contains partition directories; when importing data in practice, it is recommended to pass the partition in as a parameter, i.e. to use a custom parameter.
-
-After the required json is written, configure the node following the steps in the figure below.
-
-![datax_task02](/img/tasks/demo/datax_task02.png)
-
-### Check the Run Result
-
-![datax_task03](/img/tasks/demo/datax_task03.png)
-
-## Notes
-
+# DATAX Node
+
+## Overview
+
+The DataX task type is used to run DataX programs. For a DataX node, the worker parses the given json file by executing `${DATAX_HOME}/bin/datax.py`.
+
+## Create Task
+
+- Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
+- Drag the <img src="../../../../img/tasks/icons/datax.png" width="15"/> task node from the toolbar onto the canvas.
+
+## Task Parameters
+
+- Node name: the name of the task node; node names must be unique within a workflow definition.
+- Run flag: indicates whether the node can be scheduled normally; turn on the "prohibit execution" switch if it should not run.
+- Description: describes what the node does.
+- Task priority: when worker threads are insufficient, tasks run from high to low priority; tasks of equal priority run first in, first out.
+- Worker group: the task is dispatched to a machine in the chosen worker group; choosing Default picks a random worker.
+- Environment name: the environment in which the script runs.
+- Number of failed retries: how many times a failed task is resubmitted.
+- Failed retry interval: the interval, in minutes, between resubmissions of a failed task.
+- Delayed execution time: how long, in minutes, task execution is delayed.
+- Timeout alarm: when "timeout alarm" and "timeout failure" are checked and the task exceeds the "timeout duration", an alert email is sent and the task execution fails.
+- Custom template: when the built-in data sources do not meet your needs, you can customize the json configuration of the datax node.
+- json: the json configuration file for the DataX sync job.
+- Custom parameters: the custom parameter types and data types are the same as for the stored procedure task type; the difference is that the SQL task type's custom parameters replace the ${variable} placeholders in the sql statement.
+- Data source: the data source from which data is extracted.
+- sql statement: the sql used to extract data from the source. At run time the node parses the sql, resolves the queried column names, and maps them to the target table's columns; when source and target column names differ, convert them with column aliases (as).
+- Target database: the database the data is synced to.
+- Target pre-sql: sql executed before the main sql statement (on the target database).
+- Target post-sql: sql executed after the main sql statement (on the target database).
+- Rate limit (bytes): limits the number of bytes queried.
+- Rate limit (records): limits the number of records queried.
+- Runtime memory: the minimum and maximum memory can be configured to match the actual production environment.
+- Predecessor task: the selected predecessor tasks become upstream of the current task.
+
+## Task Example
+
+This example demonstrates importing data from Hive into MySQL.
+
+### Configure the DataX Environment in DolphinScheduler
+
+To use the DataX task type in production, first configure the required environment in the configuration file `/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
+
+![datax_task01](../../../../img/tasks/demo/datax_task01.png)
+
+After the environment is configured, DolphinScheduler needs to be restarted.
+
+### Configure the DataX Task Node
+
+Since the built-in data sources do not include reading data from Hive, a custom json is required; see [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note that the HDFS path contains partition directories; when importing data in practice, it is recommended to pass the partition in as a parameter, i.e. to use a custom parameter.
+
+After the required json is written, configure the node following the steps in the figure below.
+
+![datax_task02](../../../../img/tasks/demo/datax_task02.png)
+
+### Check the Run Result
+
+![datax_task03](../../../../img/tasks/demo/datax_task03.png)
+
+## Notes
+
 If the built-in data sources do not meet your needs, enable the custom template option and configure DataX's writer and reader according to your environment; see: https://github.com/alibaba/DataX
\ No newline at end of file
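
For orientation, the sketch below assembles a skeleton of the kind of job json the custom template expects, pairing an hdfsreader with a mysqlwriter. Every concrete value (paths, host names, credentials, columns) is a placeholder; the DataX plugin docs linked above are the authoritative reference.

```python
# Build a skeleton DataX job description (hdfsreader -> mysqlwriter).
import json

job = {
    "job": {
        "setting": {"speed": {"channel": 1}},
        "content": [{
            "reader": {
                "name": "hdfsreader",
                "parameter": {
                    # partition passed in as a custom parameter, per the note above
                    "path": "/user/hive/warehouse/demo/dt=${dt}",
                    "defaultFS": "hdfs://namenode:8020",
                    "fileType": "text",
                    "fieldDelimiter": "\t",
                    "column": ["*"],
                },
            },
            "writer": {
                "name": "mysqlwriter",
                "parameter": {
                    "username": "demo",
                    "password": "******",
                    "column": ["id", "name"],
                    "connection": [{
                        "jdbcUrl": "jdbc:mysql://mysql-host:3306/demo",
                        "table": ["target_table"],
                    }],
                },
            },
        }],
    }
}
print(json.dumps(job, indent=2))
```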
diff --git a/docs/docs/zh/guide/task/dependent.md b/docs/docs/zh/guide/task/dependent.md
index b3fbd91f8d..ae0606a163 100644
--- a/docs/docs/zh/guide/task/dependent.md
+++ b/docs/docs/zh/guide/task/dependent.md
@@ -7,7 +7,7 @@ Dependent 节点,就是**依赖检查节点**。比如 A 流程依赖昨天的
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/tasks/icons/dependent.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/dependent.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -29,12 +29,12 @@ Dependent 节点提供了逻辑判断功能,可以按照逻辑来检测所依
 
 For example, process A is a weekly report task and processes B and C are daily tasks. Task A requires tasks B and C to have succeeded on every day of last week, as shown:
 
-![dependent_task01](/img/tasks/demo/dependent_task01.png)
+![dependent_task01](../../../../img/tasks/demo/dependent_task01.png)
 
 For example, process A is a weekly report task and processes B and C are daily tasks. Task A requires task B or task C to have succeeded on every day of last week, as shown:
 
-![dependent_task02](/img/tasks/demo/dependent_task02.png)
+![dependent_task02](../../../../img/tasks/demo/dependent_task02.png)
 
 Suppose weekly report A additionally requires itself to have succeeded last Tuesday:
 
-![dependent_task03](/img/tasks/demo/dependent_task03.png)
+![dependent_task03](../../../../img/tasks/demo/dependent_task03.png)
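
The first rule above can be paraphrased as a small check, sketched below; `succeeded` is a hypothetical stand-in for looking up real task-instance states.

```python
# Weekly task A passes only if B and C succeeded on every day of last week.
from datetime import date, timedelta

def last_week_days(today: date):
    monday = today - timedelta(days=today.weekday() + 7)
    return [monday + timedelta(days=i) for i in range(7)]

def succeeded(task: str, day: date) -> bool:
    return True  # placeholder: query the real task instance state here

def dependency_passes(today: date) -> bool:
    days = last_week_days(today)
    # "B and C every day"; the "B or C" variant instead requires
    # any(succeeded(t, d) for t in ("B", "C")) for each day d.
    return all(succeeded(t, d) for t in ("B", "C") for d in days)

print(dependency_passes(date(2022, 6, 2)))
```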
diff --git a/docs/docs/zh/guide/task/flink.md b/docs/docs/zh/guide/task/flink.md
index 84acd60422..212f6ee208 100644
--- a/docs/docs/zh/guide/task/flink.md
+++ b/docs/docs/zh/guide/task/flink.md
@@ -11,7 +11,7 @@ Flink 任务类型,用于执行 Flink 程序。对于 Flink 节点:
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/tasks/icons/flink.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/flink.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -54,7 +54,7 @@ Flink 任务类型,用于执行 Flink 程序。对于 Flink 节点:
 
 To use the flink task type in production, first configure the required environment in the configuration file `bin/env/dolphinscheduler_env.sh`.
 
-![flink-configure](/img/tasks/demo/flink_task01.png)
+![flink-configure](../../../../img/tasks/demo/flink_task01.png)
 
 #### Upload the Main Program Package
 
@@ -62,19 +62,19 @@ Flink 任务类型,用于执行 Flink 程序。对于 Flink 节点:
 
 Once the resource center is configured, upload the target files simply by drag and drop.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 #### Configure the Flink Node
 
 Configure the required fields according to the parameter descriptions above.
 
-![demo-flink-simple](/img/tasks/demo/flink_task02.png)
+![demo-flink-simple](../../../../img/tasks/demo/flink_task02.png)
 
 ### Run a FlinkSQL Program
 
 Configure the required fields according to the parameter descriptions above.
 
-![demo-flink-sql-simple](/img/tasks/demo/flink_sql_test.png)
+![demo-flink-sql-simple](../../../../img/tasks/demo/flink_sql_test.png)
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/http.md b/docs/docs/zh/guide/task/http.md
index 18cf3716bc..5245c3b0cd 100644
--- a/docs/docs/zh/guide/task/http.md
+++ b/docs/docs/zh/guide/task/http.md
@@ -8,7 +8,7 @@
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page:
 
-- Drag the <img src="/img/tasks/icons/http.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/http.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -41,7 +41,7 @@ HTTP 定义了与服务器交互的不同方法,最基本的方法有4种,
      - userName: the username;
      - userPassword: the user's login password.
 
-![http_task](/img/tasks/demo/http_task01.png)
+![http_task](../../../../img/tasks/demo/http_task01.png)
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/jupyter.md b/docs/docs/zh/guide/task/jupyter.md
index 502157c355..bcde5122df 100644
--- a/docs/docs/zh/guide/task/jupyter.md
+++ b/docs/docs/zh/guide/task/jupyter.md
@@ -15,7 +15,7 @@
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/jupyter.png" width="15"/> from the toolbar onto the canvas to create the task.
+- Drag <img src="../../../../img/tasks/icons/jupyter.png" width="15"/> from the toolbar onto the canvas to create the task.
 
 ## Task Parameter
 
@@ -44,5 +44,5 @@
 
 This example shows how to create a Jupyter task node:
 
-![demo-jupyter-simple](/img/tasks/demo/jupyter.png)
+![demo-jupyter-simple](../../../../img/tasks/demo/jupyter.png)
 
diff --git a/docs/docs/zh/guide/task/kubernetes.md b/docs/docs/zh/guide/task/kubernetes.md
index 7802774b7c..b282203462 100644
--- a/docs/docs/zh/guide/task/kubernetes.md
+++ b/docs/docs/zh/guide/task/kubernetes.md
@@ -7,7 +7,7 @@ kubernetes任务类型,用于在kubernetes上执行一个短时和批处理的
 ## 创建任务
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/kubernetes.png" width="25"/> from the toolbar onto the canvas and select the data source to connect to, completing the creation.
+- Drag <img src="../../../../img/tasks/icons/kubernetes.png" width="25"/> from the toolbar onto the canvas and select the data source to connect to, completing the creation.
 
 ## Task Parameters
 
@@ -38,7 +38,7 @@ kubernetes任务类型,用于在kubernetes上执行一个短时和批处理的
 
 Configure the required fields according to the parameter descriptions above.
 
-![kubernetes](/img/tasks/demo/kubernetes-task-en.png)
+![kubernetes](../../../../img/tasks/demo/kubernetes-task-en.png)
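
Conceptually, what such a task submits is an ordinary short-lived batch Job. The sketch below, which is not what DolphinScheduler executes internally, builds an equivalent Job with the parameters the task exposes (image, namespace, minimum CPU/memory), using the official `kubernetes` Python client; all names and values are placeholders.

```python
# Create a short-lived batch Job with requested CPU/memory.
from kubernetes import client, config

config.load_kube_config()                      # assumes a local kubeconfig

job = client.V1Job(
    metadata=client.V1ObjectMeta(name="demo-batch-job"),
    spec=client.V1JobSpec(
        template=client.V1PodTemplateSpec(
            spec=client.V1PodSpec(
                restart_policy="Never",
                containers=[client.V1Container(
                    name="demo",
                    image="busybox:1.36",
                    command=["echo", "hello from a short-lived job"],
                    resources=client.V1ResourceRequirements(
                        requests={"cpu": "500m", "memory": "512Mi"},
                    ),
                )],
            ),
        ),
    ),
)
client.BatchV1Api().create_namespaced_job(namespace="default", body=job)
```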
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/map-reduce.md b/docs/docs/zh/guide/task/map-reduce.md
index bd17583afa..cee00e927a 100644
--- a/docs/docs/zh/guide/task/map-reduce.md
+++ b/docs/docs/zh/guide/task/map-reduce.md
@@ -7,7 +7,7 @@ MapReduce(MR) 任务类型,用于执行 MapReduce 程序。对于 MapReduce 
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag the <img src="/img/tasks/icons/mr.png" width="15"/> task node from the toolbar onto the canvas, as shown below:
+- Drag the <img src="../../../../img/tasks/icons/mr.png" width="15"/> task node from the toolbar onto the canvas, as shown below:
 
 ## Task Parameters
 
@@ -56,7 +56,7 @@ MapReduce(MR) 任务类型,用于执行 MapReduce 程序。对于 MapReduce 
 
 To use the MapReduce task type in production, first configure the required environment in the configuration file `bin/env/dolphinscheduler_env.sh`.
 
-![mr_configure](/img/tasks/demo/mr_task01.png)
+![mr_configure](../../../../img/tasks/demo/mr_task01.png)
 
 #### Upload the Main Program Package
 
@@ -64,10 +64,10 @@ MapReduce(MR) 任务类型,用于执行 MapReduce 程序。对于 MapReduce 
 
 Once the resource center is configured, upload the target files simply by drag and drop.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 #### Configure the MapReduce Node
 
 Configure the required fields according to the parameter descriptions above.
 
-![demo-mr-simple](/img/tasks/demo/mr_task02.png)
+![demo-mr-simple](../../../../img/tasks/demo/mr_task02.png)
diff --git a/docs/docs/zh/guide/task/mlflow.md b/docs/docs/zh/guide/task/mlflow.md
index 52fd4d32ef..7045374b42 100644
--- a/docs/docs/zh/guide/task/mlflow.md
+++ b/docs/docs/zh/guide/task/mlflow.md
@@ -29,7 +29,7 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/tasks/icons/mlflow.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/mlflow.png" width="15"/> task node from the toolbar onto the canvas.
 
 
 ## Task Example
@@ -52,7 +52,7 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 
 #### BasicAlgorithm
 
-![mlflow-conda-env](/img/tasks/demo/mlflow-basic-algorithm.png)
+![mlflow-conda-env](../../../../img/tasks/demo/mlflow-basic-algorithm.png)
 
 **Task Parameters**
 
@@ -71,7 +71,7 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 
 #### AutoML
 
-![mlflow-automl](/img/tasks/demo/mlflow-automl.png)
+![mlflow-automl](../../../../img/tasks/demo/mlflow-automl.png)
 
 **Task Parameters**
 
@@ -88,7 +88,7 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 
 #### Custom projects
 
-![mlflow-custom-project-template.png](/img/tasks/demo/mlflow-custom-project-template.png)
+![mlflow-custom-project-template.png](../../../../img/tasks/demo/mlflow-custom-project-template.png)
 
 **Task Parameters**
 
@@ -102,14 +102,14 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 
 The actual run interface looks like this:
 
-![mlflow-custom-project.png](/img/tasks/demo/mlflow-custom-project.png)
+![mlflow-custom-project.png](../../../../img/tasks/demo/mlflow-custom-project.png)
 
 
 ### MLflow Models
 
 #### MLFLOW
 
-![mlflow-models-mlflow](/img/tasks/demo/mlflow-models-mlflow.png)
+![mlflow-models-mlflow](../../../../img/tasks/demo/mlflow-models-mlflow.png)
 
 **Task Parameters**
 
@@ -119,7 +119,7 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 
 #### Docker
 
-![mlflow-models-docker](/img/tasks/demo/mlflow-models-docker.png)
+![mlflow-models-docker](../../../../img/tasks/demo/mlflow-models-docker.png)
 
 - **mlflow server tracking uri**: the MLflow server connection; defaults to http://localhost:5000.
 - **model uri for deployment**: the uri of the model in the mlflow service; both `models:/<model_name>/suffix` and `runs:/` formats are supported.
@@ -132,11 +132,11 @@ MLflow 组件用于执行 MLflow 任务,目前包含Mlflow Projects, 和MLflow
 You need to log in as the admin account and configure a conda environment variable (please [install anaconda](https://docs.continuum.io/anaconda/install/)
 or [install miniconda](https://docs.conda.io/en/latest/miniconda.html#installing) in advance).
 
-![mlflow-conda-env](/img/tasks/demo/mlflow-conda-env.png)
+![mlflow-conda-env](../../../../img/tasks/demo/mlflow-conda-env.png)
 
 When configuring tasks later, select the conda environment created above; otherwise the program will not find the conda environment.
 
-![mlflow-set-conda-env](/img/tasks/demo/mlflow-set-conda-env.png)
+![mlflow-set-conda-env](../../../../img/tasks/demo/mlflow-set-conda-env.png)
 
 ### Start the mlflow service
 
@@ -154,5 +154,5 @@ mlflow server -h 0.0.0.0 -p 5000 --serve-artifacts --backend-store-uri sqlite://
 
 You can view experiments and models by visiting the mlflow service page (`http://localhost:5000`).
 
-![mlflow-server](/img/tasks/demo/mlflow-server.png)
+![mlflow-server](../../../../img/tasks/demo/mlflow-server.png)
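
With the server running, a quick smoke test from the official `mlflow` Python client confirms the tracking URI works; the experiment name and logged values below are arbitrary.

```python
# Log one run against the tracking server started above.
import mlflow

mlflow.set_tracking_uri("http://localhost:5000")
mlflow.set_experiment("smoke-test")

with mlflow.start_run():
    mlflow.log_param("alpha", 0.5)
    mlflow.log_metric("rmse", 0.42)
# The run now appears in the web UI at http://localhost:5000.
```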
 
diff --git a/docs/docs/zh/guide/task/openmldb.md b/docs/docs/zh/guide/task/openmldb.md
index f889b978f2..47287b3572 100644
--- a/docs/docs/zh/guide/task/openmldb.md
+++ b/docs/docs/zh/guide/task/openmldb.md
@@ -9,7 +9,7 @@ OpenMLDB任务组件可以连接OpenMLDB集群执行任务。
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page;
-- Drag the <img src="/img/tasks/icons/openmldb.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/openmldb.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Example
 
@@ -41,13 +41,13 @@ OpenMLDB任务组件可以连接OpenMLDB集群执行任务。
 
 #### Import Data
 
-![load data](/img/tasks/demo/openmldb-load-data.png)
+![load data](../../../../img/tasks/demo/openmldb-load-data.png)
 
 We use the `LOAD DATA` statement to import data into the OpenMLDB cluster. Since offline execution mode is selected, the data is imported into offline storage.
 
 #### Feature Extraction
 
-![fe](/img/tasks/demo/openmldb-feature-extraction.png)
+![fe](../../../../img/tasks/demo/openmldb-feature-extraction.png)
 
 We use `SELECT INTO` for feature extraction. Since offline execution mode is selected, the offline engine performs the feature computation.
 
diff --git a/docs/docs/zh/guide/task/pigeon.md b/docs/docs/zh/guide/task/pigeon.md
index ec7d32f40b..80bd8d82af 100644
--- a/docs/docs/zh/guide/task/pigeon.md
+++ b/docs/docs/zh/guide/task/pigeon.md
@@ -4,7 +4,7 @@ Pigeon任务类型是通过调用远程websocket服务,实现远程任务的
 
 ## Create Task
 
-Drag the <img src="/img/pigeon.png" width="20"/> task node from the toolbar onto the canvas to create the task.
+Drag the <img src="../../../../img/pigeon.png" width="20"/> task node from the toolbar onto the canvas to create the task.
 
 ## Task Parameters
 
diff --git a/docs/docs/zh/guide/task/python.md b/docs/docs/zh/guide/task/python.md
index e4316ae8bd..d7e2b9561e 100644
--- a/docs/docs/zh/guide/task/python.md
+++ b/docs/docs/zh/guide/task/python.md
@@ -8,7 +8,7 @@ Python 任务类型,用于创建 Python 类型的任务并执行一系列的 P
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/python.png" width="15"/> from the toolbar onto the canvas to create the task.
+- Drag <img src="../../../../img/tasks/icons/python.png" width="15"/> from the toolbar onto the canvas to create the task.
 
 ## Task Parameter
 
@@ -33,7 +33,7 @@ Python 任务类型,用于创建 Python 类型的任务并执行一系列的 P
 This example simulates a common simple task that can be run with just one or two lines of code. It prints a single line to the log file:
 "This is a demo of python task"
 
-![demo-python-simple](/img/tasks/demo/python_ui_next.jpg)
+![demo-python-simple](../../../../img/tasks/demo/python_ui_next.jpg)
 
 ```python
 print("This is a demo of python task")
@@ -45,7 +45,7 @@ print("This is a demo of python task")
 defines the parameter "param_key" and sets its value to "param_val". The "script" then calls the print function to output the parameter "param_key". When we save
 and run the task, the log shows the value "param_val" of the parameter "param_key".
 
-![demo-python-custom-param](/img/tasks/demo/python_custom_param_ui_next.jpg)
+![demo-python-custom-param](../../../../img/tasks/demo/python_custom_param_ui_next.jpg)
 
 ```python
 print("${param_key}")
diff --git a/docs/docs/zh/guide/task/shell.md b/docs/docs/zh/guide/task/shell.md
index 239c00aecf..79c7e4fab1 100644
--- a/docs/docs/zh/guide/task/shell.md
+++ b/docs/docs/zh/guide/task/shell.md
@@ -7,7 +7,7 @@ Shell 任务类型,用于创建 Shell 类型的任务并执行一系列的 She
 ## 创建任务
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/shell.png" width="15"/> from the toolbar onto the canvas to create the task.
+- Drag <img src="../../../../img/tasks/icons/shell.png" width="15"/> from the toolbar onto the canvas to create the task.
 
 ## Task Parameters
 
@@ -32,7 +32,7 @@ Shell 任务类型,用于创建 Shell 类型的任务并执行一系列的 She
 This example simulates a common simple task that can be run with just one or two lines of commands. It prints a single line to the log file:
 "This is a demo of shell task"
 
-![demo-shell-simple](/img/tasks/demo/shell.jpg)
+![demo-shell-simple](../../../../img/tasks/demo/shell.jpg)
 
 ### Use Custom Parameters
 
@@ -40,7 +40,7 @@ Shell 任务类型,用于创建 Shell 类型的任务并执行一系列的 She
 defines the parameter "param_key" and sets its value to "param_val". The "script" then declares an echo command to output the parameter "param_key". When we save
 and run the task, the log shows the value "param_val" of the parameter "param_key".
 
-![demo-shell-custom-param](/img/tasks/demo/shell_custom_param.jpg)
+![demo-shell-custom-param](../../../../img/tasks/demo/shell_custom_param.jpg)
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/spark.md b/docs/docs/zh/guide/task/spark.md
index 9079316c8b..b5ee2d84b4 100644
--- a/docs/docs/zh/guide/task/spark.md
+++ b/docs/docs/zh/guide/task/spark.md
@@ -12,7 +12,7 @@ Spark  任务类型用于执行 Spark 应用。对于 Spark 节点,worker 支
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page:
 
-- Drag the <img src="/img/tasks/icons/spark.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/spark.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -56,7 +56,7 @@ Spark  任务类型用于执行 Spark 应用。对于 Spark 节点,worker 支
 
 To use the Spark task type in production, first configure the required environment in the configuration file `bin/env/dolphinscheduler_env.sh`.
 
-![spark_configure](/img/tasks/demo/spark_task01.png)
+![spark_configure](../../../../img/tasks/demo/spark_task01.png)
 
 ##### Upload the Main Program Package
 
@@ -64,13 +64,13 @@ Spark  任务类型用于执行 Spark 应用。对于 Spark 节点,worker 支
 
 Once the resource center is configured, upload the target files simply by drag and drop.
 
-![resource_upload](/img/tasks/demo/upload_jar.png)
+![resource_upload](../../../../img/tasks/demo/upload_jar.png)
 
 ##### Configure the Spark Node
 
 Configure the required fields according to the parameter descriptions above.
 
-![demo-spark-simple](/img/tasks/demo/spark_task02.png)
+![demo-spark-simple](../../../../img/tasks/demo/spark_task02.png)
 
 ### spark sql
 
@@ -78,7 +78,7 @@ Spark  任务类型用于执行 Spark 应用。对于 Spark 节点,worker 支
 
 This case creates a view terms, writes three rows into it, creates a parquet-format table wc while checking whether that table already exists, then inserts the data of view terms into the parquet table wc. The program type is SQL.
 
-![spark_sql](/img/tasks/demo/spark_sql.png)
+![spark_sql](../../../../img/tasks/demo/spark_sql.png)
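
For reference, a standalone PySpark sketch of the same logic follows. The task above runs this as a SQL program inside DolphinScheduler; the snippet only illustrates the statements, and everything beyond the table and view names from the example is assumed.

```python
# Create the terms view, the parquet table wc, and copy the data across.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("terms-to-wc").enableHiveSupport().getOrCreate()

spark.sql("CREATE TEMPORARY VIEW terms AS "
          "SELECT * FROM VALUES ('hello'), ('world'), ('spark') AS t(word)")
spark.sql("CREATE TABLE IF NOT EXISTS wc (word STRING) USING parquet")
spark.sql("INSERT INTO wc SELECT word FROM terms")
spark.sql("SELECT * FROM wc").show()
```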
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/sql.md b/docs/docs/zh/guide/task/sql.md
index 4896e80bf9..5fd1b13875 100644
--- a/docs/docs/zh/guide/task/sql.md
+++ b/docs/docs/zh/guide/task/sql.md
@@ -11,7 +11,7 @@ SQL任务类型,用于连接数据库并执行相应SQL。
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/sql.png" width="25"/> from the toolbar onto the canvas and select the data source to connect to, completing the creation.
+- Drag <img src="../../../../img/tasks/icons/sql.png" width="25"/> from the toolbar onto the canvas and select the data source to connect to, completing the creation.
 
 ## Task Parameters
 
@@ -35,13 +35,13 @@ SQL任务类型,用于连接数据库并执行相应SQL。
 
 This example creates a temporary table `tmp_hello_world` in hive and writes one row into it. The SQL type is set to non-query. Since the table must not exist before it is created, we use a custom parameter to take the current date as a table-name suffix on every run, so the task can run daily. The created table is named `tmp_hello_world_{yyyyMMdd}`.
 
-![hive-sql](/img/tasks/demo/hive-sql.png)
+![hive-sql](../../../../img/tasks/demo/hive-sql.png)
 
 ### Query the Result in Hive After the Task Succeeds
 
 Log in to the cluster and connect to `apache hive` using the `hive` command, `beeline`, `JDBC`, etc., and run `select * from tmp_hello_world_{yyyyMMdd}`, replacing `{yyyyMMdd}` with the date of the run. A query screenshot follows:
 
-![hive-sql](/img/tasks/demo/hive-result.png)
+![hive-sql](../../../../img/tasks/demo/hive-result.png)
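
The date-suffix mechanism is easy to mirror locally. The sketch below only reproduces the naming logic; inside DolphinScheduler the suffix comes from the built-in time parameter referenced by the custom parameter, not from this code.

```python
# Compute today's table-name suffix and the resulting statement.
from datetime import date

suffix = date.today().strftime("%Y%m%d")
sql = f"CREATE TABLE tmp_hello_world_{suffix} AS SELECT 'hello world' AS greeting"
print(sql)   # e.g. CREATE TABLE tmp_hello_world_20220602 AS SELECT ...
```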
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/stored-procedure.md b/docs/docs/zh/guide/task/stored-procedure.md
index bca32550ee..48b3dc946b 100644
--- a/docs/docs/zh/guide/task/stored-procedure.md
+++ b/docs/docs/zh/guide/task/stored-procedure.md
@@ -4,7 +4,7 @@
 > Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png) task node from the toolbar onto the canvas, as shown below:
 
 <p align="center">
-   <img src="/img/procedure_edit.png" width="80%" />
+   <img src="../../../../img/procedure_edit.png" width="80%" />
  </p>
 
 - Data source: the stored procedure supports the MySQL and POSTGRESQL data source types; select the corresponding data source.
diff --git a/docs/docs/zh/guide/task/sub-process.md b/docs/docs/zh/guide/task/sub-process.md
index 1e5a989b16..763d9fa69a 100644
--- a/docs/docs/zh/guide/task/sub-process.md
+++ b/docs/docs/zh/guide/task/sub-process.md
@@ -8,7 +8,7 @@
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page:
 
-- Drag the <img src="/img/tasks/icons/sub_process.png" width="15"/> task node from the toolbar onto the canvas.
+- Drag the <img src="../../../../img/tasks/icons/sub_process.png" width="15"/> task node from the toolbar onto the canvas.
 
 ## Task Parameters
 
@@ -30,17 +30,17 @@
 
 Create a shell task that prints "hello", and name its workflow definition test_dag01.
 
-![subprocess_task01](/img/tasks/demo/subprocess_task01.png)
+![subprocess_task01](../../../../img/tasks/demo/subprocess_task01.png)
 
 ### Create the sub_process Task
 
 To use sub_process, first create the required child-node task, i.e. the shell task created in step one. Then, as shown below, select the corresponding child node at position ⑤.
 
-![subprocess_task02](/img/tasks/demo/subprocess_task02.png)
+![subprocess_task02](../../../../img/tasks/demo/subprocess_task02.png)
 
 After the sub_process is created, create another shell task that prints "world" and connect the two. Save the workflow, bring it online, and run it to get the expected result.
 
-![subprocess_task03](/img/tasks/demo/subprocess_task03.png)
+![subprocess_task03](../../../../img/tasks/demo/subprocess_task03.png)
 
 ## Notes
 
diff --git a/docs/docs/zh/guide/task/switch.md b/docs/docs/zh/guide/task/switch.md
index 7a385ae38f..e9c2e3dae8 100644
--- a/docs/docs/zh/guide/task/switch.md
+++ b/docs/docs/zh/guide/task/switch.md
@@ -4,7 +4,7 @@ Switch是一个条件判断节点,依据[全局变量](../parameter/global.md)
 
 ## Create Task
 
-Drag the <img src="/img/switch.png" width="20"/> task node from the toolbar onto the canvas to create the task. **Note**: after a switch task is created, configure its upstream and downstream first; only then can the branch parameters be configured.
+Drag the <img src="../../../../img/switch.png" width="20"/> task node from the toolbar onto the canvas to create the task. **Note**: after a switch task is created, configure its upstream and downstream first; only then can the branch parameters be configured.
 
 ## Task Parameters
 
@@ -30,7 +30,7 @@ Switch是一个条件判断节点,依据[全局变量](../parameter/global.md)
 
 The final switch task configuration looks like this:
 
-![task-switch-configure](/img/switch_configure.jpg)
+![task-switch-configure](../../../../img/switch_configure.jpg)
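
The branch selection can be pictured as the small sketch below (not DolphinScheduler source): conditions are evaluated in order against the global variable, the first match wins, and the default branch catches everything else.

```python
# Evaluate branch conditions in order; fall through to the default branch.
def route(value: int) -> str:
    branches = [
        ("${switchValue} == 1", value == 1, "taskA"),
        ("${switchValue} == 2", value == 2, "taskB"),
    ]
    for _expr, matched, branch in branches:
        if matched:
            return branch
    return "default_task"   # the default branch

print(route(2))   # -> taskB
print(route(7))   # -> default_task
```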
 
 ## Related Tasks
 
diff --git a/docs/docs/zh/guide/task/zeppelin.md b/docs/docs/zh/guide/task/zeppelin.md
index 78285a0317..fa2477a7dc 100644
--- a/docs/docs/zh/guide/task/zeppelin.md
+++ b/docs/docs/zh/guide/task/zeppelin.md
@@ -8,7 +8,7 @@
 ## Create Task
 
 - Click Project Management -> Project Name -> Workflow Definition, then click the "Create Workflow" button to enter the DAG edit page.
-- Drag <img src="/img/tasks/icons/zeppelin.png" width="15"/> from the toolbar onto the canvas to create the task.
+- Drag <img src="../../../../img/tasks/icons/zeppelin.png" width="15"/> from the toolbar onto the canvas to create the task.
 
 ## Task Parameter
 
@@ -31,7 +31,7 @@
 
 This example shows how to create a Zeppelin Paragraph task node:
 
-![demo-zeppelin-paragraph](/img/tasks/demo/zeppelin.png)
+![demo-zeppelin-paragraph](../../../../img/tasks/demo/zeppelin.png)
 
-![demo-get-zeppelin-id](/img/tasks/demo/zeppelin_id.png)
+![demo-get-zeppelin-id](../../../../img/tasks/demo/zeppelin_id.png)
 
diff --git a/docs/img_utils.py b/docs/img_utils.py
index 493ff7169e..a0116dfbd8 100644
--- a/docs/img_utils.py
+++ b/docs/img_utils.py
@@ -63,12 +63,12 @@ def get_paths_rel_path(paths: Set[Path], rel: Path) -> Set:
 def get_docs_img_path(paths: Set[Path]) -> Set:
     """Get all img syntax from given :param:`paths` using the regexp from :param:`pattern`."""
     res = set()
-    pattern = re.compile(r"/img[\w./-]+")
+    pattern = re.compile(r"\.\./img[\w./-]+")  # escape the dots so only a literal "../img" prefix matches
     for path in paths:
         content = path.read_text()
         find = pattern.findall(content)
         if find:
-            res |= {item for item in find}
+            res |= {item.lstrip(".") for item in find}
     return res
 
 
@@ -125,8 +125,11 @@ def prune() -> None:
 
 
 def dev_syntax() -> None:
-    """Check temp whether temporary do not support syntax in development."""
-    pattern = re.compile("(\\(\\.\\.[\\w./-]+\\))")
+    """Check whether documents under the development directory contain unsupported syntax.
+
+    * A development document must not reference other documents in the `docs` directory via relative links.
+    """
+    pattern = re.compile("(\\(\\.\\.[\\w./-]+\\.md\\))")
     dev_files_path = get_files_recurse(dev_en_dir) | get_files_recurse(dev_zh_dir)
     for path in dev_files_path:
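
To see what the amended helper extracts, a quick standalone check (illustrative) of the new pattern and the `lstrip` normalization:

```python
# Demonstrate the relative-path extraction and normalization.
import re

line = "![login](../../../../img/new_ui/dev/quick-start/login.png)"
pattern = re.compile(r"\.\./img[\w./-]+")      # the pattern used above

found = pattern.findall(line)                  # ['../img/new_ui/dev/quick-start/login.png']
print({m.lstrip(".") for m in found})          # {'/img/new_ui/dev/quick-start/login.png'}
```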